Fix e2e test for azurefile-csi storage class that uses ARO-managed storage account on OCP 4.11 (Azure#3226)

* Fix PVC name in PVC status check
* Limit test scope and verify that test works if FIPS is disabled
* Mark test pending again, leave a comment explaining why, and revert debugging changes
kimorris27 authored Nov 1, 2023
1 parent a0cc0ee commit f042c81
Showing 1 changed file with 37 additions and 24 deletions.
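
For context on the PVC-name fix: Kubernetes names each PVC created from a StatefulSet's volumeClaimTemplates as <claimTemplateName>-<statefulSetName>-<ordinal>, so the test cannot look the claim up by the StatefulSet name alone. Below is a minimal, self-contained sketch of the convention the new statefulSetPVCName helper encodes; the StatefulSet name used here is illustrative, while "e2e-test-claim" matches the testPVCName constant added in this commit.

package main

import "fmt"

// statefulSetPVCName mirrors Kubernetes' naming convention for PVCs created from a
// StatefulSet's volumeClaimTemplates: <claimTemplateName>-<statefulSetName>-<ordinal>.
func statefulSetPVCName(ssName string, claimName string, ordinal int) string {
	return fmt.Sprintf("%s-%s-%d", claimName, ssName, ordinal)
}

func main() {
	// "busybox-abc" is an illustrative StatefulSet name; the e2e test generates
	// its own name at runtime and passes the testPVCName constant.
	fmt.Println(statefulSetPVCName("busybox-abc", "e2e-test-claim", 0))
	// Prints: e2e-test-claim-busybox-abc-0
}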
61 changes: 37 additions & 24 deletions test/e2e/cluster.go
@@ -29,6 +29,10 @@ import (
"github.com/Azure/ARO-RP/test/util/project"
)

const (
testPVCName = "e2e-test-claim"
)

var _ = Describe("Cluster", Serial, func() {
var p project.Project

@@ -80,13 +84,17 @@ var _ = Describe("Cluster", Serial, func() {
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
})

// TODO: this test is marked as pending as it isn't working as expected
// TODO: This test is marked as Pending because CI clusters are FIPS-enabled, and Azure File storage
// doesn't work with FIPS-enabled clusters: https://learn.microsoft.com/en-us/azure/openshift/howto-enable-fips-openshift#support-for-fips-cryptography
//
// We should enable this test when/if FIPS becomes toggleable post-install in the future.
It("which is using the default Azure File storage class backed by the cluster storage account", Pending, func(ctx context.Context) {
By("adding the Microsoft.Storage service endpoint to each cluster subnet")
By("adding the Microsoft.Storage service endpoint to each cluster subnet (if needed)")

oc, err := clients.OpenshiftClusters.Get(ctx, vnetResourceGroup, clusterName)
Expect(err).NotTo(HaveOccurred())
ocpSubnets := clusterSubnets(oc)
subnetAlreadyHasStorageEndpoint := false

for _, s := range ocpSubnets {
vnetID, subnetName, err := apisubnet.Split(s)
@@ -108,16 +116,14 @@

// Check whether service endpoint is already there before trying to add
// it; trying to add a duplicate results in an error
subnetHasStorageEndpoint := false

for _, se := range *mgmtSubnet.ServiceEndpoints {
if se.Service != nil && *se.Service == "Microsoft.Storage" {
subnetHasStorageEndpoint = true
subnetAlreadyHasStorageEndpoint = true
break
}
}

if !subnetHasStorageEndpoint {
if !subnetAlreadyHasStorageEndpoint {
storageEndpoint := mgmtnetwork.ServiceEndpointPropertiesFormat{
Service: to.StringPtr("Microsoft.Storage"),
Locations: &[]string{"*"},
@@ -180,32 +186,35 @@ var _ = Describe("Cluster", Serial, func() {
s, err := clients.Kubernetes.AppsV1().StatefulSets(p.Name).Get(ctx, ssName, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(ready.StatefulSetIsReady(s)).To(BeTrue(), "expect stateful to be ready")
GinkgoWriter.Println(s)

pvc, err := clients.Kubernetes.CoreV1().PersistentVolumeClaims(p.Name).Get(ctx, ssName, metav1.GetOptions{})
pvcName := statefulSetPVCName(ssName, testPVCName, 0)
pvc, err := clients.Kubernetes.CoreV1().PersistentVolumeClaims(p.Name).Get(ctx, pvcName, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
GinkgoWriter.Println(pvc)
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())

By("cleaning up the cluster subnets (removing service endpoints)")
for _, s := range ocpSubnets {
vnetID, subnetName, err := apisubnet.Split(s)
Expect(err).NotTo(HaveOccurred())
// The cluster subnets should always have endpoints in CI since CI doesn't have the gateway, but being safe
By("cleaning up the cluster subnets (i.e. removing service endpoints if appropriate)")
if !subnetAlreadyHasStorageEndpoint {
for _, s := range ocpSubnets {
vnetID, subnetName, err := apisubnet.Split(s)
Expect(err).NotTo(HaveOccurred())

vnetR, err := azure.ParseResourceID(vnetID)
Expect(err).NotTo(HaveOccurred())
vnetR, err := azure.ParseResourceID(vnetID)
Expect(err).NotTo(HaveOccurred())

mgmtSubnet, err := clients.Subnet.Get(ctx, vnetResourceGroup, vnetR.ResourceName, subnetName, "")
Expect(err).NotTo(HaveOccurred())
mgmtSubnet, err := clients.Subnet.Get(ctx, vnetResourceGroup, vnetR.ResourceName, subnetName, "")
Expect(err).NotTo(HaveOccurred())

if mgmtSubnet.SubnetPropertiesFormat == nil {
mgmtSubnet.SubnetPropertiesFormat = &mgmtnetwork.SubnetPropertiesFormat{}
}
if mgmtSubnet.SubnetPropertiesFormat == nil {
mgmtSubnet.SubnetPropertiesFormat = &mgmtnetwork.SubnetPropertiesFormat{}
}

mgmtSubnet.SubnetPropertiesFormat.ServiceEndpoints = &[]mgmtnetwork.ServiceEndpointPropertiesFormat{}
mgmtSubnet.SubnetPropertiesFormat.ServiceEndpoints = &[]mgmtnetwork.ServiceEndpointPropertiesFormat{}

err = clients.Subnet.CreateOrUpdateAndWait(ctx, vnetResourceGroup, vnetR.ResourceName, subnetName, mgmtSubnet)
Expect(err).NotTo(HaveOccurred())
err = clients.Subnet.CreateOrUpdateAndWait(ctx, vnetResourceGroup, vnetR.ResourceName, subnetName, mgmtSubnet)
Expect(err).NotTo(HaveOccurred())
}
}
})

@@ -311,7 +320,7 @@ func createStatefulSet(ctx context.Context, cli kubernetes.Interface, namespace,
},
VolumeMounts: []corev1.VolumeMount{
{
Name: ssName,
Name: testPVCName,
MountPath: "/data",
ReadOnly: false,
},
@@ -323,7 +332,7 @@
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: ssName,
Name: testPVCName,
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
@@ -343,6 +352,10 @@ func createStatefulSet(ctx context.Context, cli kubernetes.Interface, namespace,
return ssName, err
}

func statefulSetPVCName(ssName string, claimName string, ordinal int) string {
return fmt.Sprintf("%s-%s-%d", claimName, ssName, ordinal)
}

func createLoadBalancerService(ctx context.Context, cli kubernetes.Interface, name, namespace string, annotations map[string]string) error {
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
