Consolidate e2e logic (#3392)
mociarain authored Mar 12, 2024
1 parent 7a415b0 commit 0eea1ec
Showing 6 changed files with 75 additions and 92 deletions.
6 changes: 3 additions & 3 deletions test/e2e/adminapi_cluster_getlogs.go
@@ -46,21 +46,21 @@ func testGetPodLogsOK(ctx context.Context, containerName, podName, namespace str

By("creating a test pod in openshift-azure-operator namespace with some known logs")
pod := mockPod(containerName, podName, namespace, expectedLog)
pod = CreateK8sObjectWithRetry(
CreateK8sObjectWithRetry(
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Create, pod, metav1.CreateOptions{},
)

defer func() {
By("deleting the test pod")
DeleteK8sObjectWithRetry(
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Delete, pod.Name, metav1.DeleteOptions{},
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Delete, podName, metav1.DeleteOptions{},
)
}()

By("waiting for the pod to successfully terminate")
Eventually(func(g Gomega, ctx context.Context) {
pod = GetK8sObjectWithRetry(
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Get, pod.Name, metav1.GetOptions{},
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Get, podName, metav1.GetOptions{},
)
g.Expect(pod.Status.Phase).To(Equal(corev1.PodSucceeded))
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
25 changes: 9 additions & 16 deletions test/e2e/adminapi_delete_managedresource.go
@@ -14,7 +14,6 @@ import (
. "github.com/onsi/gomega"

corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/Azure/ARO-RP/pkg/util/stringutils"
@@ -45,29 +44,23 @@ var _ = Describe("[Admin API] Delete managed resource action", func() {
var fipConfigID string
var pipAddressID string

const namespace = "default"

By("creating a test service of type loadbalancer")
creationFunc := clients.Kubernetes.CoreV1().Services("default").Create
creationFunc := clients.Kubernetes.CoreV1().Services(namespace).Create
CreateK8sObjectWithRetry(ctx, creationFunc, &loadBalancerService, metav1.CreateOptions{})

defer func() {
By("cleaning up the k8s loadbalancer service")
// wait for deletion to prevent flakes on retries
Eventually(func(g Gomega, ctx context.Context) {
err := clients.Kubernetes.CoreV1().Services("default").Delete(ctx, "test", metav1.DeleteOptions{})
g.Expect((err == nil || kerrors.IsNotFound(err))).To(BeTrue(), "expect Service to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())

By("confirming that the k8s loadbalancer service is gone")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Services("default").Get(ctx, "test", metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Service](
ctx, clients.Kubernetes.CoreV1().Services(namespace), loadBalancerService.Name,
)
}()

// wait for ingress IP to be assigned as this indicates the service is ready
Eventually(func(g Gomega, ctx context.Context) {
getFunc := clients.Kubernetes.CoreV1().Services("default").Get
service = GetK8sObjectWithRetry(ctx, getFunc, "test", metav1.GetOptions{})
getFunc := clients.Kubernetes.CoreV1().Services(namespace).Get
service = GetK8sObjectWithRetry(ctx, getFunc, loadBalancerService.Name, metav1.GetOptions{})
g.Expect(service.Status.LoadBalancer.Ingress).To(HaveLen(1))
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())

@@ -115,7 +108,7 @@ var _ = Describe("[Admin API] Delete managed resource action", func() {
oc, err := clients.OpenshiftClustersPreview.Get(ctx, vnetResourceGroup, clusterName)
Expect(err).NotTo(HaveOccurred())

// Fake name prevents accidently deleting the PLS but still validates gaurdrail logic works.
// Fake name prevents accidentally deleting the PLS but still validates guardrail logic works.
plsResourceID := fmt.Sprintf("%s/providers/Microsoft.Network/PrivateLinkServices/%s", *oc.OpenShiftClusterProperties.ClusterProfile.ResourceGroupID, "fake-pls")

resp, err := adminRequest(ctx, http.MethodPost, "/admin"+clusterResourceID+"/deletemanagedresource", url.Values{"managedResourceID": []string{plsResourceID}}, true, nil, nil)
42 changes: 12 additions & 30 deletions test/e2e/adminapi_kubernetesobjects.go
@@ -34,17 +34,13 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() {
// but we need to remove the object in case of failure
// to allow us to run this test against the same cluster multiple times.
By("deleting the config map via Kubernetes API")
err := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Delete(ctx, objName, metav1.DeleteOptions{})
// On successfully we expect NotFound error
if !kerrors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
CleanupK8sResource[*corev1.ConfigMap](
ctx, clients.Kubernetes.CoreV1().ConfigMaps(namespace), objName,
)
By("deleting the pod via Kubernetes API")
err = clients.Kubernetes.CoreV1().Pods(namespace).Delete(ctx, objName, metav1.DeleteOptions{})
// On successfully we expect NotFound error
if !kerrors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
CleanupK8sResource[*corev1.Pod](
ctx, clients.Kubernetes.CoreV1().Pods(namespace), objName,
)
}()

testConfigMapCreateOK(ctx, objName, namespace)
@@ -74,16 +70,9 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() {

defer func() {
By("deleting the test customer namespace via Kubernetes API")
deleteFunc := clients.Kubernetes.CoreV1().Namespaces().Delete
DeleteK8sObjectWithRetry(ctx, deleteFunc, namespace, metav1.DeleteOptions{})

// To avoid flakes, we need it to be completely deleted before we can use it again
// in a separate run or in a separate It block
By("waiting for the test customer namespace to be deleted")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue(), "expect Namespace to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Namespace](
ctx, clients.Kubernetes.CoreV1().Namespaces(), namespace,
)
}()

testConfigMapCreateOrUpdateForbidden(ctx, "creating", objName, namespace)
@@ -112,16 +101,9 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() {

defer func() {
By("deleting the test customer namespace via Kubernetes API")
deleteFunc := clients.Kubernetes.CoreV1().Namespaces().Delete
DeleteK8sObjectWithRetry(ctx, deleteFunc, namespace, metav1.DeleteOptions{})

// To avoid flakes, we need it to be completely deleted before we can use it again
// in a separate run or in a separate It block
By("waiting for the test customer namespace to be deleted")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue(), "expect Namespace to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Namespace](
ctx, clients.Kubernetes.CoreV1().Namespaces(), namespace,
)
}()

By("creating an object via Kubernetes API")
14 changes: 3 additions & 11 deletions test/e2e/adminapi_redeployvm.go
@@ -17,7 +17,6 @@ import (

mgmtcompute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/Azure/ARO-RP/pkg/util/ready"
@@ -111,16 +110,9 @@ func getNodeUptime(g Gomega, ctx context.Context, node string) (time.Time, error

defer func() {
By("deleting the uptime pod via Kubernetes API")
deleteFunc := clients.Kubernetes.CoreV1().Pods(namespace).Delete
DeleteK8sObjectWithRetry(ctx, deleteFunc, podName, metav1.DeleteOptions{})

// To avoid flakes, we need it to be completely deleted before we can use it again
// in a separate run or in a separate It block
By("waiting for uptime pod to be deleted")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue(), "expect uptime pod to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Pod](
ctx, clients.Kubernetes.CoreV1().Pods(namespace), podName,
)
}()

By("waiting for uptime pod to move into the Succeeded phase")
10 changes: 3 additions & 7 deletions test/e2e/cluster.go
@@ -47,12 +47,8 @@ var _ = Describe("Cluster", Serial, func() {
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(BeNil())

DeferCleanup(func(ctx context.Context) {
By("deleting a test namespace")
project.Delete(ctx)
By("verifying the namespace is deleted")
Eventually(func(ctx context.Context) error {
return project.VerifyProjectIsDeleted(ctx)
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(BeNil())
By("deleting the test project")
project.CleanUp(ctx)
})
})

@@ -138,7 +134,7 @@ var _ = Describe("Cluster", Serial, func() {
cluster, err := clients.AROClusters.AroV1alpha1().Clusters().Get(ctx, "cluster", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())

// Poke the ARO storageaccount controller to reconcile
// Poke the ARO storage account controller to reconcile
cluster.Spec.OperatorFlags[operator.StorageAccountsEnabled] = operator.FlagFalse
cluster, err = clients.AROClusters.AroV1alpha1().Clusters().Update(ctx, cluster, metav1.UpdateOptions{})
Expect(err).NotTo(HaveOccurred())
70 changes: 45 additions & 25 deletions test/e2e/helpers.go
@@ -36,14 +36,13 @@ type K8sDeleteFunc func(ctx context.Context, name string, options metav1.DeleteO
// asserting there were no errors.
func GetK8sObjectWithRetry[T kruntime.Object](
ctx context.Context, getFunc K8sGetFunc[T], name string, options metav1.GetOptions,
) T {
var object T
) (result T) {
var err error
Eventually(func(g Gomega, ctx context.Context) {
result, err := getFunc(ctx, name, options)
result, err = getFunc(ctx, name, options)
g.Expect(err).NotTo(HaveOccurred())
object = result
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
return object
return result
}

// GetK8sPodLogsWithRetry gets the logs for the specified pod in the named namespace. It gets them with some
@@ -56,37 +55,35 @@ func GetK8sPodLogsWithRetry(
g.Expect(err).NotTo(HaveOccurred())
rawBody = string(body)
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
return
return rawBody
}

// ListK8sObjectWithRetry takes a list function like clients.Kubernetes.CoreV1().Nodes().List and the
// parameters for it. It then makes the call with some retry logic and returns the result after
// asserting there were no errors.
func ListK8sObjectWithRetry[T kruntime.Object](
ctx context.Context, listFunc K8sListFunc[T], options metav1.ListOptions,
) T {
var object T
) (result T) {
var err error
Eventually(func(g Gomega, ctx context.Context) {
result, err := listFunc(ctx, options)
result, err = listFunc(ctx, options)
g.Expect(err).NotTo(HaveOccurred())
object = result
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
return object
return result
}

// CreateK8sObjectWithRetry takes a create function like clients.Kubernetes.CoreV1().Pods(namespace).Create
// and the parameters for it. It then makes the call with some retry logic and returns the result after
// asserting there were no errors.
func CreateK8sObjectWithRetry[T kruntime.Object](
ctx context.Context, createFunc K8sCreateFunc[T], obj T, options metav1.CreateOptions,
) T {
var object T
) (result T) {
var err error
Eventually(func(g Gomega, ctx context.Context) {
result, err := createFunc(ctx, obj, options)
result, err = createFunc(ctx, obj, options)
g.Expect(err).NotTo(HaveOccurred())
object = result
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
return object
return result
}

// DeleteK8sObjectWithRetry takes a delete function like clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Delete
Expand All @@ -100,6 +97,32 @@ func DeleteK8sObjectWithRetry(
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
}

type AllowedCleanUpAPIInterface[T kruntime.Object] interface {
Get(ctx context.Context, name string, opts metav1.GetOptions) (T, error)
Delete(ctx context.Context, name string, options metav1.DeleteOptions) error
}

// CleanupK8sResource takes a client that knows how to issue a GET and DELETE call for a given resource.
// It then issues a delete request and then polls the API until the resource is no longer found.
//
// Note: If the DELETE request receives a 404 we assume the resource has been cleaned up successfully.
func CleanupK8sResource[T kruntime.Object](
ctx context.Context, client AllowedCleanUpAPIInterface[T], name string,
) {
DefaultEventuallyTimeout = 10 * time.Minute
PollingInterval = 1 * time.Second
Eventually(func(g Gomega, ctx context.Context) {
err := client.Delete(ctx, name, metav1.DeleteOptions{})
g.Expect((err == nil || kerrors.IsNotFound(err))).To(BeTrue())
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())

// GET the resource until NOT_FOUND to ensure it's been deleted.
Eventually(func(g Gomega, ctx context.Context) {
_, err := client.Get(ctx, name, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
}

type Project struct {
projectClient projectclient.Interface
cli kubernetes.Interface
@@ -167,13 +190,10 @@ func (p Project) VerifyProjectIsReady(ctx context.Context) error {
return nil
}

// VerifyProjectIsDeleted verifies that the project does not exist and returns error if a project exists
// or if it encounters an error other than NotFound
func (p Project) VerifyProjectIsDeleted(ctx context.Context) error {
_, err := p.projectClient.ProjectV1().Projects().Get(ctx, p.Name, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
return nil
}

return fmt.Errorf("Project exists")
// VerifyProjectIsDeleted verifies that the project does not exist by polling it.
func (p Project) VerifyProjectIsDeleted(ctx context.Context) {
Eventually(func(g Gomega, ctx context.Context) {
_, err := p.projectClient.ProjectV1().Projects().Get(ctx, p.Name, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
}
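
As a minimal sketch of how the consolidated cleanup helper above is intended to be used, the snippet below pairs CreateK8sObjectWithRetry with a deferred CleanupK8sResource call, mirroring the pattern adopted in adminapi_kubernetesobjects.go. The clientset parameter, namespace, and object name are illustrative assumptions, not code from this commit.

// Minimal usage sketch; names marked below are hypothetical.
package e2e

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// exampleCleanupUsage shows the create-then-defer-cleanup pattern that the
// per-test delete/poll boilerplate was consolidated into.
func exampleCleanupUsage(ctx context.Context, clientset kubernetes.Interface) {
	const namespace = "default"
	const objName = "example-cm" // hypothetical test object name

	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: objName, Namespace: namespace},
	}

	// Create the object with the existing retry helper.
	CreateK8sObjectWithRetry(ctx, clientset.CoreV1().ConfigMaps(namespace).Create, cm, metav1.CreateOptions{})

	// One call issues the DELETE (tolerating 404) and then polls GET until the
	// object is gone, replacing the hand-rolled Eventually blocks removed above.
	defer CleanupK8sResource[*corev1.ConfigMap](
		ctx, clientset.CoreV1().ConfigMaps(namespace), objName,
	)
}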
