Consolidate e2e logic #3392

Merged
merged 45 commits from consolidate-e2e-logic into master on Mar 12, 2024
Commits (45)
1c4cb8c
Add initial helpers.go
mociarain Jan 23, 2024
4858ffc
Apply to one file
mociarain Jan 23, 2024
c2b363d
Update doc comment
mociarain Jan 23, 2024
706bec7
Remove unneeded import
mociarain Jan 29, 2024
053b1b2
Leverage the type system for drier code
mociarain Jan 30, 2024
a6fb802
Actually tease out a MVP
mociarain Jan 30, 2024
c76c90a
Tidy up the helpers
mociarain Jan 30, 2024
2511d74
Attempt to conform to Go conventions
mociarain Jan 30, 2024
eec329e
Update adminapi_delete_managedresources.go
mociarain Jan 30, 2024
4c71718
Add list function
mociarain Jan 30, 2024
cdf3220
Update adminapi_drainnode
mociarain Jan 30, 2024
976536f
Update adminapi_kubernetesobjects.go
mociarain Jan 30, 2024
93c98e2
Fix typos
mociarain Jan 30, 2024
ed79ef0
Bring the helpers into the relevant file
mociarain Jan 31, 2024
150d25d
Update the other straightforward cases
mociarain Jan 31, 2024
c0b5802
Lint
mociarain Jan 31, 2024
4ddd70a
Add a GetLogs helper
mociarain Jan 31, 2024
41c13d5
Move project into the helpers
mociarain Jan 31, 2024
e7c1979
Update dns.go
mociarain Jan 31, 2024
08b6967
Use consistent naming
mociarain Jan 31, 2024
a36830d
Fix import ordering
mociarain Jan 31, 2024
5a6412e
Up the default time
mociarain Feb 1, 2024
b6f4a88
Fix incorrect API
mociarain Feb 1, 2024
f88182d
Increase verifyResolvConfTimeout
mociarain Feb 1, 2024
44c3596
Clean up redeploymv.go
mociarain Feb 1, 2024
4057a88
Reuse the same variable name for the same thing
mociarain Feb 2, 2024
d11fd09
s/(.*)Call/$1Func
mociarain Feb 5, 2024
b842fc5
Bind repeated string to a constant
mociarain Feb 5, 2024
295a74c
Tidy up naming conventions and comments
mociarain Feb 5, 2024
2e27232
Add consistent cleanup logic
mociarain Feb 1, 2024
5edb33e
Tidy up post rebase
mociarain Feb 5, 2024
57fb28a
Merge remote-tracking branch 'origin/master' into consolidate-e2e-logic
mociarain Feb 8, 2024
736a832
Add missing returned value
mociarain Feb 8, 2024
8808fe9
Add some retry logic to a flaky part of Project
mociarain Feb 13, 2024
866250f
Actually make the call in GetK8sObjectWithRetry
mociarain Feb 14, 2024
0cb9227
Use a consistent return convention
mociarain Feb 14, 2024
2199362
Remove unneeded logic check
mociarain Feb 14, 2024
2134d1d
Make CleanUp idempotent
mociarain Feb 14, 2024
c3bd2a1
Merge remote-tracking branch 'origin/master' into consolidate-e2e-logic
mociarain Feb 19, 2024
e45693d
Use explicit returns
mociarain Feb 19, 2024
4df7584
Merge remote-tracking branch 'origin/master' into consolidate-e2e-logic
mociarain Feb 20, 2024
874fa23
Fix logic in CleanupK8sResource
mociarain Feb 20, 2024
f6d0953
Tidy up GetK8sObjectWithRetry
mociarain Feb 21, 2024
77fa007
Add comment
mociarain Feb 27, 2024
14c9f8e
Remove error return
mociarain Mar 5, 2024
6 changes: 3 additions & 3 deletions test/e2e/adminapi_cluster_getlogs.go
@@ -46,21 +46,21 @@ func testGetPodLogsOK(ctx context.Context, containerName, podName, namespace str

By("creating a test pod in openshift-azure-operator namespace with some known logs")
pod := mockPod(containerName, podName, namespace, expectedLog)
pod = CreateK8sObjectWithRetry(
CreateK8sObjectWithRetry(
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Create, pod, metav1.CreateOptions{},
)

defer func() {
By("deleting the test pod")
DeleteK8sObjectWithRetry(
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Delete, pod.Name, metav1.DeleteOptions{},
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Delete, podName, metav1.DeleteOptions{},
)
}()

By("waiting for the pod to successfully terminate")
Eventually(func(g Gomega, ctx context.Context) {
pod = GetK8sObjectWithRetry(
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Get, pod.Name, metav1.GetOptions{},
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Get, podName, metav1.GetOptions{},
)
g.Expect(pod.Status.Phase).To(Equal(corev1.PodSucceeded))
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
25 changes: 9 additions & 16 deletions test/e2e/adminapi_delete_managedresource.go
@@ -14,7 +14,6 @@ import (
. "github.com/onsi/gomega"

corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/Azure/ARO-RP/pkg/util/stringutils"
@@ -45,29 +44,23 @@ var _ = Describe("[Admin API] Delete managed resource action", func() {
var fipConfigID string
var pipAddressID string

const namespace = "default"

By("creating a test service of type loadbalancer")
creationFunc := clients.Kubernetes.CoreV1().Services("default").Create
creationFunc := clients.Kubernetes.CoreV1().Services(namespace).Create
CreateK8sObjectWithRetry(ctx, creationFunc, &loadBalancerService, metav1.CreateOptions{})

defer func() {
By("cleaning up the k8s loadbalancer service")
// wait for deletion to prevent flakes on retries
Eventually(func(g Gomega, ctx context.Context) {
err := clients.Kubernetes.CoreV1().Services("default").Delete(ctx, "test", metav1.DeleteOptions{})
g.Expect((err == nil || kerrors.IsNotFound(err))).To(BeTrue(), "expect Service to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())

By("confirming that the k8s loadbalancer service is gone")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Services("default").Get(ctx, "test", metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Service](
ctx, clients.Kubernetes.CoreV1().Services(namespace), loadBalancerService.Name,
)
}()

// wait for ingress IP to be assigned as this indicate the service is ready
Eventually(func(g Gomega, ctx context.Context) {
getFunc := clients.Kubernetes.CoreV1().Services("default").Get
service = GetK8sObjectWithRetry(ctx, getFunc, "test", metav1.GetOptions{})
getFunc := clients.Kubernetes.CoreV1().Services(namespace).Get
service = GetK8sObjectWithRetry(ctx, getFunc, loadBalancerService.Name, metav1.GetOptions{})
g.Expect(service.Status.LoadBalancer.Ingress).To(HaveLen(1))
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())

@@ -115,7 +108,7 @@ var _ = Describe("[Admin API] Delete managed resource action", func() {
oc, err := clients.OpenshiftClustersPreview.Get(ctx, vnetResourceGroup, clusterName)
Expect(err).NotTo(HaveOccurred())

// Fake name prevents accidently deleting the PLS but still validates gaurdrail logic works.
// Fake name prevents accidentally deleting the PLS but still validates guardrail logic works.
plsResourceID := fmt.Sprintf("%s/providers/Microsoft.Network/PrivateLinkServices/%s", *oc.OpenShiftClusterProperties.ClusterProfile.ResourceGroupID, "fake-pls")

resp, err := adminRequest(ctx, http.MethodPost, "/admin"+clusterResourceID+"/deletemanagedresource", url.Values{"managedResourceID": []string{plsResourceID}}, true, nil, nil)
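Worth noting at this call site (my reading of the Go generics rules, not something the PR states): the element type has to be spelled out, because Go does not infer type parameters from an argument's method set. A hedged sketch of the two shapes:

// The concrete client (a ServiceInterface) satisfies
// AllowedCleanUpAPIInterface[*corev1.Service] structurally, but type
// inference never looks at method sets, so this would not compile:
//   CleanupK8sResource(ctx, clients.Kubernetes.CoreV1().Services(namespace), name)
// The explicit instantiation is required:
CleanupK8sResource[*corev1.Service](
	ctx, clients.Kubernetes.CoreV1().Services(namespace), loadBalancerService.Name,
)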
42 changes: 12 additions & 30 deletions test/e2e/adminapi_kubernetesobjects.go
@@ -34,17 +34,13 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() {
// but we need to remove the object in case of failure
// to allow us to run this test against the same cluster multiple times.
By("deleting the config map via Kubernetes API")
err := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Delete(ctx, objName, metav1.DeleteOptions{})
// On successfully we expect NotFound error
if !kerrors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
CleanupK8sResource[*corev1.ConfigMap](
ctx, clients.Kubernetes.CoreV1().ConfigMaps(namespace), objName,
)
By("deleting the pod via Kubernetes API")
err = clients.Kubernetes.CoreV1().Pods(namespace).Delete(ctx, objName, metav1.DeleteOptions{})
// On successfully we expect NotFound error
if !kerrors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
CleanupK8sResource[*corev1.Pod](
ctx, clients.Kubernetes.CoreV1().Pods(namespace), objName,
)
}()

testConfigMapCreateOK(ctx, objName, namespace)
@@ -74,16 +70,9 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() {

defer func() {
By("deleting the test customer namespace via Kubernetes API")
deleteFunc := clients.Kubernetes.CoreV1().Namespaces().Delete
DeleteK8sObjectWithRetry(ctx, deleteFunc, namespace, metav1.DeleteOptions{})

// To avoid flakes, we need it to be completely deleted before we can use it again
// in a separate run or in a separate It block
By("waiting for the test customer namespace to be deleted")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue(), "expect Namespace to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Namespace](
ctx, clients.Kubernetes.CoreV1().Namespaces(), namespace,
)
}()

testConfigMapCreateOrUpdateForbidden(ctx, "creating", objName, namespace)
@@ -112,16 +101,9 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() {

defer func() {
By("deleting the test customer namespace via Kubernetes API")
deleteFunc := clients.Kubernetes.CoreV1().Namespaces().Delete
DeleteK8sObjectWithRetry(ctx, deleteFunc, namespace, metav1.DeleteOptions{})

// To avoid flakes, we need it to be completely deleted before we can use it again
// in a separate run or in a separate It block
By("waiting for the test customer namespace to be deleted")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue(), "expect Namespace to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Namespace](
ctx, clients.Kubernetes.CoreV1().Namespaces(), namespace,
)
}()

By("creating an object via Kubernetes API")
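The same helper serves both the namespaced objects (ConfigMaps, Pods) and the cluster-scoped Namespaces in the hunks above, because the constraint is purely structural. A self-contained toy sketch of that idea, with stand-in types and plain panics instead of client-go and Gomega (an illustration of the pattern, not the PR's code):

package main

import (
	"context"
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// cleanupClient mirrors AllowedCleanUpAPIInterface in spirit: anything with
// a matching Get and Delete qualifies, namespaced or cluster-scoped.
type cleanupClient[T any] interface {
	Get(ctx context.Context, name string) (T, error)
	Delete(ctx context.Context, name string) error
}

// store is a toy in-memory client.
type store[T any] map[string]T

func (s store[T]) Get(_ context.Context, name string) (T, error) {
	v, ok := s[name]
	if !ok {
		return v, errNotFound
	}
	return v, nil
}

func (s store[T]) Delete(_ context.Context, name string) error {
	if _, ok := s[name]; !ok {
		return errNotFound
	}
	delete(s, name)
	return nil
}

func cleanup[T any](ctx context.Context, c cleanupClient[T], name string) {
	// Deleting an already-absent object counts as success (idempotent).
	if err := c.Delete(ctx, name); err != nil && !errors.Is(err, errNotFound) {
		panic(err)
	}
	// Confirm the object is really gone before returning.
	if _, err := c.Get(ctx, name); !errors.Is(err, errNotFound) {
		panic(fmt.Sprintf("%q still exists", name))
	}
}

type configMap struct{ Name string }
type namespace struct{ Name string }

func main() {
	ctx := context.Background()
	cleanup[configMap](ctx, store[configMap]{"test": {Name: "test"}}, "test")
	cleanup[namespace](ctx, store[namespace]{"e2e": {Name: "e2e"}}, "e2e")
	fmt.Println("both cleaned up")
}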
14 changes: 3 additions & 11 deletions test/e2e/adminapi_redeployvm.go
@@ -17,7 +17,6 @@ import (

mgmtcompute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/Azure/ARO-RP/pkg/util/ready"
@@ -111,16 +110,9 @@ func getNodeUptime(g Gomega, ctx context.Context, node string) (time.Time, error

defer func() {
By("deleting the uptime pod via Kubernetes API")
deleteFunc := clients.Kubernetes.CoreV1().Pods(namespace).Delete
DeleteK8sObjectWithRetry(ctx, deleteFunc, podName, metav1.DeleteOptions{})

// To avoid flakes, we need it to be completely deleted before we can use it again
// in a separate run or in a separate It block
By("waiting for uptime pod to be deleted")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue(), "expect uptime pod to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Pod](
ctx, clients.Kubernetes.CoreV1().Pods(namespace), podName,
)
}()

By("waiting for uptime pod to move into the Succeeded phase")
10 changes: 3 additions & 7 deletions test/e2e/cluster.go
@@ -47,12 +47,8 @@ var _ = Describe("Cluster", Serial, func() {
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(BeNil())

DeferCleanup(func(ctx context.Context) {
By("deleting a test namespace")
project.Delete(ctx)
By("verifying the namespace is deleted")
Eventually(func(ctx context.Context) error {
return project.VerifyProjectIsDeleted(ctx)
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(BeNil())
By("deleting the test project")
project.CleanUp(ctx)
})
})

@@ -138,7 +134,7 @@ var _ = Describe("Cluster", Serial, func() {
cluster, err := clients.AROClusters.AroV1alpha1().Clusters().Get(ctx, "cluster", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())

// Poke the ARO storageaccount controller to reconcile
// Poke the ARO storage account controller to reconcile
cluster.Spec.OperatorFlags[operator.StorageAccountsEnabled] = operator.FlagFalse
cluster, err = clients.AROClusters.AroV1alpha1().Clusters().Update(ctx, cluster, metav1.UpdateOptions{})
Expect(err).NotTo(HaveOccurred())
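project.CleanUp itself is not shown in this diff; judging from the Delete and VerifyProjectIsDeleted calls it replaces, a plausible shape would be (hypothetical sketch, inferred rather than taken from the PR):

// Hypothetical: not in the diff; inferred from the code this hunk deletes.
func (p Project) CleanUp(ctx context.Context) {
	p.Delete(ctx)
	p.VerifyProjectIsDeleted(ctx)
}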
70 changes: 45 additions & 25 deletions test/e2e/helpers.go
@@ -36,14 +36,13 @@ type K8sDeleteFunc func(ctx context.Context, name string, options metav1.DeleteO
// asserting there were no errors.
func GetK8sObjectWithRetry[T kruntime.Object](
ctx context.Context, getFunc K8sGetFunc[T], name string, options metav1.GetOptions,
) T {
var object T
) (result T) {
var err error
Eventually(func(g Gomega, ctx context.Context) {
result, err := getFunc(ctx, name, options)
result, err = getFunc(ctx, name, options)
g.Expect(err).NotTo(HaveOccurred())
object = result
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
return object
return result
}

// GetK8sPodLogsWithRetry gets the logs for the specified pod in the named namespace. It gets them with some
@@ -56,37 +55,35 @@ func GetK8sPodLogsWithRetry(
g.Expect(err).NotTo(HaveOccurred())
rawBody = string(body)
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
return
return rawBody
}

// ListK8sObjectWithRetry takes a list function like clients.Kubernetes.CoreV1().Nodes().List and the
// parameters for it. It then makes the call with some retry logic and returns the result after
// asserting there were no errors.
func ListK8sObjectWithRetry[T kruntime.Object](
ctx context.Context, listFunc K8sListFunc[T], options metav1.ListOptions,
) T {
var object T
) (result T) {
var err error
Eventually(func(g Gomega, ctx context.Context) {
result, err := listFunc(ctx, options)
result, err = listFunc(ctx, options)
g.Expect(err).NotTo(HaveOccurred())
object = result
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
return object
return result
}

// CreateK8sObjectWithRetry takes a create function like clients.Kubernetes.CoreV1().Pods(namespace).Create
// and the parameters for it. It then makes the call with some retry logic and returns the result after
// asserting there were no errors.
func CreateK8sObjectWithRetry[T kruntime.Object](
ctx context.Context, createFunc K8sCreateFunc[T], obj T, options metav1.CreateOptions,
) T {
var object T
) (result T) {
var err error
Eventually(func(g Gomega, ctx context.Context) {
result, err := createFunc(ctx, obj, options)
result, err = createFunc(ctx, obj, options)
g.Expect(err).NotTo(HaveOccurred())
object = result
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
return object
return result
}

// DeleteK8sObjectWithRetry takes a delete function like clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Delete
@@ -100,6 +97,32 @@ func DeleteK8sObjectWithRetry(
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
}

type AllowedCleanUpAPIInterface[T kruntime.Object] interface {
Get(ctx context.Context, name string, opts metav1.GetOptions) (T, error)
Delete(ctx context.Context, name string, options metav1.DeleteOptions) error
}

// CleanupK8sResource takes a client that knows how to issue a GET and DELETE call for a given resource.
// It issues a delete request and then polls the API until the resource is no longer found.
//
// Note: If the DELETE request receives a 404 we assume the resource has been cleaned up successfully.
func CleanupK8sResource[T kruntime.Object](
ctx context.Context, client AllowedCleanUpAPIInterface[T], name string,
) {
DefaultEventuallyTimeout = 10 * time.Minute
PollingInterval = 1 * time.Second
Eventually(func(g Gomega, ctx context.Context) {
err := client.Delete(ctx, name, metav1.DeleteOptions{})
g.Expect((err == nil || kerrors.IsNotFound(err))).To(BeTrue())
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())

// GET the resource until NOT_FOUND to ensure it's been deleted.
Eventually(func(g Gomega, ctx context.Context) {
_, err := client.Get(ctx, name, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
}

type Project struct {
projectClient projectclient.Interface
cli kubernetes.Interface
@@ -167,13 +190,10 @@ func (p Project) VerifyProjectIsReady(ctx context.Context) error {
return nil
}

// VerifyProjectIsDeleted verifies that the project does not exist and returns error if a project exists
// or if it encounters an error other than NotFound
func (p Project) VerifyProjectIsDeleted(ctx context.Context) error {
_, err := p.projectClient.ProjectV1().Projects().Get(ctx, p.Name, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
return nil
}

return fmt.Errorf("Project exists")
// VerifyProjectIsDeleted verifies that the project does not exist by polling it.
func (p Project) VerifyProjectIsDeleted(ctx context.Context) {
Eventually(func(g Gomega, ctx context.Context) {
_, err := p.projectClient.ProjectV1().Projects().Get(ctx, p.Name, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
}
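To see why these helpers leave call sites cast-free, here is a self-contained toy of the same pattern. Assumptions: a plain retry loop stands in for Gomega's Eventually, and toy types stand in for client-go; only the inference mechanics match the PR.

package main

import (
	"context"
	"fmt"
	"time"
)

// getFunc mirrors K8sGetFunc: the element type rides along on the signature.
type getFunc[T any] func(ctx context.Context, name string) (T, error)

// getWithRetry polls get until it succeeds or a deadline passes.
func getWithRetry[T any](ctx context.Context, get getFunc[T], name string) (result T) {
	deadline := time.Now().Add(2 * time.Second)
	var err error
	for time.Now().Before(deadline) {
		if result, err = get(ctx, name); err == nil {
			return result
		}
		time.Sleep(100 * time.Millisecond)
	}
	panic(fmt.Sprintf("get %q: %v", name, err))
}

type pod struct{ Name, Phase string }

func main() {
	ctx := context.Background()
	pods := map[string]pod{"uptime": {Name: "uptime", Phase: "Succeeded"}}
	get := func(_ context.Context, name string) (pod, error) {
		p, ok := pods[name]
		if !ok {
			return pod{}, fmt.Errorf("pod %q not found", name)
		}
		return p, nil
	}
	// T is inferred as pod from get's signature: no type argument, no cast.
	p := getWithRetry(ctx, get, "uptime")
	fmt.Println(p.Phase) // Succeeded
}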