Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Consolidate e2e logic #3392

Merged
merged 45 commits into from
Mar 12, 2024
Merged
Show file tree
Hide file tree
Changes from 33 commits
Commits
Show all changes
45 commits
Select commit Hold shift + click to select a range
1c4cb8c
Add initial helpers.go
mociarain Jan 23, 2024
4858ffc
Apply to one file
mociarain Jan 23, 2024
c2b363d
Update doc comment
mociarain Jan 23, 2024
706bec7
Remove unneeded import
mociarain Jan 29, 2024
053b1b2
Leverage the type system for drier code
mociarain Jan 30, 2024
a6fb802
Actually tease out a MVP
mociarain Jan 30, 2024
c76c90a
Tidy up the helpers
mociarain Jan 30, 2024
2511d74
Attempt to conform to Go conventions
mociarain Jan 30, 2024
eec329e
Update adminapi_delete_managedresources.go
mociarain Jan 30, 2024
4c71718
Add list function
mociarain Jan 30, 2024
cdf3220
Update adminapi_drainnode
mociarain Jan 30, 2024
976536f
Update adminapi_kubernetesobjects.go
mociarain Jan 30, 2024
93c98e2
Fix typos
mociarain Jan 30, 2024
ed79ef0
Bring the helpers into the relevant file
mociarain Jan 31, 2024
150d25d
Update the other straightforward cases
mociarain Jan 31, 2024
c0b5802
Lint
mociarain Jan 31, 2024
4ddd70a
Add a GetLogs helper
mociarain Jan 31, 2024
41c13d5
Move project into the helpers
mociarain Jan 31, 2024
e7c1979
Update dns.go
mociarain Jan 31, 2024
08b6967
Use consistent naming]
mociarain Jan 31, 2024
a36830d
Fix import ordering
mociarain Jan 31, 2024
5a6412e
Up the default time
mociarain Feb 1, 2024
b6f4a88
Fix incorrect API
mociarain Feb 1, 2024
f88182d
Increase verifyResolvConfTimeout
mociarain Feb 1, 2024
44c3596
Clean up redeploymv.go
mociarain Feb 1, 2024
4057a88
Reuse the same variable name for the same thing
mociarain Feb 2, 2024
d11fd09
s/(.*)Call/$1Func
mociarain Feb 5, 2024
b842fc5
Bind repeated string to a constant
mociarain Feb 5, 2024
295a74c
Tidy up naming conventions and comments
mociarain Feb 5, 2024
2e27232
Add consistent cleanup logic
mociarain Feb 1, 2024
5edb33e
Tidy up post rebase
mociarain Feb 5, 2024
57fb28a
Merge remote-tracking branch 'origin/master' into consolidate-e2e-logic
mociarain Feb 8, 2024
736a832
Add missing returned value
mociarain Feb 8, 2024
8808fe9
Add some retry logic to a flaky part of Project
mociarain Feb 13, 2024
866250f
Actually make the call in GetK8sObjectWithRetry
mociarain Feb 14, 2024
0cb9227
Use a consistent return convention
mociarain Feb 14, 2024
2199362
Remove unneeded logic check
mociarain Feb 14, 2024
2134d1d
Make CleanUp idempotent
mociarain Feb 14, 2024
c3bd2a1
Merge remote-tracking branch 'origin/master' into consolidate-e2e-logic
mociarain Feb 19, 2024
e45693d
Use explicit returns
mociarain Feb 19, 2024
4df7584
Merge remote-tracking branch 'origin/master' into consolidate-e2e-logic
mociarain Feb 20, 2024
874fa23
Fix logic in CleanupK8sResource
mociarain Feb 20, 2024
f6d0953
Tidy up GetK8sObjectWithRetry
mociarain Feb 21, 2024
77fa007
Add comment
mociarain Feb 27, 2024
14c9f8e
Remove error return
mociarain Mar 5, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions test/e2e/adminapi_cluster_getlogs.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,21 +46,21 @@ func testGetPodLogsOK(ctx context.Context, containerName, podName, namespace str

By("creating a test pod in openshift-azure-operator namespace with some known logs")
pod := mockPod(containerName, podName, namespace, expectedLog)
pod = CreateK8sObjectWithRetry(
CreateK8sObjectWithRetry(
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Create, pod, metav1.CreateOptions{},
)

defer func() {
By("deleting the test pod")
DeleteK8sObjectWithRetry(
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Delete, pod.Name, metav1.DeleteOptions{},
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Delete, podName, metav1.DeleteOptions{},
tiguelu marked this conversation as resolved.
Show resolved Hide resolved
)
}()

By("waiting for the pod to successfully terminate")
Eventually(func(g Gomega, ctx context.Context) {
pod = GetK8sObjectWithRetry(
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Get, pod.Name, metav1.GetOptions{},
pod, _ = GetK8sObjectWithRetry(
ctx, clients.Kubernetes.CoreV1().Pods(namespace).Get, podName, metav1.GetOptions{},
)
g.Expect(pod.Status.Phase).To(Equal(corev1.PodSucceeded))
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
Expand Down
4 changes: 2 additions & 2 deletions test/e2e/adminapi_csr.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ func testCSRApproveOK(ctx context.Context, objName, namespace string) {

By("checking that the CSR was approved via Kubernetes API")
getFunc := clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Get
testcsr := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{})
testcsr, _ := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{})

approved := false
for _, condition := range testcsr.Status.Conditions {
Expand All @@ -95,7 +95,7 @@ func testCSRMassApproveOK(ctx context.Context, namePrefix, namespace string, csr
By("checking that all CSRs were approved via Kubernetes API")
for i := 1; i < csrCount; i++ {
getFunc := clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Get
testcsr := GetK8sObjectWithRetry(ctx, getFunc, namePrefix+strconv.Itoa(i), metav1.GetOptions{})
testcsr, _ := GetK8sObjectWithRetry(ctx, getFunc, namePrefix+strconv.Itoa(i), metav1.GetOptions{})

approved := false
for _, condition := range testcsr.Status.Conditions {
Expand Down
21 changes: 7 additions & 14 deletions test/e2e/adminapi_delete_managedresource.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@ import (
. "github.com/onsi/gomega"

corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/Azure/ARO-RP/pkg/util/stringutils"
Expand Down Expand Up @@ -45,29 +44,23 @@ var _ = Describe("[Admin API] Delete managed resource action", func() {
var fipConfigID string
var pipAddressID string

const namespace = "default"

By("creating a test service of type loadbalancer")
creationFunc := clients.Kubernetes.CoreV1().Services("default").Create
CreateK8sObjectWithRetry(ctx, creationFunc, &loadBalancerService, metav1.CreateOptions{})

defer func() {
By("cleaning up the k8s loadbalancer service")
// wait for deletion to prevent flakes on retries
Eventually(func(g Gomega, ctx context.Context) {
err := clients.Kubernetes.CoreV1().Services("default").Delete(ctx, "test", metav1.DeleteOptions{})
g.Expect((err == nil || kerrors.IsNotFound(err))).To(BeTrue(), "expect Service to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())

By("confirming that the k8s loadbalancer service is gone")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Services("default").Get(ctx, "test", metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Service](
ctx, clients.Kubernetes.CoreV1().Services(namespace), loadBalancerService.Name,
)
}()

// wait for ingress IP to be assigned as this indicate the service is ready
Eventually(func(g Gomega, ctx context.Context) {
getFunc := clients.Kubernetes.CoreV1().Services("default").Get
service = GetK8sObjectWithRetry(ctx, getFunc, "test", metav1.GetOptions{})
service, _ = GetK8sObjectWithRetry(ctx, getFunc, "test", metav1.GetOptions{})
g.Expect(service.Status.LoadBalancer.Ingress).To(HaveLen(1))
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())

Expand Down Expand Up @@ -115,7 +108,7 @@ var _ = Describe("[Admin API] Delete managed resource action", func() {
oc, err := clients.OpenshiftClustersPreview.Get(ctx, vnetResourceGroup, clusterName)
Expect(err).NotTo(HaveOccurred())

// Fake name prevents accidently deleting the PLS but still validates gaurdrail logic works.
// Fake name prevents accidentally deleting the PLS but still validates guardrail logic works.
plsResourceID := fmt.Sprintf("%s/providers/Microsoft.Network/PrivateLinkServices/%s", *oc.OpenShiftClusterProperties.ClusterProfile.ResourceGroupID, "fake-pls")

resp, err := adminRequest(ctx, http.MethodPost, "/admin"+clusterResourceID+"/deletemanagedresource", url.Values{"managedResourceID": []string{plsResourceID}}, true, nil, nil)
Expand Down
4 changes: 2 additions & 2 deletions test/e2e/adminapi_drainnode.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ func testCordonNodeOK(ctx context.Context, nodeName string) {
Expect(resp.StatusCode).To(Equal(http.StatusOK))

By("checking that node was cordoned via Kubernetes API")
node := GetK8sObjectWithRetry(ctx, clients.Kubernetes.CoreV1().Nodes().Get, nodeName, metav1.GetOptions{})
node, _ := GetK8sObjectWithRetry(ctx, clients.Kubernetes.CoreV1().Nodes().Get, nodeName, metav1.GetOptions{})

Expect(node.Name).To(Equal(nodeName))
Expect(node.Spec.Unschedulable).Should(BeTrue())
Expand All @@ -89,7 +89,7 @@ func testUncordonNodeOK(ctx context.Context, nodeName string) {
Expect(resp.StatusCode).To(Equal(http.StatusOK))

By("checking that node was uncordoned via Kubernetes API")
node := GetK8sObjectWithRetry(ctx, clients.Kubernetes.CoreV1().Nodes().Get, nodeName, metav1.GetOptions{})
node, _ := GetK8sObjectWithRetry(ctx, clients.Kubernetes.CoreV1().Nodes().Get, nodeName, metav1.GetOptions{})
Expect(node.Name).To(Equal(nodeName))
Expect(node.Spec.Unschedulable).Should(BeFalse())
}
Expand Down
50 changes: 16 additions & 34 deletions test/e2e/adminapi_kubernetesobjects.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,17 +34,13 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() {
// but we need to remove the object in case of failure
// to allow us to run this test against the same cluster multiple times.
By("deleting the config map via Kubernetes API")
err := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Delete(ctx, objName, metav1.DeleteOptions{})
// On successfully we expect NotFound error
if !kerrors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
CleanupK8sResource[*corev1.ConfigMap](
ctx, clients.Kubernetes.CoreV1().ConfigMaps(namespace), objName,
)
By("deleting the pod via Kubernetes API")
err = clients.Kubernetes.CoreV1().Pods(namespace).Delete(ctx, objName, metav1.DeleteOptions{})
// On successfully we expect NotFound error
if !kerrors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
CleanupK8sResource[*corev1.Pod](
ctx, clients.Kubernetes.CoreV1().Pods(namespace), objName,
)
}()

testConfigMapCreateOK(ctx, objName, namespace)
Expand Down Expand Up @@ -74,16 +70,9 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() {

defer func() {
By("deleting the test customer namespace via Kubernetes API")
deleteFunc := clients.Kubernetes.CoreV1().Namespaces().Delete
DeleteK8sObjectWithRetry(ctx, deleteFunc, namespace, metav1.DeleteOptions{})

// To avoid flakes, we need it to be completely deleted before we can use it again
// in a separate run or in a separate It block
By("waiting for the test customer namespace to be deleted")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue(), "expect Namespace to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Namespace](
ctx, clients.Kubernetes.CoreV1().Namespaces(), namespace,
)
}()

testConfigMapCreateOrUpdateForbidden(ctx, "creating", objName, namespace)
Expand Down Expand Up @@ -112,16 +101,9 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() {

defer func() {
By("deleting the test customer namespace via Kubernetes API")
deleteFunc := clients.Kubernetes.CoreV1().Namespaces().Delete
DeleteK8sObjectWithRetry(ctx, deleteFunc, namespace, metav1.DeleteOptions{})

// To avoid flakes, we need it to be completely deleted before we can use it again
// in a separate run or in a separate It block
By("waiting for the test customer namespace to be deleted")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue(), "expect Namespace to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Namespace](
ctx, clients.Kubernetes.CoreV1().Namespaces(), namespace,
)
}()

By("creating an object via Kubernetes API")
Expand Down Expand Up @@ -206,7 +188,7 @@ func testConfigMapCreateOK(ctx context.Context, objName, namespace string) {
By("checking that the object was created via Kubernetes API")
getFunc := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Get

cm := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{})
cm, _ := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{})

Expect(obj.Namespace).To(Equal(cm.Namespace))
Expect(obj.Name).To(Equal(cm.Name))
Expand All @@ -230,7 +212,7 @@ func testConfigMapGetOK(ctx context.Context, objName, namespace string, unrestri
By("comparing it to the actual object retrieved via Kubernetes API")
getFunc := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Get

cm := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{})
cm, _ := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{})

Expect(obj.Namespace).To(Equal(cm.Namespace))
Expect(obj.Name).To(Equal(cm.Name))
Expand Down Expand Up @@ -270,7 +252,7 @@ func testConfigMapUpdateOK(ctx context.Context, objName, namespace string) {
By("checking that the object changed via Kubernetes API")
getFunc := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Get

cm := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{})
cm, _ := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{})

Expect(cm.Namespace).To(Equal(namespace))
Expect(cm.Name).To(Equal(objName))
Expand Down Expand Up @@ -375,7 +357,7 @@ func testPodCreateOK(ctx context.Context, containerName, objName, namespace stri
By("checking that the pod was created via Kubernetes API")
getFunc := clients.Kubernetes.CoreV1().Pods(namespace).Get

pod := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{})
pod, _ := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{})

Expect(obj.Namespace).To(Equal(pod.Namespace))
Expect(obj.Name).To(Equal(pod.Name))
Expand Down
18 changes: 5 additions & 13 deletions test/e2e/adminapi_redeployvm.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ import (

mgmtcompute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/Azure/ARO-RP/pkg/util/ready"
Expand Down Expand Up @@ -65,7 +64,7 @@ var _ = Describe("[Admin API] VM redeploy action", func() {
// wait 1 minute - this will guarantee we pass the minimum (default) threshold of Node heartbeats (40 seconds)
Eventually(func(g Gomega, ctx context.Context) {
getFunc := clients.Kubernetes.CoreV1().Nodes().Get
node := GetK8sObjectWithRetry(ctx, getFunc, *vm.Name, metav1.GetOptions{})
node, _ := GetK8sObjectWithRetry(ctx, getFunc, *vm.Name, metav1.GetOptions{})

g.Expect(ready.NodeIsReady(node)).To(BeTrue())
}).WithContext(ctx).WithTimeout(10 * time.Minute).WithPolling(time.Minute).Should(Succeed())
Expand Down Expand Up @@ -111,22 +110,15 @@ func getNodeUptime(g Gomega, ctx context.Context, node string) (time.Time, error

defer func() {
By("deleting the uptime pod via Kubernetes API")
deleteFunc := clients.Kubernetes.CoreV1().Pods(namespace).Delete
DeleteK8sObjectWithRetry(ctx, deleteFunc, podName, metav1.DeleteOptions{})

// To avoid flakes, we need it to be completely deleted before we can use it again
// in a separate run or in a separate It block
By("waiting for uptime pod to be deleted")
Eventually(func(g Gomega, ctx context.Context) {
_, err := clients.Kubernetes.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue(), "expect uptime pod to be deleted")
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
CleanupK8sResource[*corev1.Pod](
ctx, clients.Kubernetes.CoreV1().Pods(namespace), podName,
)
}()

By("waiting for uptime pod to move into the Succeeded phase")
g.Eventually(func(g Gomega, ctx context.Context) {
getFunc := clients.Kubernetes.CoreV1().Pods(namespace).Get
pod := GetK8sObjectWithRetry(ctx, getFunc, podName, metav1.GetOptions{})
pod, _ := GetK8sObjectWithRetry(ctx, getFunc, podName, metav1.GetOptions{})

g.Expect(pod.Status.Phase).To(Equal(corev1.PodSucceeded))
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
Expand Down
10 changes: 3 additions & 7 deletions test/e2e/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,12 +47,8 @@ var _ = Describe("Cluster", Serial, func() {
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(BeNil())

DeferCleanup(func(ctx context.Context) {
By("deleting a test namespace")
project.Delete(ctx)
By("verifying the namespace is deleted")
Eventually(func(ctx context.Context) error {
return project.VerifyProjectIsDeleted(ctx)
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(BeNil())
By("deleting the test project")
project.CleanUp(ctx)
})
})

Expand Down Expand Up @@ -138,7 +134,7 @@ var _ = Describe("Cluster", Serial, func() {
cluster, err := clients.AROClusters.AroV1alpha1().Clusters().Get(ctx, "cluster", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())

// Poke the ARO storageaccount controller to reconcile
// Poke the ARO storage account controller to reconcile
cluster.Spec.OperatorFlags[operator.StorageAccountsEnabled] = operator.FlagFalse
cluster, err = clients.AROClusters.AroV1alpha1().Clusters().Update(ctx, cluster, metav1.UpdateOptions{})
Expect(err).NotTo(HaveOccurred())
Expand Down
2 changes: 1 addition & 1 deletion test/e2e/disk_encryption.go
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ var _ = Describe("Disk encryption at rest", func() {
}

By("making sure the encrypted storage class is default")
sc := GetK8sObjectWithRetry(
sc, _ := GetK8sObjectWithRetry(
ctx, clients.Kubernetes.StorageV1().StorageClasses().Get, defaultStorageClass, metav1.GetOptions{},
)

Expand Down
42 changes: 34 additions & 8 deletions test/e2e/helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,14 +36,11 @@ type K8sDeleteFunc func(ctx context.Context, name string, options metav1.DeleteO
// asserting there were no errors.
func GetK8sObjectWithRetry[T kruntime.Object](
ctx context.Context, getFunc K8sGetFunc[T], name string, options metav1.GetOptions,
) T {
var object T
) (result T, err error) {
Eventually(func(g Gomega, ctx context.Context) {
result, err := getFunc(ctx, name, options)
g.Expect(err).NotTo(HaveOccurred())
object = result
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
return object
return result, err
}

// GetK8sPodLogsWithRetry gets the logs for the specified pod in the named namespace. It gets them with some
Expand Down Expand Up @@ -93,11 +90,40 @@ func CreateK8sObjectWithRetry[T kruntime.Object](
// and the parameters for it. It then makes the call with some retry logic.
func DeleteK8sObjectWithRetry(
ctx context.Context, deleteFunc K8sDeleteFunc, name string, options metav1.DeleteOptions,
) {
) (err error) {
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I expanded this to return an error so I could match on a NotFound in the Cleanup. An alternative I considered was to have a second Delete function that would return the error but returning the error seemed like the more "Go" thing to do even though it introduced all these underscores.

What do people think?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am probably missing something, but in which cases is this function used in the cleanup? Isn't the cleanup directly using the Delete method from the clients?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sometimes we just need to delete something: https://github.com/azure/ARO-RP/blob/77fa007891c18eeee0f217b74ff47e0e7c730a54/test/e2e/operator.go#L191

IIUC, the justification for returning the error was that it was needed to detect NotFound, but I don't see where the returned error is used for that. The cleanup function uses the Delete methods from the clients, vs the DeleteK8sObjectWithRetry function.

Eventually(func(g Gomega, ctx context.Context) {
err := deleteFunc(ctx, name, options)
g.Expect(err).NotTo(HaveOccurred())
}).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed())
return
}

type AllowedCleanUpAPIInterface[T kruntime.Object] interface {
Get(ctx context.Context, name string, opts metav1.GetOptions) (T, error)
Delete(ctx context.Context, name string, options metav1.DeleteOptions) error
}

// CleanupK8sResource takes a client that knows how to issue a GET and DELETE call for a given resource.
// It then issues a delete request and polls the API until the resource is no longer found.
//
// Note: If the DELETE request receives a 404 we assume the resource has been cleaned up successfully.
func CleanupK8sResource[T kruntime.Object](
ctx context.Context, client AllowedCleanUpAPIInterface[T], name string,
) {
DefaultEventuallyTimeout = 10 * time.Minute
PollingInterval = 1 * time.Second
err := DeleteK8sObjectWithRetry(
ctx, client.Delete, name, metav1.DeleteOptions{},
)

if err != nil && kerrors.IsNotFound(err) {
return
}

Eventually(func(g Gomega, ctx context.Context) {
mociarain marked this conversation as resolved.
Show resolved Hide resolved
_, err := GetK8sObjectWithRetry(ctx, client.Get, name, metav1.GetOptions{})
g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
}

type Project struct {
Expand Down Expand Up @@ -149,15 +175,15 @@ func (p Project) VerifyProjectIsReady(ctx context.Context) error {
}, metav1.CreateOptions{})

By("getting the relevant SA")
sa := GetK8sObjectWithRetry(
sa, _ := GetK8sObjectWithRetry(
ctx, p.cli.CoreV1().ServiceAccounts(p.Name).Get, "default", metav1.GetOptions{},
)

if len(sa.Secrets) == 0 {
return fmt.Errorf("default ServiceAccount does not have secrets")
}

project := GetK8sObjectWithRetry(
project, _ := GetK8sObjectWithRetry(
ctx, p.projectClient.ProjectV1().Projects().Get, p.Name, metav1.GetOptions{},
)
_, found := project.Annotations["openshift.io/sa.scc.uid-range"]
Expand Down
4 changes: 2 additions & 2 deletions test/e2e/operator.go
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ var _ = Describe("ARO Operator - Cluster Monitoring ConfigMap", func() {
getFunc := clients.Kubernetes.CoreV1().ConfigMaps("openshift-monitoring").Get

By("waiting for the ConfigMap to make sure it exists")
cm = GetK8sObjectWithRetry(ctx, getFunc, "cluster-monitoring-config", metav1.GetOptions{})
cm, _ = GetK8sObjectWithRetry(ctx, getFunc, "cluster-monitoring-config", metav1.GetOptions{})

By("unmarshalling the config from the ConfigMap data")
var configData monitoring.Config
Expand Down Expand Up @@ -706,7 +706,7 @@ var _ = Describe("ARO Operator - Cloud Provider Config ConfigMap", func() {

By("waiting for the ConfigMap to make sure it exists")
getFunc := clients.Kubernetes.CoreV1().ConfigMaps("openshift-config").Get
cm := GetK8sObjectWithRetry(ctx, getFunc, "cloud-provider-config", metav1.GetOptions{})
cm, _ := GetK8sObjectWithRetry(ctx, getFunc, "cloud-provider-config", metav1.GetOptions{})

By("waiting for disableOutboundSNAT to be true")
Eventually(func(g Gomega, ctx context.Context) {
Expand Down
Loading
Loading