diff --git a/test/e2e/adminapi_cluster_getlogs.go b/test/e2e/adminapi_cluster_getlogs.go index 422de1f4e53..3cd8e0509db 100644 --- a/test/e2e/adminapi_cluster_getlogs.go +++ b/test/e2e/adminapi_cluster_getlogs.go @@ -46,20 +46,22 @@ func testGetPodLogsOK(ctx context.Context, containerName, podName, namespace str By("creating a test pod in openshift-azure-operator namespace with some known logs") pod := mockPod(containerName, podName, namespace, expectedLog) - pod, err := clients.Kubernetes.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + pod = CreateK8sObjectWithRetry( + ctx, clients.Kubernetes.CoreV1().Pods(namespace).Create, pod, metav1.CreateOptions{}, + ) defer func() { By("deleting the test pod") - err = clients.Kubernetes.CoreV1().Pods(namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) + DeleteK8sObjectWithRetry( + ctx, clients.Kubernetes.CoreV1().Pods(namespace).Delete, pod.Name, metav1.DeleteOptions{}, + ) }() By("waiting for the pod to successfully terminate") Eventually(func(g Gomega, ctx context.Context) { - pod, err = clients.Kubernetes.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) - g.Expect(err).NotTo(HaveOccurred()) - + pod = GetK8sObjectWithRetry( + ctx, clients.Kubernetes.CoreV1().Pods(namespace).Get, pod.Name, metav1.GetOptions{}, + ) g.Expect(pod.Status.Phase).To(Equal(corev1.PodSucceeded)) }).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) diff --git a/test/e2e/adminapi_csr.go b/test/e2e/adminapi_csr.go index 56e9207185a..4ac6ddcdec2 100644 --- a/test/e2e/adminapi_csr.go +++ b/test/e2e/adminapi_csr.go @@ -37,16 +37,17 @@ var _ = Describe("[Admin API] CertificateSigningRequest action", func() { By("creating mock CSRs via Kubernetes API") for i := 0; i < csrCount; i++ { csr := mockCSR(prefix+strconv.Itoa(i), namespace, csrData) - _, err := clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Create(ctx, csr, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + + createFunc := clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Create + CreateK8sObjectWithRetry(ctx, createFunc, csr, metav1.CreateOptions{}) } }) AfterEach(func(ctx context.Context) { By("deleting the mock CSRs via Kubernetes API") for i := 0; i < csrCount; i++ { - err := clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Delete(ctx, prefix+strconv.Itoa(i), metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) + deleteFunc := clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Delete + DeleteK8sObjectWithRetry(ctx, deleteFunc, prefix+strconv.Itoa(i), metav1.DeleteOptions{}) } }) @@ -70,8 +71,8 @@ func testCSRApproveOK(ctx context.Context, objName, namespace string) { Expect(resp.StatusCode).To(Equal(http.StatusOK)) By("checking that the CSR was approved via Kubernetes API") - testcsr, err := clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Get(ctx, objName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + getFunc := clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Get + testcsr := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{}) approved := false for _, condition := range testcsr.Status.Conditions { @@ -93,8 +94,8 @@ func testCSRMassApproveOK(ctx context.Context, namePrefix, namespace string, csr By("checking that all CSRs were approved via Kubernetes API") for i := 1; i < csrCount; i++ { - testcsr, err := 
clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Get(ctx, namePrefix+strconv.Itoa(i), metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + getFunc := clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Get + testcsr := GetK8sObjectWithRetry(ctx, getFunc, namePrefix+strconv.Itoa(i), metav1.GetOptions{}) approved := false for _, condition := range testcsr.Status.Conditions { diff --git a/test/e2e/adminapi_delete_managedresource.go b/test/e2e/adminapi_delete_managedresource.go index 9047ff7a8d6..259ee4efa54 100644 --- a/test/e2e/adminapi_delete_managedresource.go +++ b/test/e2e/adminapi_delete_managedresource.go @@ -46,8 +46,8 @@ var _ = Describe("[Admin API] Delete managed resource action", func() { var pipAddressID string By("creating a test service of type loadbalancer") - _, err := clients.Kubernetes.CoreV1().Services("default").Create(ctx, &loadBalancerService, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + creationFunc := clients.Kubernetes.CoreV1().Services("default").Create + CreateK8sObjectWithRetry(ctx, creationFunc, &loadBalancerService, metav1.CreateOptions{}) defer func() { By("cleaning up the k8s loadbalancer service") @@ -66,8 +66,8 @@ var _ = Describe("[Admin API] Delete managed resource action", func() { // wait for ingress IP to be assigned as this indicate the service is ready Eventually(func(g Gomega, ctx context.Context) { - service, err = clients.Kubernetes.CoreV1().Services("default").Get(ctx, "test", metav1.GetOptions{}) - g.Expect(err).NotTo(HaveOccurred()) + getFunc := clients.Kubernetes.CoreV1().Services("default").Get + service = GetK8sObjectWithRetry(ctx, getFunc, "test", metav1.GetOptions{}) g.Expect(service.Status.LoadBalancer.Ingress).To(HaveLen(1)) }).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) diff --git a/test/e2e/adminapi_drainnode.go b/test/e2e/adminapi_drainnode.go index b8804208c3d..95785138354 100644 --- a/test/e2e/adminapi_drainnode.go +++ b/test/e2e/adminapi_drainnode.go @@ -22,10 +22,13 @@ var _ = Describe("[Admin API] Cordon and Drain node actions", func() { It("should be able to cordon, drain, and uncordon nodes", func(ctx context.Context) { By("selecting a worker node in the cluster") - nodes, err := clients.Kubernetes.CoreV1().Nodes().List(ctx, metav1.ListOptions{ - LabelSelector: "node-role.kubernetes.io/worker", - }) - Expect(err).NotTo(HaveOccurred()) + nodes := ListK8sObjectWithRetry( + ctx, + clients.Kubernetes.CoreV1().Nodes().List, + metav1.ListOptions{ + LabelSelector: "node-role.kubernetes.io/worker", + }) + Expect(nodes.Items).ShouldNot(BeEmpty()) node := nodes.Items[0] nodeName := node.Name @@ -48,7 +51,7 @@ var _ = Describe("[Admin API] Cordon and Drain node actions", func() { defer func() { By("uncordoning the node via Kubernetes API") - err = drain.RunCordonOrUncordon(drainer, &node, false) + err := drain.RunCordonOrUncordon(drainer, &node, false) Expect(err).NotTo(HaveOccurred()) }() @@ -69,8 +72,8 @@ func testCordonNodeOK(ctx context.Context, nodeName string) { Expect(resp.StatusCode).To(Equal(http.StatusOK)) By("checking that node was cordoned via Kubernetes API") - node, err := clients.Kubernetes.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + node := GetK8sObjectWithRetry(ctx, clients.Kubernetes.CoreV1().Nodes().Get, nodeName, metav1.GetOptions{}) + Expect(node.Name).To(Equal(nodeName)) Expect(node.Spec.Unschedulable).Should(BeTrue()) } @@ -86,8 +89,7 @@ func testUncordonNodeOK(ctx context.Context, 
nodeName string) { Expect(resp.StatusCode).To(Equal(http.StatusOK)) By("checking that node was uncordoned via Kubernetes API") - node, err := clients.Kubernetes.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + node := GetK8sObjectWithRetry(ctx, clients.Kubernetes.CoreV1().Nodes().Get, nodeName, metav1.GetOptions{}) Expect(node.Name).To(Equal(nodeName)) Expect(node.Spec.Unschedulable).Should(BeFalse()) } diff --git a/test/e2e/adminapi_kubernetesobjects.go b/test/e2e/adminapi_kubernetesobjects.go index d7760c0b9ed..72116e51dae 100644 --- a/test/e2e/adminapi_kubernetesobjects.go +++ b/test/e2e/adminapi_kubernetesobjects.go @@ -67,15 +67,15 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() { It("should not be able to create, get, list, update, or delete objects", func(ctx context.Context) { By("creating a test customer namespace via Kubernetes API") - _, err := clients.Kubernetes.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: namespace}, - }, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + createNamespaceFunc := clients.Kubernetes.CoreV1().Namespaces().Create + namespaceToCreate := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + CreateK8sObjectWithRetry(ctx, createNamespaceFunc, namespaceToCreate, metav1.CreateOptions{}) defer func() { By("deleting the test customer namespace via Kubernetes API") - err := clients.Kubernetes.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) + deleteFunc := clients.Kubernetes.CoreV1().Namespaces().Delete + DeleteK8sObjectWithRetry(ctx, deleteFunc, namespace, metav1.DeleteOptions{}) // To avoid flakes, we need it to be completely deleted before we can use it again // in a separate run or in a separate It block @@ -89,10 +89,10 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() { testConfigMapCreateOrUpdateForbidden(ctx, "creating", objName, namespace) By("creating an object via Kubernetes API") - _, err = clients.Kubernetes.CoreV1().ConfigMaps(namespace).Create(ctx, &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: objName}, - }, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + createCMFunc := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Create + configMapToCreate := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: objName}} + + CreateK8sObjectWithRetry(ctx, createCMFunc, configMapToCreate, metav1.CreateOptions{}) testConfigMapGetForbidden(ctx, objName, namespace) testConfigMapListForbidden(ctx, objName, namespace) @@ -105,15 +105,15 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() { It("should be able to list or get objects", func(ctx context.Context) { By("creating a test customer namespace via Kubernetes API") - _, err := clients.Kubernetes.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: namespace}, - }, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + createNamespaceFunc := clients.Kubernetes.CoreV1().Namespaces().Create + namespaceToCreate := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + CreateK8sObjectWithRetry(ctx, createNamespaceFunc, namespaceToCreate, metav1.CreateOptions{}) defer func() { By("deleting the test customer namespace via Kubernetes API") - err := clients.Kubernetes.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) + deleteFunc := 
clients.Kubernetes.CoreV1().Namespaces().Delete + DeleteK8sObjectWithRetry(ctx, deleteFunc, namespace, metav1.DeleteOptions{}) // To avoid flakes, we need it to be completely deleted before we can use it again // in a separate run or in a separate It block @@ -125,10 +125,10 @@ var _ = Describe("[Admin API] Kubernetes objects action", func() { }() By("creating an object via Kubernetes API") - _, err = clients.Kubernetes.CoreV1().ConfigMaps(namespace).Create(ctx, &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: objName}, - }, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + createCMFunc := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Create + configMapToCreate := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: objName}} + + CreateK8sObjectWithRetry(ctx, createCMFunc, configMapToCreate, metav1.CreateOptions{}) testConfigMapGetOK(ctx, objName, namespace, true) testConfigMapListOK(ctx, objName, namespace, true) @@ -204,8 +204,10 @@ func testConfigMapCreateOK(ctx context.Context, objName, namespace string) { Expect(resp.StatusCode).To(Equal(http.StatusOK)) By("checking that the object was created via Kubernetes API") - cm, err := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Get(ctx, objName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + getFunc := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Get + + cm := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{}) + Expect(obj.Namespace).To(Equal(cm.Namespace)) Expect(obj.Name).To(Equal(cm.Name)) Expect(obj.Data).To(Equal(cm.Data)) @@ -225,9 +227,11 @@ func testConfigMapGetOK(ctx context.Context, objName, namespace string, unrestri Expect(err).NotTo(HaveOccurred()) Expect(resp.StatusCode).To(Equal(http.StatusOK)) - By("comparing it to the actual object retrived via Kubernetes API") - cm, err := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Get(ctx, objName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + By("comparing it to the actual object retrieved via Kubernetes API") + getFunc := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Get + + cm := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{}) + Expect(obj.Namespace).To(Equal(cm.Namespace)) Expect(obj.Name).To(Equal(cm.Name)) Expect(obj.Data).To(Equal(cm.Data)) @@ -246,7 +250,7 @@ func testConfigMapListOK(ctx context.Context, objName, namespace string, unrestr Expect(err).NotTo(HaveOccurred()) Expect(resp.StatusCode).To(Equal(http.StatusOK)) - By("comparing names from the list action response with names retrived via Kubernetes API") + By("comparing names from the list action response with names retrieved via Kubernetes API") var names []string for _, o := range obj.Items { names = append(names, o.Name) @@ -264,8 +268,10 @@ func testConfigMapUpdateOK(ctx context.Context, objName, namespace string) { Expect(resp.StatusCode).To(Equal(http.StatusOK)) By("checking that the object changed via Kubernetes API") - cm, err := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Get(ctx, objName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + getFunc := clients.Kubernetes.CoreV1().ConfigMaps(namespace).Get + + cm := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{}) + Expect(cm.Namespace).To(Equal(namespace)) Expect(cm.Name).To(Equal(objName)) Expect(cm.Data).To(Equal(map[string]string{"key": "new_value"})) @@ -367,8 +373,10 @@ func testPodCreateOK(ctx context.Context, containerName, objName, namespace stri Expect(resp.StatusCode).To(Equal(http.StatusOK)) By("checking that the pod was 
created via Kubernetes API") - pod, err := clients.Kubernetes.CoreV1().Pods(namespace).Get(ctx, objName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + getFunc := clients.Kubernetes.CoreV1().Pods(namespace).Get + + pod := GetK8sObjectWithRetry(ctx, getFunc, objName, metav1.GetOptions{}) + Expect(obj.Namespace).To(Equal(pod.Namespace)) Expect(obj.Name).To(Equal(pod.Name)) Expect(obj.Spec.Containers[0].Name).To(Equal(pod.Spec.Containers[0].Name)) diff --git a/test/e2e/adminapi_redeployvm.go b/test/e2e/adminapi_redeployvm.go index dd96f2f0ada..2d311c2a3ce 100644 --- a/test/e2e/adminapi_redeployvm.go +++ b/test/e2e/adminapi_redeployvm.go @@ -17,6 +17,7 @@ import ( mgmtcompute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute" corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/Azure/ARO-RP/pkg/util/ready" @@ -63,8 +64,8 @@ var _ = Describe("[Admin API] VM redeploy action", func() { By("waiting for the redeployed node to eventually become Ready in OpenShift") // wait 1 minute - this will guarantee we pass the minimum (default) threshold of Node heartbeats (40 seconds) Eventually(func(g Gomega, ctx context.Context) { - node, err := clients.Kubernetes.CoreV1().Nodes().Get(ctx, *vm.Name, metav1.GetOptions{}) - g.Expect(err).NotTo(HaveOccurred()) + getFunc := clients.Kubernetes.CoreV1().Nodes().Get + node := GetK8sObjectWithRetry(ctx, getFunc, *vm.Name, metav1.GetOptions{}) g.Expect(ready.NodeIsReady(node)).To(BeTrue()) }).WithContext(ctx).WithTimeout(10 * time.Minute).WithPolling(time.Minute).Should(Succeed()) @@ -79,10 +80,10 @@ var _ = Describe("[Admin API] VM redeploy action", func() { func getNodeUptime(g Gomega, ctx context.Context, node string) (time.Time, error) { // container kernel = node kernel = `uptime` in a Pod reflects the Node as well namespace := "default" - name := fmt.Sprintf("%s-uptime-%d", node, GinkgoParallelProcess()) + podName := fmt.Sprintf("%s-uptime-%d", node, GinkgoParallelProcess()) pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: podName, Namespace: namespace, }, Spec: corev1.PodSpec{ @@ -108,25 +109,30 @@ func getNodeUptime(g Gomega, ctx context.Context, node string) (time.Time, error return time.Time{}, err } - // Defer Delete defer func() { - By("deleting uptime pod") - err := clients.Kubernetes.CoreV1().Pods(namespace).Delete(ctx, name, metav1.DeleteOptions{}) - if err != nil { - log.Error("Could not delete test Pod") - } + By("deleting the uptime pod via Kubernetes API") + deleteFunc := clients.Kubernetes.CoreV1().Pods(namespace).Delete + DeleteK8sObjectWithRetry(ctx, deleteFunc, podName, metav1.DeleteOptions{}) + + // To avoid flakes, we need it to be completely deleted before we can use it again + // in a separate run or in a separate It block + By("waiting for uptime pod to be deleted") + Eventually(func(g Gomega, ctx context.Context) { + _, err := clients.Kubernetes.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) + g.Expect(kerrors.IsNotFound(err)).To(BeTrue(), "expect uptime pod to be deleted") + }).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) }() By("waiting for uptime pod to move into the Succeeded phase") g.Eventually(func(g Gomega, ctx context.Context) { - p, err := clients.Kubernetes.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) - g.Expect(err).NotTo(HaveOccurred()) + getFunc := clients.Kubernetes.CoreV1().Pods(namespace).Get + pod := 
GetK8sObjectWithRetry(ctx, getFunc, podName, metav1.GetOptions{}) - g.Expect(p.Status.Phase).To(Equal(corev1.PodSucceeded)) + g.Expect(pod.Status.Phase).To(Equal(corev1.PodSucceeded)) }).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) By("getting logs") - req := clients.Kubernetes.CoreV1().Pods(namespace).GetLogs(name, &corev1.PodLogOptions{}) + req := clients.Kubernetes.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{}) stream, err := req.Stream(ctx) if err != nil { return time.Time{}, err diff --git a/test/e2e/cluster.go b/test/e2e/cluster.go index a7bc86b66be..58c47b792df 100644 --- a/test/e2e/cluster.go +++ b/test/e2e/cluster.go @@ -27,7 +27,6 @@ import ( "github.com/Azure/ARO-RP/pkg/util/ready" "github.com/Azure/ARO-RP/pkg/util/stringutils" "github.com/Azure/ARO-RP/pkg/util/version" - "github.com/Azure/ARO-RP/test/util/project" ) const ( @@ -35,28 +34,24 @@ const ( ) var _ = Describe("Cluster", Serial, func() { - var p project.Project + var project Project BeforeEach(func(ctx context.Context) { By("creating a test namespace") testNamespace := fmt.Sprintf("test-e2e-%d", GinkgoParallelProcess()) - p = project.NewProject(clients.Kubernetes, clients.Project, testNamespace) - err := p.Create(ctx) - Expect(err).NotTo(HaveOccurred(), "Failed to create test namespace") + project = BuildNewProject(ctx, clients.Kubernetes, clients.Project, testNamespace) By("verifying the namespace is ready") Eventually(func(ctx context.Context) error { - return p.Verify(ctx) + return project.VerifyProjectIsReady(ctx) }).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(BeNil()) DeferCleanup(func(ctx context.Context) { By("deleting a test namespace") - err := p.Delete(ctx) - Expect(err).NotTo(HaveOccurred(), "Failed to delete test namespace") - + project.Delete(ctx) By("verifying the namespace is deleted") Eventually(func(ctx context.Context) error { - return p.VerifyProjectIsDeleted(ctx) + return project.VerifyProjectIsDeleted(ctx) }).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(BeNil()) }) }) @@ -73,12 +68,11 @@ var _ = Describe("Cluster", Serial, func() { storageClass = "managed-premium" } - ssName, err := createStatefulSet(ctx, clients.Kubernetes, p.Name, storageClass) - Expect(err).NotTo(HaveOccurred()) + ssName := createStatefulSet(ctx, clients.Kubernetes, project.Name, storageClass) By("verifying the stateful set is ready") Eventually(func(g Gomega, ctx context.Context) { - s, err := clients.Kubernetes.AppsV1().StatefulSets(p.Name).Get(ctx, ssName, metav1.GetOptions{}) + s, err := clients.Kubernetes.AppsV1().StatefulSets(project.Name).Get(ctx, ssName, metav1.GetOptions{}) g.Expect(err).NotTo(HaveOccurred()) g.Expect(ready.StatefulSetIsReady(s)).To(BeTrue(), "expect stateful to be ready") GinkgoWriter.Println(s) @@ -179,17 +173,16 @@ var _ = Describe("Cluster", Serial, func() { By("creating stateful set") storageClass := "azurefile-csi" - ssName, err := createStatefulSet(ctx, clients.Kubernetes, p.Name, storageClass) - Expect(err).NotTo(HaveOccurred()) + ssName := createStatefulSet(ctx, clients.Kubernetes, project.Name, storageClass) By("verifying the stateful set is ready") Eventually(func(g Gomega, ctx context.Context) { - s, err := clients.Kubernetes.AppsV1().StatefulSets(p.Name).Get(ctx, ssName, metav1.GetOptions{}) + s, err := clients.Kubernetes.AppsV1().StatefulSets(project.Name).Get(ctx, ssName, metav1.GetOptions{}) g.Expect(err).NotTo(HaveOccurred()) g.Expect(ready.StatefulSetIsReady(s)).To(BeTrue(), "expect stateful to be 
ready") pvcName := statefulSetPVCName(ssName, testPVCName, 0) - pvc, err := clients.Kubernetes.CoreV1().PersistentVolumeClaims(p.Name).Get(ctx, pvcName, metav1.GetOptions{}) + pvc, err := clients.Kubernetes.CoreV1().PersistentVolumeClaims(project.Name).Get(ctx, pvcName, metav1.GetOptions{}) g.Expect(err).NotTo(HaveOccurred()) GinkgoWriter.Println(pvc) }).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) @@ -223,18 +216,16 @@ var _ = Describe("Cluster", Serial, func() { It("can create load balancer services", func(ctx context.Context) { By("creating an external load balancer service") - err := createLoadBalancerService(ctx, clients.Kubernetes, "elb", p.Name, map[string]string{}) - Expect(err).NotTo(HaveOccurred()) + createLoadBalancerService(ctx, clients.Kubernetes, "elb", project.Name, map[string]string{}) By("creating an internal load balancer service") - err = createLoadBalancerService(ctx, clients.Kubernetes, "ilb", p.Name, map[string]string{ + createLoadBalancerService(ctx, clients.Kubernetes, "ilb", project.Name, map[string]string{ "service.beta.kubernetes.io/azure-load-balancer-internal": "true", }) - Expect(err).NotTo(HaveOccurred()) By("verifying the external load balancer service is ready") Eventually(func(ctx context.Context) bool { - svc, err := clients.Kubernetes.CoreV1().Services(p.Name).Get(ctx, "elb", metav1.GetOptions{}) + svc, err := clients.Kubernetes.CoreV1().Services(project.Name).Get(ctx, "elb", metav1.GetOptions{}) if err != nil { return false } @@ -243,7 +234,7 @@ var _ = Describe("Cluster", Serial, func() { By("verifying the internal load balancer service is ready") Eventually(func(ctx context.Context) bool { - svc, err := clients.Kubernetes.CoreV1().Services(p.Name).Get(ctx, "ilb", metav1.GetOptions{}) + svc, err := clients.Kubernetes.CoreV1().Services(project.Name).Get(ctx, "ilb", metav1.GetOptions{}) if err != nil { return false } @@ -257,12 +248,11 @@ var _ = Describe("Cluster", Serial, func() { deployName := "internal-registry-deploy" By("creating a test deployment from an internal container registry") - err := createContainerFromInternalContainerRegistryImage(ctx, clients.Kubernetes, deployName, p.Name) - Expect(err).NotTo(HaveOccurred()) + createContainerFromInternalContainerRegistryImage(ctx, clients.Kubernetes, deployName, project.Name) By("verifying the deployment is ready") Eventually(func(g Gomega, ctx context.Context) { - s, err := clients.Kubernetes.AppsV1().Deployments(p.Name).Get(ctx, deployName, metav1.GetOptions{}) + s, err := clients.Kubernetes.AppsV1().Deployments(project.Name).Get(ctx, deployName, metav1.GetOptions{}) g.Expect(err).NotTo(HaveOccurred()) g.Expect(ready.DeploymentIsReady(s)).To(BeTrue(), "expect stateful to be ready") @@ -290,14 +280,17 @@ func clusterSubnets(oc redhatopenshift.OpenShiftCluster) []string { return subnets } -func createStatefulSet(ctx context.Context, cli kubernetes.Interface, namespace, storageClass string) (string, error) { - pvcStorage, err := resource.ParseQuantity("2Gi") +func createStatefulSet(ctx context.Context, cli kubernetes.Interface, namespace, storageClass string) string { + quantity := "2Gi" + pvcStorage, err := resource.ParseQuantity(quantity) if err != nil { - return "", err + message := fmt.Sprintf("Could not parse %v when creating a stateful set.", quantity) + Fail(message) } + ssName := fmt.Sprintf("busybox-%s-%d", storageClass, GinkgoParallelProcess()) - _, err = cli.AppsV1().StatefulSets(namespace).Create(ctx, &appsv1.StatefulSet{ + ss := &appsv1.StatefulSet{ ObjectMeta: 
metav1.ObjectMeta{ Name: ssName, }, @@ -349,15 +342,17 @@ func createStatefulSet(ctx context.Context, cli kubernetes.Interface, namespace, }, }, }, - }, metav1.CreateOptions{}) - return ssName, err + } + + _ = CreateK8sObjectWithRetry(ctx, cli.AppsV1().StatefulSets(namespace).Create, ss, metav1.CreateOptions{}) + return ssName } func statefulSetPVCName(ssName string, claimName string, ordinal int) string { return fmt.Sprintf("%s-%s-%d", claimName, ssName, ordinal) } -func createLoadBalancerService(ctx context.Context, cli kubernetes.Interface, name, namespace string, annotations map[string]string) error { +func createLoadBalancerService(ctx context.Context, cli kubernetes.Interface, name, namespace string, annotations map[string]string) { svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -374,11 +369,10 @@ func createLoadBalancerService(ctx context.Context, cli kubernetes.Interface, na Type: corev1.ServiceTypeLoadBalancer, }, } - _, err := cli.CoreV1().Services(namespace).Create(ctx, svc, metav1.CreateOptions{}) - return err + CreateK8sObjectWithRetry(ctx, cli.CoreV1().Services(namespace).Create, svc, metav1.CreateOptions{}) } -func createContainerFromInternalContainerRegistryImage(ctx context.Context, cli kubernetes.Interface, name, namespace string) error { +func createContainerFromInternalContainerRegistryImage(ctx context.Context, cli kubernetes.Interface, name, namespace string) { deploy := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -411,6 +405,5 @@ func createContainerFromInternalContainerRegistryImage(ctx context.Context, cli }, }, } - _, err := cli.AppsV1().Deployments(namespace).Create(ctx, deploy, metav1.CreateOptions{}) - return err + CreateK8sObjectWithRetry(ctx, cli.AppsV1().Deployments(namespace).Create, deploy, metav1.CreateOptions{}) } diff --git a/test/e2e/disk_encryption.go b/test/e2e/disk_encryption.go index 83eb407e1b8..b796b07aa4e 100644 --- a/test/e2e/disk_encryption.go +++ b/test/e2e/disk_encryption.go @@ -110,8 +110,10 @@ var _ = Describe("Disk encryption at rest", func() { } By("making sure the encrypted storage class is default") - sc, err := clients.Kubernetes.StorageV1().StorageClasses().Get(ctx, defaultStorageClass, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + sc := GetK8sObjectWithRetry( + ctx, clients.Kubernetes.StorageV1().StorageClasses().Get, defaultStorageClass, metav1.GetOptions{}, + ) + Expect(sc).NotTo(BeNil()) Expect(sc.Annotations).NotTo(BeNil()) Expect(sc.Annotations["storageclass.kubernetes.io/is-default-class"]).To(Equal("true")) diff --git a/test/e2e/dns.go b/test/e2e/dns.go index f50a858409c..0b48d50a586 100644 --- a/test/e2e/dns.go +++ b/test/e2e/dns.go @@ -23,12 +23,11 @@ import ( "github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/network" "github.com/Azure/ARO-RP/pkg/util/ready" "github.com/Azure/ARO-RP/pkg/util/stringutils" - "github.com/Azure/ARO-RP/test/util/project" ) var ( nameserverRegex = regexp.MustCompile("nameserver [0-9.]*") - verifyResolvConfTimeout = 30 * time.Second + verifyResolvConfTimeout = 5 * time.Minute verifyResolvConfPollInterval = 1 * time.Second nodesReadyPollInterval = 2 * time.Second nicUpdateWaitTime = 5 * time.Second @@ -43,30 +42,23 @@ var _ = Describe("ARO cluster DNS", func() { It("must not be adversely affected by Azure host servicing", func(ctx context.Context) { By("creating a test namespace") testNamespace := fmt.Sprintf("test-e2e-%d", GinkgoParallelProcess()) - p := project.NewProject(clients.Kubernetes, clients.Project, testNamespace) - err := 
p.Create(ctx) - Expect(err).NotTo(HaveOccurred(), "Failed to create test namespace") + p := BuildNewProject(ctx, clients.Kubernetes, clients.Project, testNamespace) By("verifying the namespace is ready") Eventually(func(ctx context.Context) error { - return p.Verify(ctx) + return p.VerifyProjectIsReady(ctx) }).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(BeNil()) DeferCleanup(func(ctx context.Context) { By("deleting the test namespace") - err := p.Delete(ctx) - Expect(err).NotTo(HaveOccurred(), "Failed to delete test namespace") - - By("verifying the namespace is deleted") - Eventually(func(ctx context.Context) error { - return p.VerifyProjectIsDeleted(ctx) - }).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(BeNil()) + p.CleanUp(ctx) }) By("listing all cluster nodes to retrieve the names of the worker nodes") workerNodes := map[string]string{} - nodeList, err := clients.Kubernetes.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + nodeList := ListK8sObjectWithRetry( + ctx, clients.Kubernetes.CoreV1().Nodes().List, metav1.ListOptions{}, + ) for _, node := range nodeList.Items { name := node.ObjectMeta.Name @@ -154,8 +146,7 @@ var _ = Describe("ARO cluster DNS", func() { By("verifying each worker node's resolv.conf via a one-shot Job per node") for wn, ip := range workerNodes { - err = createResolvConfJob(ctx, clients.Kubernetes, wn, testNamespace) - Expect(err).NotTo(HaveOccurred()) + createResolvConfJob(ctx, clients.Kubernetes, wn, testNamespace) Eventually(verifyResolvConf). WithContext(ctx). @@ -164,8 +155,7 @@ var _ = Describe("ARO cluster DNS", func() { WithArguments(clients.Kubernetes, wn, testNamespace, ip). Should(Succeed()) - err = deleteResolvConfJob(ctx, clients.Kubernetes, wn, testNamespace) - Expect(err).NotTo(HaveOccurred()) + deleteResolvConfJob(ctx, clients.Kubernetes, wn, testNamespace) } By("stopping all three worker VMs") @@ -202,8 +192,7 @@ var _ = Describe("ARO cluster DNS", func() { By("verifying each worker node's resolv.conf is still correct after simulating host servicing, again via a one-shot Job per node") for wn, ip := range workerNodes { - err = createResolvConfJob(ctx, clients.Kubernetes, wn, testNamespace) - Expect(err).NotTo(HaveOccurred()) + createResolvConfJob(ctx, clients.Kubernetes, wn, testNamespace) Eventually(verifyResolvConf). WithContext(ctx). @@ -212,8 +201,7 @@ var _ = Describe("ARO cluster DNS", func() { WithArguments(clients.Kubernetes, wn, testNamespace, ip). 
Should(Succeed()) - err = deleteResolvConfJob(ctx, clients.Kubernetes, wn, testNamespace) - Expect(err).NotTo(HaveOccurred()) + deleteResolvConfJob(ctx, clients.Kubernetes, wn, testNamespace) } By("stopping all three worker VMs") @@ -258,7 +246,7 @@ func resolvConfJobName(nodeName string) string { return jobName } -func createResolvConfJob(ctx context.Context, cli kubernetes.Interface, nodeName string, namespace string) error { +func createResolvConfJob(ctx context.Context, cli kubernetes.Interface, nodeName string, namespace string) { hpt := corev1.HostPathFile job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ @@ -302,25 +290,24 @@ func createResolvConfJob(ctx context.Context, cli kubernetes.Interface, nodeName }, }, } - _, err := cli.BatchV1().Jobs(namespace).Create(ctx, job, metav1.CreateOptions{}) - return err + CreateK8sObjectWithRetry(ctx, cli.BatchV1().Jobs(namespace).Create, job, metav1.CreateOptions{}) } -func deleteResolvConfJob(ctx context.Context, cli kubernetes.Interface, nodeName string, namespace string) error { +func deleteResolvConfJob(ctx context.Context, cli kubernetes.Interface, nodeName string, namespace string) { dpb := metav1.DeletePropagationBackground - return cli.BatchV1().Jobs(namespace).Delete(ctx, resolvConfJobName(nodeName), metav1.DeleteOptions{ + deleteFunc := cli.BatchV1().Jobs(namespace).Delete + DeleteK8sObjectWithRetry(ctx, deleteFunc, resolvConfJobName(nodeName), metav1.DeleteOptions{ PropagationPolicy: &dpb, }) } -func verifyResolvConf(ctx context.Context, cli kubernetes.Interface, nodeName string, namespace string, nodeIp string) error { +func verifyResolvConf( + ctx context.Context, cli kubernetes.Interface, nodeName string, namespace string, nodeIp string, +) error { jobName := resolvConfJobName(nodeName) - podList, err := cli.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + podList := ListK8sObjectWithRetry(ctx, cli.CoreV1().Pods(namespace).List, metav1.ListOptions{ LabelSelector: fmt.Sprintf("job-name=%s", jobName), }) - if err != nil { - return err - } if len(podList.Items) != 1 { return fmt.Errorf("found %v Pods associated with the Job, but there should be exactly 1", len(podList.Items)) } diff --git a/test/e2e/helpers.go b/test/e2e/helpers.go new file mode 100644 index 00000000000..433674deffa --- /dev/null +++ b/test/e2e/helpers.go @@ -0,0 +1,179 @@ +package e2e + +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + projectv1 "github.com/openshift/api/project/v1" + projectclient "github.com/openshift/client-go/project/clientset/versioned" + authorizationv1 "k8s.io/api/authorization/v1" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" +) + +var ( + DefaultTimeout = 10 * time.Minute + PollingInterval = 250 * time.Millisecond +) + +type K8sGetFunc[T kruntime.Object] func(ctx context.Context, name string, options metav1.GetOptions) (T, error) +type K8sListFunc[T kruntime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error) +type K8sCreateFunc[T kruntime.Object] func(ctx context.Context, object T, options metav1.CreateOptions) (T, error) +type K8sDeleteFunc func(ctx context.Context, name string, options metav1.DeleteOptions) error + +// GetK8sObjectWithRetry takes a get function like clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Get +// and the parameters for it. It then makes the call with some retry logic and returns the result after +// asserting there were no errors. +func GetK8sObjectWithRetry[T kruntime.Object]( + ctx context.Context, getFunc K8sGetFunc[T], name string, options metav1.GetOptions, +) T { + var object T + Eventually(func(g Gomega, ctx context.Context) { + result, err := getFunc(ctx, name, options) + g.Expect(err).NotTo(HaveOccurred()) + object = result + }).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed()) + return object +} + +// GetK8sPodLogsWithRetry gets the logs for the specified pod in the named namespace. It gets them with some +// retry logic and returns the raw string-ified body after asserting there were no errors. +func GetK8sPodLogsWithRetry( + ctx context.Context, namespace string, name string, options corev1.PodLogOptions, +) (rawBody string) { + Eventually(func(g Gomega, ctx context.Context) { + body, err := clients.Kubernetes.CoreV1().Pods(namespace).GetLogs(name, &options).DoRaw(ctx) + g.Expect(err).NotTo(HaveOccurred()) + rawBody = string(body) + }).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed()) + return +} + +// ListK8sObjectWithRetry takes a list function like clients.Kubernetes.CoreV1().Nodes().List and the +// parameters for it. It then makes the call with some retry logic and returns the result after +// asserting there were no errors. +func ListK8sObjectWithRetry[T kruntime.Object]( + ctx context.Context, listFunc K8sListFunc[T], options metav1.ListOptions, +) T { + var object T + Eventually(func(g Gomega, ctx context.Context) { + result, err := listFunc(ctx, options) + g.Expect(err).NotTo(HaveOccurred()) + object = result + }).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed()) + return object +} + +// CreateK8sObjectWithRetry takes a create function like clients.Kubernetes.CoreV1().Pods(namespace).Create +// and the parameters for it. It then makes the call with some retry logic and returns the result after +// asserting there were no errors. 
+func CreateK8sObjectWithRetry[T kruntime.Object]( + ctx context.Context, createFunc K8sCreateFunc[T], obj T, options metav1.CreateOptions, +) T { + var object T + Eventually(func(g Gomega, ctx context.Context) { + result, err := createFunc(ctx, obj, options) + g.Expect(err).NotTo(HaveOccurred()) + object = result + }).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed()) + return object +} + +// DeleteK8sObjectWithRetry takes a delete function like clients.Kubernetes.CertificatesV1().CertificateSigningRequests().Delete +// and the parameters for it. It then makes the call with some retry logic. +func DeleteK8sObjectWithRetry( + ctx context.Context, deleteFunc K8sDeleteFunc, name string, options metav1.DeleteOptions, +) { + Eventually(func(g Gomega, ctx context.Context) { + err := deleteFunc(ctx, name, options) + g.Expect(err).NotTo(HaveOccurred()) + }).WithContext(ctx).WithTimeout(DefaultTimeout).WithPolling(PollingInterval).Should(Succeed()) +} + +type Project struct { + projectClient projectclient.Interface + cli kubernetes.Interface + Name string +} + +func BuildNewProject( + ctx context.Context, cli kubernetes.Interface, projectClient projectclient.Interface, name string, +) Project { + p := Project{ + projectClient: projectClient, + cli: cli, + Name: name, + } + createFunc := p.projectClient.ProjectV1().Projects().Create + CreateK8sObjectWithRetry(ctx, createFunc, &projectv1.Project{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.Name, + }, + }, metav1.CreateOptions{}) + return p +} + +func (p Project) CleanUp(ctx context.Context) { + p.Delete(ctx) + p.VerifyProjectIsDeleted(ctx) +} + +func (p Project) Delete(ctx context.Context) { + deleteFunc := p.projectClient.ProjectV1().Projects().Delete + DeleteK8sObjectWithRetry(ctx, deleteFunc, p.Name, metav1.DeleteOptions{}) +} + +// VerifyProjectIsReady verifies that the project and relevant resources have been created correctly +// and returns an error if any. 
+func (p Project) VerifyProjectIsReady(ctx context.Context) error {
+	By("creating a SelfSubjectAccessReview")
+	CreateK8sObjectWithRetry(ctx, p.cli.AuthorizationV1().SelfSubjectAccessReviews().Create,
+		&authorizationv1.SelfSubjectAccessReview{
+			Spec: authorizationv1.SelfSubjectAccessReviewSpec{
+				ResourceAttributes: &authorizationv1.ResourceAttributes{
+					Namespace: p.Name,
+					Verb:      "create",
+					Resource:  "pods",
+				},
+			},
+		}, metav1.CreateOptions{})
+
+	By("getting the relevant SA")
+	sa := GetK8sObjectWithRetry(
+		ctx, p.cli.CoreV1().ServiceAccounts(p.Name).Get, "default", metav1.GetOptions{},
+	)
+
+	if len(sa.Secrets) == 0 {
+		return fmt.Errorf("default ServiceAccount does not have secrets")
+	}
+
+	project := GetK8sObjectWithRetry(
+		ctx, p.projectClient.ProjectV1().Projects().Get, p.Name, metav1.GetOptions{},
+	)
+	_, found := project.Annotations["openshift.io/sa.scc.uid-range"]
+	if !found {
+		return fmt.Errorf("SCC annotation does not exist")
+	}
+	return nil
+}
+
+// VerifyProjectIsDeleted verifies that the project does not exist and returns an error if a project exists
+// or if it encounters an error other than NotFound.
+func (p Project) VerifyProjectIsDeleted(ctx context.Context) error {
+	_, err := p.projectClient.ProjectV1().Projects().Get(ctx, p.Name, metav1.GetOptions{})
+	if kerrors.IsNotFound(err) {
+		return nil
+	}
+
+	return fmt.Errorf("project exists")
+}
diff --git a/test/e2e/operator.go b/test/e2e/operator.go
index 01ba11292fc..33be4deecfa 100644
--- a/test/e2e/operator.go
+++ b/test/e2e/operator.go
@@ -39,26 +39,23 @@ import (
 	"github.com/Azure/ARO-RP/pkg/util/ready"
 )
 
-func updatedObjects(ctx context.Context, nsfilter string) ([]string, error) {
-	pods, err := clients.Kubernetes.CoreV1().Pods("openshift-azure-operator").List(ctx, metav1.ListOptions{
-		LabelSelector: "app=aro-operator-master",
-	})
-	if err != nil {
-		return nil, err
-	}
+func updatedObjects(ctx context.Context, nsFilter string) ([]string, error) {
+	listFunc := clients.Kubernetes.CoreV1().Pods("openshift-azure-operator").List
+	pods := ListK8sObjectWithRetry(
+		ctx, listFunc, metav1.ListOptions{LabelSelector: "app=aro-operator-master"},
+	)
 	if len(pods.Items) != 1 {
 		return nil, fmt.Errorf("%d aro-operator-master pods found", len(pods.Items))
 	}
 
-	b, err := clients.Kubernetes.CoreV1().Pods("openshift-azure-operator").GetLogs(pods.Items[0].Name, &corev1.PodLogOptions{}).DoRaw(ctx)
-	if err != nil {
-		return nil, err
-	}
+	body := GetK8sPodLogsWithRetry(
+		ctx, "openshift-azure-operator", pods.Items[0].Name, corev1.PodLogOptions{},
+	)
 	rx := regexp.MustCompile(`msg="(Update|Create) ([-a-zA-Z/.]+)`)
-	changes := rx.FindAllStringSubmatch(string(b), -1)
+	changes := rx.FindAllStringSubmatch(body, -1)
 	result := make([]string, 0, len(changes))
 	for _, change := range changes {
-		if nsfilter == "" || strings.Contains(change[2], "/"+nsfilter+"/") {
+		if nsFilter == "" || strings.Contains(change[2], "/"+nsFilter+"/") {
 			result = append(result, change[1]+" "+change[2])
 		}
 	}
@@ -141,8 +138,9 @@ var _ = Describe("ARO Operator - Geneva Logging", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("deleting mdsd DaemonSet")
-		err = clients.Kubernetes.AppsV1().DaemonSets("openshift-azure-logging").Delete(ctx, "mdsd", metav1.DeleteOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		DeleteK8sObjectWithRetry(
+			ctx, clients.Kubernetes.AppsV1().DaemonSets("openshift-azure-logging").Delete, "mdsd", metav1.DeleteOptions{},
+		)
 
 		By("checking that mdsd DaemonSet is ready")
Eventually(mdsdIsReady).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) @@ -161,14 +159,10 @@ var _ = Describe("ARO Operator - Geneva Logging", func() { var _ = Describe("ARO Operator - Cluster Monitoring ConfigMap", func() { It("must not have persistent volume set", func(ctx context.Context) { var cm *corev1.ConfigMap - configMapExists := func(g Gomega, ctx context.Context) { - var err error - cm, err = clients.Kubernetes.CoreV1().ConfigMaps("openshift-monitoring").Get(ctx, "cluster-monitoring-config", metav1.GetOptions{}) - g.Expect(err).ToNot(HaveOccurred()) - } + getFunc := clients.Kubernetes.CoreV1().ConfigMaps("openshift-monitoring").Get By("waiting for the ConfigMap to make sure it exists") - Eventually(configMapExists).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) + cm = GetK8sObjectWithRetry(ctx, getFunc, "cluster-monitoring-config", metav1.GetOptions{}) By("unmarshalling the config from the ConfigMap data") var configData monitoring.Config @@ -187,39 +181,33 @@ var _ = Describe("ARO Operator - Cluster Monitoring ConfigMap", func() { }) It("must be restored if deleted", func(ctx context.Context) { - configMapExists := func(g Gomega, ctx context.Context) { - _, err := clients.Kubernetes.CoreV1().ConfigMaps("openshift-monitoring").Get(ctx, "cluster-monitoring-config", metav1.GetOptions{}) - g.Expect(err).ToNot(HaveOccurred()) - } + getFunc := clients.Kubernetes.CoreV1().ConfigMaps("openshift-monitoring").Get + deleteFunc := clients.Kubernetes.CoreV1().ConfigMaps("openshift-monitoring").Delete By("waiting for the ConfigMap to make sure it exists") - Eventually(configMapExists).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) + GetK8sObjectWithRetry(ctx, getFunc, "cluster-monitoring-config", metav1.GetOptions{}) By("deleting for the ConfigMap") - err := clients.Kubernetes.CoreV1().ConfigMaps("openshift-monitoring").Delete(ctx, "cluster-monitoring-config", metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) + DeleteK8sObjectWithRetry(ctx, deleteFunc, "cluster-monitoring-config", metav1.DeleteOptions{}) By("waiting for the ConfigMap to make sure it was restored") - Eventually(configMapExists).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) + GetK8sObjectWithRetry(ctx, getFunc, "cluster-monitoring-config", metav1.GetOptions{}) }) }) var _ = Describe("ARO Operator - RBAC", func() { It("must restore system:aro-sre ClusterRole if deleted", func(ctx context.Context) { - clusterRoleExists := func(g Gomega, ctx context.Context) { - _, err := clients.Kubernetes.RbacV1().ClusterRoles().Get(ctx, "system:aro-sre", metav1.GetOptions{}) - g.Expect(err).ToNot(HaveOccurred()) - } + getFunc := clients.Kubernetes.RbacV1().ClusterRoles().Get + deleteFunc := clients.Kubernetes.RbacV1().ClusterRoles().Delete By("waiting for the ClusterRole to make sure it exists") - Eventually(clusterRoleExists).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) + GetK8sObjectWithRetry(ctx, getFunc, "system:aro-sre", metav1.GetOptions{}) By("deleting for the ClusterRole") - err := clients.Kubernetes.RbacV1().ClusterRoles().Delete(ctx, "system:aro-sre", metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) + DeleteK8sObjectWithRetry(ctx, deleteFunc, "system:aro-sre", metav1.DeleteOptions{}) By("waiting for the ClusterRole to make sure it was restored") - Eventually(clusterRoleExists).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) + GetK8sObjectWithRetry(ctx, 
getFunc, "system:aro-sre", metav1.GetOptions{}) }) }) @@ -301,7 +289,7 @@ func subnetReconciliationAnnotationExists(annotations map[string]string) bool { var _ = Describe("ARO Operator - Azure Subnet Reconciler", func() { var vnetName, location, resourceGroup string var subnetsToReconcile map[string]*string - var testnsg mgmtnetwork.SecurityGroup + var testNSG mgmtnetwork.SecurityGroup const nsg = "e2e-nsg" const emptyMachineSet = "e2e-test-machineset" @@ -357,17 +345,17 @@ var _ = Describe("ARO Operator - Azure Subnet Reconciler", func() { createE2ENSG := func(ctx context.Context) { By("creating an empty test NSG") - testnsg = mgmtnetwork.SecurityGroup{ + testNSG = mgmtnetwork.SecurityGroup{ Location: &location, Name: to.StringPtr(nsg), Type: to.StringPtr("Microsoft.Network/networkSecurityGroups"), SecurityGroupPropertiesFormat: &mgmtnetwork.SecurityGroupPropertiesFormat{}, } - err := clients.NetworkSecurityGroups.CreateOrUpdateAndWait(ctx, resourceGroup, nsg, testnsg) + err := clients.NetworkSecurityGroups.CreateOrUpdateAndWait(ctx, resourceGroup, nsg, testNSG) Expect(err).NotTo(HaveOccurred()) By("getting the freshly created test NSG resource") - testnsg, err = clients.NetworkSecurityGroups.Get(ctx, resourceGroup, nsg, "") + testNSG, err = clients.NetworkSecurityGroups.Get(ctx, resourceGroup, nsg, "") Expect(err).NotTo(HaveOccurred()) } @@ -403,11 +391,11 @@ var _ = Describe("ARO Operator - Azure Subnet Reconciler", func() { It("must reconcile list of subnets when NSG is changed", func(ctx context.Context) { for subnet := range subnetsToReconcile { By(fmt.Sprintf("assigning test NSG to subnet %q", subnet)) - // Gets current subnet NSG and then updates it to testnsg. + // Gets current subnet NSG and then updates it to testNSG. subnetObject, err := clients.Subnet.Get(ctx, resourceGroup, vnetName, subnet, "") Expect(err).NotTo(HaveOccurred()) - subnetObject.NetworkSecurityGroup = &testnsg + subnetObject.NetworkSecurityGroup = &testNSG err = clients.Subnet.CreateOrUpdateAndWait(ctx, resourceGroup, vnetName, subnet, subnetObject) Expect(err).NotTo(HaveOccurred()) @@ -415,11 +403,11 @@ var _ = Describe("ARO Operator - Azure Subnet Reconciler", func() { By("creating an empty MachineSet to force a reconcile") Eventually(func(g Gomega, ctx context.Context) { - machinesets, err := clients.MachineAPI.MachineV1beta1().MachineSets("openshift-machine-api").List(ctx, metav1.ListOptions{}) + machineSets, err := clients.MachineAPI.MachineV1beta1().MachineSets("openshift-machine-api").List(ctx, metav1.ListOptions{}) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(machinesets.Items).To(Not(BeEmpty())) + g.Expect(machineSets.Items).To(Not(BeEmpty())) - newMachineSet := machinesets.Items[0].DeepCopy() + newMachineSet := machineSets.Items[0].DeepCopy() newMachineSet.Status = machinev1beta1.MachineSetStatus{} newMachineSet.ObjectMeta = metav1.ObjectMeta{ Name: emptyMachineSet, @@ -457,57 +445,45 @@ var _ = Describe("ARO Operator - MUO Deployment", func() { It("must be deployed by default with FIPS crypto mandated", func(ctx context.Context) { By("getting MUO pods") - pods, err := clients.Kubernetes.CoreV1().Pods(managedUpgradeOperatorNamespace).List(ctx, metav1.ListOptions{ - LabelSelector: "name=managed-upgrade-operator", - }) - Expect(err).NotTo(HaveOccurred()) + pods := ListK8sObjectWithRetry( + ctx, clients.Kubernetes.CoreV1().Pods(managedUpgradeOperatorNamespace).List, metav1.ListOptions{ + LabelSelector: "name=managed-upgrade-operator", + }) Expect(pods.Items).NotTo(BeEmpty()) By("verifying that MUO has 
FIPS crypto mandated by reading logs") Eventually(func(g Gomega, ctx context.Context) { - b, err := clients.Kubernetes.CoreV1().Pods(managedUpgradeOperatorNamespace).GetLogs(pods.Items[0].Name, &corev1.PodLogOptions{}).DoRaw(ctx) - g.Expect(err).NotTo(HaveOccurred()) + body := GetK8sPodLogsWithRetry( + ctx, managedUpgradeOperatorNamespace, pods.Items[0].Name, corev1.PodLogOptions{}, + ) - g.Expect(string(b)).To(ContainSubstring(`X:boringcrypto,strictfipsruntime`)) + g.Expect(body).To(ContainSubstring(`X:boringcrypto,strictfipsruntime`)) }).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) }, SpecTimeout(2*time.Minute)) It("must be restored if deleted", func(ctx context.Context) { - deleteMUODeployment := func(ctx context.Context) error { - return clients.Kubernetes. - AppsV1(). - Deployments(managedUpgradeOperatorNamespace). - Delete(ctx, managedUpgradeOperatorDeployment, metav1.DeleteOptions{}) - } - - muoDeploymentExists := func(g Gomega, ctx context.Context) { - _, err := clients.Kubernetes. - AppsV1(). - Deployments(managedUpgradeOperatorNamespace). - Get(ctx, managedUpgradeOperatorDeployment, metav1.GetOptions{}) - - g.Expect(err).ToNot(HaveOccurred()) - } + deleteFunc := clients.Kubernetes.AppsV1().Deployments(managedUpgradeOperatorNamespace).Delete + getFunc := clients.Kubernetes.AppsV1().Deployments(managedUpgradeOperatorNamespace).Get By("waiting for the MUO deployment to be ready") - Eventually(muoDeploymentExists).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) + GetK8sObjectWithRetry(ctx, getFunc, managedUpgradeOperatorDeployment, metav1.GetOptions{}) By("deleting the MUO deployment") - Expect(deleteMUODeployment(ctx)).Should(Succeed()) + DeleteK8sObjectWithRetry(ctx, deleteFunc, managedUpgradeOperatorDeployment, metav1.DeleteOptions{}) By("waiting for the MUO deployment to be reconciled") - Eventually(muoDeploymentExists).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed()) + GetK8sObjectWithRetry(ctx, getFunc, managedUpgradeOperatorDeployment, metav1.GetOptions{}) }, SpecTimeout(2*time.Minute)) }) var _ = Describe("ARO Operator - ImageConfig Reconciler", func() { const ( - imageconfigFlag = operator.ImageConfigEnabled + imageConfigFlag = operator.ImageConfigEnabled optionalRegistry = "quay.io" timeout = 5 * time.Minute ) var requiredRegistries []string - var imageconfig *configv1.Image + var imageConfig *configv1.Image sliceEqual := func(a, b []string) bool { if len(a) != len(b) { @@ -530,14 +506,14 @@ var _ = Describe("ARO Operator - ImageConfig Reconciler", func() { verifyLists := func(expectedAllowList, expectedBlockList []string) func(g Gomega, ctx context.Context) { return func(g Gomega, ctx context.Context) { By("getting the actual Image config state") - // have to do this because using declaration assignment in following line results in pre-declared imageconfig var not being used + // have to do this because using declaration assignment in following line results in pre-declared imageConfig var not being used var err error - imageconfig, err = clients.ConfigClient.ConfigV1().Images().Get(ctx, "cluster", metav1.GetOptions{}) + imageConfig, err = clients.ConfigClient.ConfigV1().Images().Get(ctx, "cluster", metav1.GetOptions{}) g.Expect(err).NotTo(HaveOccurred()) By("comparing the actual allow and block lists with expected lists") - g.Expect(sliceEqual(imageconfig.Spec.RegistrySources.AllowedRegistries, expectedAllowList)).To(BeTrue()) - 
-			g.Expect(sliceEqual(imageconfig.Spec.RegistrySources.BlockedRegistries, expectedBlockList)).To(BeTrue())
+			g.Expect(sliceEqual(imageConfig.Spec.RegistrySources.AllowedRegistries, expectedAllowList)).To(BeTrue())
+			g.Expect(sliceEqual(imageConfig.Spec.RegistrySources.BlockedRegistries, expectedBlockList)).To(BeTrue())
 		}
 	}
 
@@ -546,7 +522,7 @@ var _ = Describe("ARO Operator - ImageConfig Reconciler", func() {
 		instance, err := clients.AROClusters.AroV1alpha1().Clusters().Get(ctx, "cluster", metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
-		if !instance.Spec.OperatorFlags.GetSimpleBoolean(imageconfigFlag) {
+		if !instance.Spec.OperatorFlags.GetSimpleBoolean(imageConfigFlag) {
 			Skip("ImageConfig Controller is not enabled, skipping test")
 		}
 
@@ -555,16 +531,16 @@ var _ = Describe("ARO Operator - ImageConfig Reconciler", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("getting the Image config")
-		imageconfig, err = clients.ConfigClient.ConfigV1().Images().Get(ctx, "cluster", metav1.GetOptions{})
+		imageConfig, err = clients.ConfigClient.ConfigV1().Images().Get(ctx, "cluster", metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 	})
 
 	AfterEach(func(ctx context.Context) {
 		By("resetting Image config")
-		imageconfig.Spec.RegistrySources.AllowedRegistries = nil
-		imageconfig.Spec.RegistrySources.BlockedRegistries = nil
+		imageConfig.Spec.RegistrySources.AllowedRegistries = nil
+		imageConfig.Spec.RegistrySources.BlockedRegistries = nil
 
-		_, err := clients.ConfigClient.ConfigV1().Images().Update(ctx, imageconfig, metav1.UpdateOptions{})
+		_, err := clients.ConfigClient.ConfigV1().Images().Update(ctx, imageConfig, metav1.UpdateOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
 		By("waiting for the Image config to be reset")
@@ -572,8 +548,8 @@ var _ = Describe("ARO Operator - ImageConfig Reconciler", func() {
 	})
 
 	It("must set empty allow and block lists in Image config by default", func() {
-		allowList := imageconfig.Spec.RegistrySources.AllowedRegistries
-		blockList := imageconfig.Spec.RegistrySources.BlockedRegistries
+		allowList := imageConfig.Spec.RegistrySources.AllowedRegistries
+		blockList := imageConfig.Spec.RegistrySources.BlockedRegistries
 
 		By("checking that the allow and block lists are empty")
 		Expect(allowList).To(BeEmpty())
@@ -582,8 +558,8 @@ var _ = Describe("ARO Operator - ImageConfig Reconciler", func() {
 
 	It("must add the ARO service registries to the allow list alongside the customer added registries", func(ctx context.Context) {
 		By("adding the test registry to the allow list of the Image config")
-		imageconfig.Spec.RegistrySources.AllowedRegistries = append(imageconfig.Spec.RegistrySources.AllowedRegistries, optionalRegistry)
-		_, err := clients.ConfigClient.ConfigV1().Images().Update(ctx, imageconfig, metav1.UpdateOptions{})
+		imageConfig.Spec.RegistrySources.AllowedRegistries = append(imageConfig.Spec.RegistrySources.AllowedRegistries, optionalRegistry)
+		_, err := clients.ConfigClient.ConfigV1().Images().Update(ctx, imageConfig, metav1.UpdateOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
 		By("checking that Image config eventually has ARO service registries and the test registry in the allow list")
@@ -593,8 +569,8 @@ var _ = Describe("ARO Operator - ImageConfig Reconciler", func() {
 
 	It("must remove ARO service registries from the block lists, but keep customer added registries", func(ctx context.Context) {
 		By("adding the test registry and one of the ARO service registry to the block list of the Image config")
-		imageconfig.Spec.RegistrySources.BlockedRegistries = append(imageconfig.Spec.RegistrySources.BlockedRegistries, optionalRegistry, requiredRegistries[0])
-		_, err := clients.ConfigClient.ConfigV1().Images().Update(ctx, imageconfig, metav1.UpdateOptions{})
+		imageConfig.Spec.RegistrySources.BlockedRegistries = append(imageConfig.Spec.RegistrySources.BlockedRegistries, optionalRegistry, requiredRegistries[0])
+		_, err := clients.ConfigClient.ConfigV1().Images().Update(ctx, imageConfig, metav1.UpdateOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
 		By("checking that Image config eventually doesn't include ARO service registries")
@@ -677,30 +653,17 @@ var _ = Describe("ARO Operator - Guardrails", func() {
 			Skip("Guardrails Controller is not enabled, skipping test")
 		}
 
-		deleteControllerManagerDeployment := func(ctx context.Context) error {
-			return clients.Kubernetes.
-				AppsV1().
-				Deployments(guardrailsNamespace).
-				Delete(ctx, gkControllerManagerDeployment, metav1.DeleteOptions{})
-		}
-
-		controllerManagerDeploymentExists := func(g Gomega, ctx context.Context) {
-			_, err := clients.Kubernetes.
-				AppsV1().
-				Deployments(guardrailsNamespace).
-				Get(ctx, gkControllerManagerDeployment, metav1.GetOptions{})
-
-			g.Expect(err).ToNot(HaveOccurred())
-		}
+		getFunc := clients.Kubernetes.AppsV1().Deployments(guardrailsNamespace).Get
+		deleteFunc := clients.Kubernetes.AppsV1().Deployments(guardrailsNamespace).Delete
 
 		By("waiting for the gatekeeper Controller Manager deployment to be ready")
-		Eventually(controllerManagerDeploymentExists).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
+		GetK8sObjectWithRetry(ctx, getFunc, gkControllerManagerDeployment, metav1.GetOptions{})
 
 		By("deleting the gatekeeper Controller Manager deployment")
-		Expect(deleteControllerManagerDeployment(ctx)).Should(Succeed())
+		DeleteK8sObjectWithRetry(ctx, deleteFunc, gkControllerManagerDeployment, metav1.DeleteOptions{})
 
 		By("waiting for the gatekeeper Controller Manager deployment to be reconciled")
-		Eventually(controllerManagerDeploymentExists).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
+		GetK8sObjectWithRetry(ctx, getFunc, gkControllerManagerDeployment, metav1.GetOptions{})
 	})
 
 	It("Audit must be restored if deleted", func(ctx context.Context) {
@@ -712,35 +675,22 @@ var _ = Describe("ARO Operator - Guardrails", func() {
 			Skip("Guardrails Controller is not enabled, skipping test")
 		}
 
-		deleteAuditDeployment := func(ctx context.Context) error {
-			return clients.Kubernetes.
-				AppsV1().
-				Deployments(guardrailsNamespace).
-				Delete(ctx, gkAuditDeployment, metav1.DeleteOptions{})
-		}
-
-		auditDeploymentExists := func(g Gomega, ctx context.Context) {
-			_, err := clients.Kubernetes.
-				AppsV1().
-				Deployments(guardrailsNamespace).
-				Get(ctx, gkAuditDeployment, metav1.GetOptions{})
-
-			g.Expect(err).ToNot(HaveOccurred())
-		}
+		getFunc := clients.Kubernetes.AppsV1().Deployments(guardrailsNamespace).Get
+		deleteFunc := clients.Kubernetes.AppsV1().Deployments(guardrailsNamespace).Delete
 
 		By("waiting for the gatekeeper Audit deployment to be ready")
-		Eventually(auditDeploymentExists).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
+		GetK8sObjectWithRetry(ctx, getFunc, gkAuditDeployment, metav1.GetOptions{})
 
 		By("deleting the gatekeeper Audit deployment")
-		Expect(deleteAuditDeployment(ctx)).Should(Succeed())
+		DeleteK8sObjectWithRetry(ctx, deleteFunc, gkAuditDeployment, metav1.DeleteOptions{})
 
 		By("waiting for the gatekeeper Audit deployment to be reconciled")
-		Eventually(auditDeploymentExists).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
+		GetK8sObjectWithRetry(ctx, getFunc, gkAuditDeployment, metav1.GetOptions{})
 	})
 })
 
-var _ = Describe("ARO Operator - Cloud Provder Config ConfigMap", func() {
+var _ = Describe("ARO Operator - Cloud Provider Config ConfigMap", func() {
 	const (
 		cpcControllerEnabled = operator.CloudProviderConfigEnabled
 	)
@@ -754,13 +704,9 @@ var _ = Describe("ARO Operator - Cloud Provder Config ConfigMap", func() {
 			Skip("CloudProviderConfig Controller is not enabled, skipping test")
 		}
 
-		var cm *corev1.ConfigMap
 		By("waiting for the ConfigMap to make sure it exists")
-		Eventually(func(g Gomega, ctx context.Context) {
-			var err error
-			cm, err = clients.Kubernetes.CoreV1().ConfigMaps("openshift-config").Get(ctx, "cloud-provider-config", metav1.GetOptions{})
-			g.Expect(err).ToNot(HaveOccurred())
-		}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
+		getFunc := clients.Kubernetes.CoreV1().ConfigMaps("openshift-config").Get
+		cm := GetK8sObjectWithRetry(ctx, getFunc, "cloud-provider-config", metav1.GetOptions{})
 
 		By("waiting for disableOutboundSNAT to be true")
 		Eventually(func(g Gomega, ctx context.Context) {
diff --git a/test/e2e/update.go b/test/e2e/update.go
index b5ae061a678..99f499e33bf 100644
--- a/test/e2e/update.go
+++ b/test/e2e/update.go
@@ -51,12 +51,12 @@ var _ = Describe("Update clusters", func() {
 
 	It("must restart the aro-operator-master Deployment", func(ctx context.Context) {
 		By("saving the current revision of the aro-operator-master Deployment")
-		d, err := clients.Kubernetes.AppsV1().Deployments("openshift-azure-operator").Get(ctx, "aro-operator-master", metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		getFunc := clients.Kubernetes.AppsV1().Deployments("openshift-azure-operator").Get
+		deployment := GetK8sObjectWithRetry(ctx, getFunc, "aro-operator-master", metav1.GetOptions{})
 
-		Expect(d.ObjectMeta.Annotations).To(HaveKey("deployment.kubernetes.io/revision"))
+		Expect(deployment.ObjectMeta.Annotations).To(HaveKey("deployment.kubernetes.io/revision"))
 
-		oldRevision, err := strconv.Atoi(d.ObjectMeta.Annotations["deployment.kubernetes.io/revision"])
+		oldRevision, err := strconv.Atoi(deployment.ObjectMeta.Annotations["deployment.kubernetes.io/revision"])
 		Expect(err).NotTo(HaveOccurred())
 
 		By("sending the PATCH request to update the cluster")
@@ -64,14 +64,12 @@
 		Expect(err).NotTo(HaveOccurred())
 
 		By("checking that the aro-operator-master Deployment was restarted")
-		d, err = clients.Kubernetes.AppsV1().Deployments("openshift-azure-operator").Get(ctx, "aro-operator-master", metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
-
-		Expect(d.Spec.Template.Annotations).To(HaveKey("kubectl.kubernetes.io/restartedAt"))
+		deployment = GetK8sObjectWithRetry(ctx, getFunc, "aro-operator-master", metav1.GetOptions{})
 
-		Expect(d.ObjectMeta.Annotations).To(HaveKey("deployment.kubernetes.io/revision"))
+		Expect(deployment.Spec.Template.Annotations).To(HaveKey("kubectl.kubernetes.io/restartedAt"))
+		Expect(deployment.ObjectMeta.Annotations).To(HaveKey("deployment.kubernetes.io/revision"))
 
-		newRevision, err := strconv.Atoi(d.ObjectMeta.Annotations["deployment.kubernetes.io/revision"])
+		newRevision, err := strconv.Atoi(deployment.ObjectMeta.Annotations["deployment.kubernetes.io/revision"])
 		Expect(err).NotTo(HaveOccurred())
 		Expect(newRevision).To(Equal(oldRevision + 1))
 	})
diff --git a/test/util/project/project.go b/test/util/project/project.go
deleted file mode 100644
index 977212e8bbf..00000000000
--- a/test/util/project/project.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package project
-
-// Copyright (c) Microsoft Corporation.
-// Licensed under the Apache License 2.0.
-
-import (
-	"context"
-	"fmt"
-
-	projectv1 "github.com/openshift/api/project/v1"
-	projectclient "github.com/openshift/client-go/project/clientset/versioned"
-	authorizationv1 "k8s.io/api/authorization/v1"
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-)
-
-type Project struct {
-	projectClient projectclient.Interface
-	cli           kubernetes.Interface
-	Name          string
-}
-
-func NewProject(cli kubernetes.Interface, projectClient projectclient.Interface, name string) Project {
-	return Project{
-		projectClient: projectClient,
-		cli:           cli,
-		Name:          name,
-	}
-}
-
-func (p Project) Create(ctx context.Context) error {
-	_, err := p.projectClient.ProjectV1().Projects().Create(ctx, &projectv1.Project{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: p.Name,
-		},
-	}, metav1.CreateOptions{})
-	return err
-}
-
-func (p Project) Delete(ctx context.Context) error {
-	return p.projectClient.ProjectV1().Projects().Delete(ctx, p.Name, metav1.DeleteOptions{})
-}
-
-// VerifyProjectIsReady verifies that the project and relevant resources have been created correctly and returns error otherwise
-func (p Project) Verify(ctx context.Context) error {
-	_, err := p.cli.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx,
-		&authorizationv1.SelfSubjectAccessReview{
-			Spec: authorizationv1.SelfSubjectAccessReviewSpec{
-				ResourceAttributes: &authorizationv1.ResourceAttributes{
-					Namespace: p.Name,
-					Verb:      "create",
-					Resource:  "pods",
-				},
-			},
-		}, metav1.CreateOptions{})
-	if err != nil {
-		return err
-	}
-
-	sa, err := p.cli.CoreV1().ServiceAccounts(p.Name).Get(ctx, "default", metav1.GetOptions{})
-	if err != nil || kerrors.IsNotFound(err) {
-		return fmt.Errorf("error retrieving default ServiceAccount")
-	}
-
-	if len(sa.Secrets) == 0 {
-		return fmt.Errorf("default ServiceAccount does not have secrets")
-	}
-
-	proj, err := p.projectClient.ProjectV1().Projects().Get(ctx, p.Name, metav1.GetOptions{})
-	if err != nil {
-		return err
-	}
-	_, found := proj.Annotations["openshift.io/sa.scc.uid-range"]
-	if !found {
-		return fmt.Errorf("SCC annotation does not exist")
-	}
-	return nil
-}
-
-// VerifyProjectIsDeleted verifies that the project does not exist and returns error if a project exists
-// or if it encounters an error other than NotFound
-func (p Project) VerifyProjectIsDeleted(ctx context.Context) error {
-	_, err := p.projectClient.ProjectV1().Projects().Get(ctx, p.Name, metav1.GetOptions{})
-	if kerrors.IsNotFound(err) {
-		return nil
-	}
-
-	return fmt.Errorf("Project exists")
-}
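
Note on the helpers used above: CreateK8sObjectWithRetry, GetK8sObjectWithRetry, ListK8sObjectWithRetry and DeleteK8sObjectWithRetry are shared e2e test utilities that this diff references but does not define. As a rough, hypothetical sketch only (the real signatures and bodies live elsewhere in the test suite and may differ), helpers of this shape can be built on the same Gomega Eventually pattern and DefaultEventuallyTimeout that these tests already use:

package e2e

// Hypothetical sketch of the retry helpers referenced in the hunks above.
// DefaultEventuallyTimeout is assumed to be the package-level timeout used
// throughout the e2e tests; it is not defined here.

import (
	"context"

	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kruntime "k8s.io/apimachinery/pkg/runtime"
)

// GetK8sObjectWithRetry polls the supplied getter until it succeeds (or the
// Eventually timeout expires) and returns the fetched object.
func GetK8sObjectWithRetry[T kruntime.Object](
	ctx context.Context,
	getFunc func(ctx context.Context, name string, options metav1.GetOptions) (T, error),
	name string,
	options metav1.GetOptions,
) T {
	var object T
	Eventually(func(g Gomega, ctx context.Context) {
		result, err := getFunc(ctx, name, options)
		g.Expect(err).NotTo(HaveOccurred())
		object = result
	}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
	return object
}

// DeleteK8sObjectWithRetry polls the supplied deleter until the delete call
// stops returning an error.
func DeleteK8sObjectWithRetry(
	ctx context.Context,
	deleteFunc func(ctx context.Context, name string, options metav1.DeleteOptions) error,
	name string,
	options metav1.DeleteOptions,
) {
	Eventually(func(g Gomega, ctx context.Context) {
		g.Expect(deleteFunc(ctx, name, options)).NotTo(HaveOccurred())
	}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
}

The effect of the refactor is visible at the call sites above: the bound client method (for example clients.Kubernetes.CoreV1().Pods(namespace).Get) is passed in as the function argument, so each call site collapses to a single retried line instead of an explicit Eventually block with inline error assertions.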