From 28217134d850c11560a81ab275d526348bd62fc5 Mon Sep 17 00:00:00 2001 From: Yi Cai Date: Mon, 5 Jun 2023 17:22:06 -0400 Subject: [PATCH 01/19] Add not found check for notifications sa deletion Signed-off-by: Yi Cai --- controllers/argocd/notifications.go | 30 +++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/controllers/argocd/notifications.go b/controllers/argocd/notifications.go index 79a4766ed..e19d79717 100644 --- a/controllers/argocd/notifications.go +++ b/controllers/argocd/notifications.go @@ -140,8 +140,34 @@ func (r *ReconcileArgoCD) reconcileNotificationsServiceAccount(cr *argoprojv1a1. // SA exists but shouldn't, so it should be deleted if !cr.Spec.Notifications.Enabled { - log.Info(fmt.Sprintf("Deleting serviceaccount %s as notifications is disabled", sa.Name)) - return nil, r.Client.Delete(context.TODO(), sa) + // Define the Deployment and RoleBinding that depend on the ServiceAccount + deployment := &appsv1.Deployment{} + roleBinding := &rbacv1.RoleBinding{} + + deploymentName := fmt.Sprintf("%s-%s", cr.Name, "notifications-controller") + roleBindingName := fmt.Sprintf("%s-%s", cr.Name, common.ArgoCDNotificationsControllerComponent) + + depErr := r.Client.Get(context.TODO(), types.NamespacedName{Name: deploymentName, Namespace: sa.Namespace}, deployment) + rbErr := r.Client.Get(context.TODO(), types.NamespacedName{Name: roleBindingName, Namespace: sa.Namespace}, roleBinding) + + // Check if either the Deployment or RoleBinding still exist + if errors.IsNotFound(depErr) && errors.IsNotFound(rbErr) { + // If both do not exist, delete the ServiceAccount + log.Info(fmt.Sprintf("Deleting ServiceAccount %s as notifications are disabled", sa.Name)) + return nil, r.Client.Delete(context.TODO(), sa) + } else if depErr != nil && !errors.IsNotFound(depErr) { + // If Deployment still exists, don't delete the ServiceAccount and requeue the reconcile request + log.Info(fmt.Sprintf("Not deleting ServiceAccount %s because it is still used by Deployment: %s", sa.Name, deploymentName)) + + // Deployment still exists, requeue the reconcile request + return nil, fmt.Errorf("deployment %s still exists, requeue the reconcile request", deploymentName) + } else if rbErr != nil && !errors.IsNotFound(rbErr) { + // If RoleBinding still exists, don't delete the ServiceAccount and requeue the reconcile request + log.Info(fmt.Sprintf("Not deleting ServiceAccount %s because it is still used by RoleBinding: %s", sa.Name, roleBindingName)) + + // RoleBinding still exists, requeue the reconcile request + return nil, fmt.Errorf("roleBinding %s still exists, requeue the reconcile request", roleBindingName) + } } return sa, nil From ec8ebb3443d1762d996bc423e155b494e210cdaf Mon Sep 17 00:00:00 2001 From: Yi Cai Date: Mon, 5 Jun 2023 22:15:29 -0400 Subject: [PATCH 02/19] Separate deletion from predicate function Signed-off-by: Yi Cai --- controllers/argocd/util.go | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/controllers/argocd/util.go b/controllers/argocd/util.go index aa7507014..5e45170dd 100644 --- a/controllers/argocd/util.go +++ b/controllers/argocd/util.go @@ -827,6 +827,11 @@ func (r *ReconcileArgoCD) reconcileResources(cr *argoprojv1a1.ArgoCD) error { if err := r.reconcileNotificationsController(cr); err != nil { return err } + } else { + log.Info("deleting Notifications controller resources") + if err := r.deleteNotificationsResources(cr); err != nil { + return err + } } if err := r.reconcileRepoServerTLSSecret(cr); err 
!= nil { @@ -1027,14 +1032,14 @@ func (r *ReconcileArgoCD) setResourceWatches(bldr *builder.Builder, clusterResou if !ok { return false } - if oldCR.Spec.Notifications.Enabled && !newCR.Spec.Notifications.Enabled { - err := r.deleteNotificationsResources(newCR) - if err != nil { - log.Error(err, fmt.Sprintf("Failed to delete notifications controller resources for ArgoCD %s in namespace %s", - newCR.Name, newCR.Namespace)) - } - } - return true + // if oldCR.Spec.Notifications.Enabled && !newCR.Spec.Notifications.Enabled { + // err := r.deleteNotificationsResources(newCR) + // if err != nil { + // log.Error(err, fmt.Sprintf("Failed to delete notifications controller resources for ArgoCD %s in namespace %s", + // newCR.Name, newCR.Namespace)) + // } + // } + return oldCR.Spec.Notifications.Enabled && !newCR.Spec.Notifications.Enabled }, } From ec137447b582a516b06e68c4b5337944ac2098d2 Mon Sep 17 00:00:00 2001 From: Yi Cai Date: Tue, 6 Jun 2023 00:19:43 -0400 Subject: [PATCH 03/19] Updated reconcileNotificationsServiceAccount function Signed-off-by: Yi Cai --- controllers/argocd/notifications.go | 51 ++++++++++++++++------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/controllers/argocd/notifications.go b/controllers/argocd/notifications.go index e19d79717..c2b82f742 100644 --- a/controllers/argocd/notifications.go +++ b/controllers/argocd/notifications.go @@ -115,31 +115,36 @@ func (r *ReconcileArgoCD) deleteNotificationsResources(cr *argoprojv1a1.ArgoCD) func (r *ReconcileArgoCD) reconcileNotificationsServiceAccount(cr *argoprojv1a1.ArgoCD) (*corev1.ServiceAccount, error) { sa := newServiceAccountWithName(common.ArgoCDNotificationsControllerComponent, cr) + err := argoutil.FetchObject(r.Client, cr.Namespace, sa.Name, sa) - if err := argoutil.FetchObject(r.Client, cr.Namespace, sa.Name, sa); err != nil { - if !errors.IsNotFound(err) { - return nil, fmt.Errorf("failed to get the serviceAccount associated with %s : %s", sa.Name, err) - } - - // SA doesn't exist and shouldn't, nothing to do here - if !cr.Spec.Notifications.Enabled { - return nil, nil - } - - // SA doesn't exist but should, so it should be created - if err := controllerutil.SetControllerReference(cr, sa, r.Scheme); err != nil { - return nil, err + if cr.Spec.Notifications.Enabled { + if err != nil { + // handle case when service account doesn't exist and should be created + if errors.IsNotFound(err) { + if err := controllerutil.SetControllerReference(cr, sa, r.Scheme); err != nil { + return nil, err + } + log.Info(fmt.Sprintf("Creating serviceaccount %s", sa.Name)) + if err := r.Client.Create(context.TODO(), sa); err != nil { + return nil, err + } + } else { + // handle other errors + return nil, fmt.Errorf("failed to get the serviceAccount associated with %s : %s", sa.Name, err) + } } - - log.Info(fmt.Sprintf("Creating serviceaccount %s", sa.Name)) - err := r.Client.Create(context.TODO(), sa) + // service account already exists, return it + return sa, nil + } else { if err != nil { - return nil, err + // handle case when service account doesn't exist and should not exist + if errors.IsNotFound(err) { + return nil, nil + } + // handle other errors + return nil, fmt.Errorf("failed to get the serviceAccount associated with %s : %s", sa.Name, err) } - } - - // SA exists but shouldn't, so it should be deleted - if !cr.Spec.Notifications.Enabled { + // handle case when service account exists and should be deleted // Define the Deployment and RoleBinding that depend on the ServiceAccount deployment := 
&appsv1.Deployment{} roleBinding := &rbacv1.RoleBinding{} @@ -169,8 +174,8 @@ func (r *ReconcileArgoCD) reconcileNotificationsServiceAccount(cr *argoprojv1a1. return nil, fmt.Errorf("roleBinding %s still exists, requeue the reconcile request", roleBindingName) } } - - return sa, nil + // in case of deletion, return nil + return nil, nil } func (r *ReconcileArgoCD) reconcileNotificationsRole(cr *argoprojv1a1.ArgoCD) (*rbacv1.Role, error) { From 8f9d9bb738e3129a89bd6b3b3e84d76937f5c632 Mon Sep 17 00:00:00 2001 From: Yi Cai Date: Wed, 7 Jun 2023 14:15:31 -0400 Subject: [PATCH 04/19] Updated notifications reconciliation logic Signed-off-by: Yi Cai --- controllers/argocd/notifications.go | 336 +++++++++++++---------- controllers/argocd/notifications_test.go | 185 ++++++++++++- controllers/argocd/util.go | 3 + 3 files changed, 357 insertions(+), 167 deletions(-) diff --git a/controllers/argocd/notifications.go b/controllers/argocd/notifications.go index c2b82f742..57325d6dc 100644 --- a/controllers/argocd/notifications.go +++ b/controllers/argocd/notifications.go @@ -77,34 +77,33 @@ func (r *ReconcileArgoCD) deleteNotificationsResources(cr *argoprojv1a1.ArgoCD) } } - log.Info("reconciling notifications deployment") - if err := r.reconcileNotificationsDeployment(cr, sa); err != nil { + log.Info("delete notifications deployment") + if err := r.deleteNotificationsDeployment(cr); err != nil { return err } - log.Info("reconciling notifications secret") - if err := r.reconcileNotificationsSecret(cr); err != nil { + log.Info("delete notifications secret") + if err := r.deleteNotificationsSecret(cr); err != nil { return err } - log.Info("reconciling notifications configmap") - if err := r.reconcileNotificationsConfigMap(cr); err != nil { + log.Info("delete notifications configmap") + if err := r.deleteNotificationsConfigMap(cr); err != nil { return err } - log.Info("reconciling notifications role binding") - if err := r.reconcileNotificationsRoleBinding(cr, role, sa); err != nil { + log.Info("delete notifications role binding") + if err := r.deleteNotificationsRoleBinding(cr); err != nil { return err } - log.Info("reconciling notifications role") - _, err := r.reconcileNotificationsRole(cr) - if err != nil { + log.Info("delete notifications role") + if err := r.deleteNotificationsRole(cr); err != nil { return err } - log.Info("reconciling notifications serviceaccount") - _, err = r.reconcileNotificationsServiceAccount(cr) + log.Info("delete notifications serviceaccount") + err := r.deleteNotificationsServiceAccount(cr, sa) if err != nil { return err } @@ -113,69 +112,26 @@ func (r *ReconcileArgoCD) deleteNotificationsResources(cr *argoprojv1a1.ArgoCD) } func (r *ReconcileArgoCD) reconcileNotificationsServiceAccount(cr *argoprojv1a1.ArgoCD) (*corev1.ServiceAccount, error) { - sa := newServiceAccountWithName(common.ArgoCDNotificationsControllerComponent, cr) err := argoutil.FetchObject(r.Client, cr.Namespace, sa.Name, sa) - if cr.Spec.Notifications.Enabled { - if err != nil { - // handle case when service account doesn't exist and should be created - if errors.IsNotFound(err) { - if err := controllerutil.SetControllerReference(cr, sa, r.Scheme); err != nil { - return nil, err - } - log.Info(fmt.Sprintf("Creating serviceaccount %s", sa.Name)) - if err := r.Client.Create(context.TODO(), sa); err != nil { - return nil, err - } - } else { - // handle other errors - return nil, fmt.Errorf("failed to get the serviceAccount associated with %s : %s", sa.Name, err) + if err != nil { + // handle case when service 
account doesn't exist + if errors.IsNotFound(err) { + if err := controllerutil.SetControllerReference(cr, sa, r.Scheme); err != nil { + return nil, err } - } - // service account already exists, return it - return sa, nil - } else { - if err != nil { - // handle case when service account doesn't exist and should not exist - if errors.IsNotFound(err) { - return nil, nil + log.Info(fmt.Sprintf("Creating serviceaccount %s", sa.Name)) + if err := r.Client.Create(context.TODO(), sa); err != nil { + return nil, err } + } else { // handle other errors - return nil, fmt.Errorf("failed to get the serviceAccount associated with %s : %s", sa.Name, err) - } - // handle case when service account exists and should be deleted - // Define the Deployment and RoleBinding that depend on the ServiceAccount - deployment := &appsv1.Deployment{} - roleBinding := &rbacv1.RoleBinding{} - - deploymentName := fmt.Sprintf("%s-%s", cr.Name, "notifications-controller") - roleBindingName := fmt.Sprintf("%s-%s", cr.Name, common.ArgoCDNotificationsControllerComponent) - - depErr := r.Client.Get(context.TODO(), types.NamespacedName{Name: deploymentName, Namespace: sa.Namespace}, deployment) - rbErr := r.Client.Get(context.TODO(), types.NamespacedName{Name: roleBindingName, Namespace: sa.Namespace}, roleBinding) - - // Check if either the Deployment or RoleBinding still exist - if errors.IsNotFound(depErr) && errors.IsNotFound(rbErr) { - // If both do not exist, delete the ServiceAccount - log.Info(fmt.Sprintf("Deleting ServiceAccount %s as notifications are disabled", sa.Name)) - return nil, r.Client.Delete(context.TODO(), sa) - } else if depErr != nil && !errors.IsNotFound(depErr) { - // If Deployment still exists, don't delete the ServiceAccount and requeue the reconcile request - log.Info(fmt.Sprintf("Not deleting ServiceAccount %s because it is still used by Deployment: %s", sa.Name, deploymentName)) - - // Deployment still exists, requeue the reconcile request - return nil, fmt.Errorf("deployment %s still exists, requeue the reconcile request", deploymentName) - } else if rbErr != nil && !errors.IsNotFound(rbErr) { - // If RoleBinding still exists, don't delete the ServiceAccount and requeue the reconcile request - log.Info(fmt.Sprintf("Not deleting ServiceAccount %s because it is still used by RoleBinding: %s", sa.Name, roleBindingName)) - - // RoleBinding still exists, requeue the reconcile request - return nil, fmt.Errorf("roleBinding %s still exists, requeue the reconcile request", roleBindingName) + return nil, fmt.Errorf("failed to get the serviceAccount associated with %s: %s", sa.Name, err) } } - // in case of deletion, return nil - return nil, nil + + return sa, nil } func (r *ReconcileArgoCD) reconcileNotificationsRole(cr *argoprojv1a1.ArgoCD) (*rbacv1.Role, error) { @@ -184,16 +140,12 @@ func (r *ReconcileArgoCD) reconcileNotificationsRole(cr *argoprojv1a1.ArgoCD) (* desiredRole := newRole(common.ArgoCDNotificationsControllerComponent, policyRules, cr) existingRole := &rbacv1.Role{} - if err := argoutil.FetchObject(r.Client, cr.Namespace, desiredRole.Name, existingRole); err != nil { + err := argoutil.FetchObject(r.Client, cr.Namespace, desiredRole.Name, existingRole) + if err != nil { if !errors.IsNotFound(err) { return nil, fmt.Errorf("failed to get the role associated with %s : %s", desiredRole.Name, err) } - // role does not exist and shouldn't, nothing to do here - if !cr.Spec.Notifications.Enabled { - return nil, nil - } - // role does not exist but should, so it should be created if err := 
controllerutil.SetControllerReference(cr, desiredRole, r.Scheme); err != nil { return nil, err @@ -207,12 +159,6 @@ func (r *ReconcileArgoCD) reconcileNotificationsRole(cr *argoprojv1a1.ArgoCD) (* return desiredRole, nil } - // role exists but shouldn't, so it should be deleted - if !cr.Spec.Notifications.Enabled { - log.Info(fmt.Sprintf("Deleting role %s as notifications is disabled", existingRole.Name)) - return nil, r.Client.Delete(context.TODO(), existingRole) - } - // role exists and should. Reconcile role if changed if !reflect.DeepEqual(existingRole.Rules, desiredRole.Rules) { existingRole.Rules = desiredRole.Rules @@ -249,11 +195,6 @@ func (r *ReconcileArgoCD) reconcileNotificationsRoleBinding(cr *argoprojv1a1.Arg return fmt.Errorf("failed to get the rolebinding associated with %s : %s", desiredRoleBinding.Name, err) } - // roleBinding does not exist and shouldn't, nothing to do here - if !cr.Spec.Notifications.Enabled { - return nil - } - // roleBinding does not exist but should, so it should be created if err := controllerutil.SetControllerReference(cr, desiredRoleBinding, r.Scheme); err != nil { return err @@ -263,12 +204,6 @@ func (r *ReconcileArgoCD) reconcileNotificationsRoleBinding(cr *argoprojv1a1.Arg return r.Client.Create(context.TODO(), desiredRoleBinding) } - // roleBinding exists but shouldn't, so it should be deleted - if !cr.Spec.Notifications.Enabled { - log.Info(fmt.Sprintf("Deleting roleBinding %s as notifications is disabled", existingRoleBinding.Name)) - return r.Client.Delete(context.TODO(), existingRoleBinding) - } - // roleBinding exists and should. Reconcile roleBinding if changed if !reflect.DeepEqual(existingRoleBinding.RoleRef, desiredRoleBinding.RoleRef) { // if the RoleRef changes, delete the existing role binding and create a new one @@ -287,9 +222,7 @@ func (r *ReconcileArgoCD) reconcileNotificationsRoleBinding(cr *argoprojv1a1.Arg } func (r *ReconcileArgoCD) reconcileNotificationsDeployment(cr *argoprojv1a1.ArgoCD, sa *corev1.ServiceAccount) error { - desiredDeployment := newDeploymentWithSuffix("notifications-controller", "controller", cr) - desiredDeployment.Spec.Strategy = appsv1.DeploymentStrategy{ Type: appsv1.RecreateDeploymentStrategyType, } @@ -368,19 +301,14 @@ func (r *ReconcileArgoCD) reconcileNotificationsDeployment(cr *argoprojv1a1.Argo }} // fetch existing deployment by name - deploymentChanged := false existingDeployment := &appsv1.Deployment{} - if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: desiredDeployment.Name, Namespace: cr.Namespace}, existingDeployment); err != nil { - if !errors.IsNotFound(err) { - return fmt.Errorf("failed to get the deployment associated with %s : %s", existingDeployment.Name, err) - } - - // deployment does not exist and shouldn't, nothing to do here - if !cr.Spec.Notifications.Enabled { - return nil - } + err := r.Client.Get(context.TODO(), types.NamespacedName{Name: desiredDeployment.Name, Namespace: cr.Namespace}, existingDeployment) + if err != nil && !errors.IsNotFound(err) { + return err + } - // deployment does not exist but should, so it should be created + if err != nil && errors.IsNotFound(err) { + // deployment does not exist but should, so create it if err := controllerutil.SetControllerReference(cr, desiredDeployment, r.Scheme); err != nil { return err } @@ -389,15 +317,21 @@ func (r *ReconcileArgoCD) reconcileNotificationsDeployment(cr *argoprojv1a1.Argo return r.Client.Create(context.TODO(), desiredDeployment) } - // deployment exists but shouldn't, so it should be deleted 
- if !cr.Spec.Notifications.Enabled { - log.Info(fmt.Sprintf("Deleting deployment %s as notifications is disabled", existingDeployment.Name)) - return r.Client.Delete(context.TODO(), existingDeployment) - } - // deployment exists and should. Reconcile deployment if changed + deploymentChanged := updateDeployment(existingDeployment, desiredDeployment) updateNodePlacement(existingDeployment, desiredDeployment, &deploymentChanged) + if deploymentChanged { + return r.Client.Update(context.TODO(), existingDeployment) + } + + return nil + +} + +func updateDeployment(existingDeployment, desiredDeployment *appsv1.Deployment) bool { + deploymentChanged := false + if existingDeployment.Spec.Template.Spec.Containers[0].Image != desiredDeployment.Spec.Template.Spec.Containers[0].Image { existingDeployment.Spec.Template.Spec.Containers[0].Image = desiredDeployment.Spec.Template.Spec.Containers[0].Image existingDeployment.Spec.Template.ObjectMeta.Labels["image.upgraded"] = time.Now().UTC().Format("01022006-150406-MST") @@ -455,55 +389,44 @@ func (r *ReconcileArgoCD) reconcileNotificationsDeployment(cr *argoprojv1a1.Argo deploymentChanged = true } - if deploymentChanged { - return r.Client.Update(context.TODO(), existingDeployment) - } - - return nil - + return deploymentChanged } // reconcileNotificationsConfigMap only creates/deletes the argocd-notifications-cm based on whether notifications is enabled/disabled in the CR // It does not reconcile/overwrite any fields or information in the configmap itself func (r *ReconcileArgoCD) reconcileNotificationsConfigMap(cr *argoprojv1a1.ArgoCD) error { - desiredConfigMap := newConfigMapWithName("argocd-notifications-cm", cr) desiredConfigMap.Data = getDefaultNotificationsConfig() - cmExists := true existingConfigMap := &corev1.ConfigMap{} - if err := argoutil.FetchObject(r.Client, cr.Namespace, desiredConfigMap.Name, existingConfigMap); err != nil { + err := argoutil.FetchObject(r.Client, cr.Namespace, desiredConfigMap.Name, existingConfigMap) + if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("failed to get the configmap associated with %s : %s", desiredConfigMap.Name, err) } - cmExists = false - } - if cmExists { - // CM exists but shouldn't, so it should be deleted - if !cr.Spec.Notifications.Enabled { - log.Info(fmt.Sprintf("Deleting configmap %s as notifications is disabled", existingConfigMap.Name)) - return r.Client.Delete(context.TODO(), existingConfigMap) + // ConfigMap doesn't exist, so it should be created + if err := controllerutil.SetControllerReference(cr, desiredConfigMap, r.Scheme); err != nil { + return err } - // CM exists and should, nothing to do here - return nil - } + log.Info(fmt.Sprintf("Creating configmap %s", desiredConfigMap.Name)) + err := r.Client.Create(context.TODO(), desiredConfigMap) + if err != nil { + return err + } - // CM doesn't exist and shouldn't, nothing to do here - if !cr.Spec.Notifications.Enabled { return nil } - // CM doesn't exist but should, so it should be created - if err := controllerutil.SetControllerReference(cr, desiredConfigMap, r.Scheme); err != nil { - return err - } + // ConfigMap exists, reconcile if changed + if !reflect.DeepEqual(existingConfigMap.Data, desiredConfigMap.Data) { + existingConfigMap.Data = desiredConfigMap.Data + if err := controllerutil.SetControllerReference(cr, existingConfigMap, r.Scheme); err != nil { + return err + } - log.Info(fmt.Sprintf("Creating configmap %s", desiredConfigMap.Name)) - err := r.Client.Create(context.TODO(), desiredConfigMap) - if err != nil { - 
return err + return r.Client.Update(context.TODO(), existingConfigMap) } return nil @@ -512,7 +435,6 @@ func (r *ReconcileArgoCD) reconcileNotificationsConfigMap(cr *argoprojv1a1.ArgoC // reconcileNotificationsSecret only creates/deletes the argocd-notifications-secret based on whether notifications is enabled/disabled in the CR // It does not reconcile/overwrite any fields or information in the secret itself func (r *ReconcileArgoCD) reconcileNotificationsSecret(cr *argoprojv1a1.ArgoCD) error { - desiredSecret := argoutil.NewSecretWithName(cr, "argocd-notifications-secret") secretExists := true @@ -525,22 +447,10 @@ func (r *ReconcileArgoCD) reconcileNotificationsSecret(cr *argoprojv1a1.ArgoCD) } if secretExists { - // secret exists but shouldn't, so it should be deleted - if !cr.Spec.Notifications.Enabled { - log.Info(fmt.Sprintf("Deleting secret %s as notifications is disabled", existingSecret.Name)) - return r.Client.Delete(context.TODO(), existingSecret) - } - // secret exists and should, nothing to do here return nil } - // secret doesn't exist and shouldn't, nothing to do here - if !cr.Spec.Notifications.Enabled { - return nil - } - - // secret doesn't exist but should, so it should be created if err := controllerutil.SetControllerReference(cr, desiredSecret, r.Scheme); err != nil { return err } @@ -576,3 +486,125 @@ func getNotificationsResources(cr *argoprojv1a1.ArgoCD) corev1.ResourceRequireme return resources } + +func (r *ReconcileArgoCD) deleteNotificationsConfigMap(cr *argoprojv1a1.ArgoCD) error { + existingConfigMap := &corev1.ConfigMap{} + desiredConfigMap := newConfigMapWithName("argocd-notifications-cm", cr) + + if err := argoutil.FetchObject(r.Client, cr.Namespace, desiredConfigMap.Name, existingConfigMap); err != nil { + if !errors.IsNotFound(err) { + return err + } + + // ConfigMap does not exist and shouldn't, nothing to do here + return nil + } + + // ConfigMap exists but shouldn't, so it should be deleted + log.Info(fmt.Sprintf("Deleting configmap %s as notifications is disabled", existingConfigMap.Name)) + return r.Client.Delete(context.TODO(), existingConfigMap) +} + +func (r *ReconcileArgoCD) deleteNotificationsRole(cr *argoprojv1a1.ArgoCD) error { + desiredRole := newRole(common.ArgoCDNotificationsControllerComponent, nil, cr) + existingRole := &rbacv1.Role{} + + if err := argoutil.FetchObject(r.Client, cr.Namespace, desiredRole.Name, existingRole); err != nil { + // If an error other than 'NotFound' occurred during the fetch, return it + if !errors.IsNotFound(err) { + return err + } + // If the role doesn't exist, no further actions are necessary + return nil + } + + // At this point, the role exists, so proceed with its deletion + log.Info(fmt.Sprintf("Deleting role %s as notifications are disabled", existingRole.Name)) + return r.Client.Delete(context.TODO(), existingRole) +} + +func (r *ReconcileArgoCD) deleteNotificationsRoleBinding(cr *argoprojv1a1.ArgoCD) error { + existingRoleBinding := &rbacv1.RoleBinding{} + desiredRoleBinding := newRoleBindingWithname(common.ArgoCDNotificationsControllerComponent, cr) + if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: desiredRoleBinding.Name, Namespace: cr.Namespace}, existingRoleBinding); err != nil { + if !errors.IsNotFound(err) { + return err + } + // roleBinding does not exist and shouldn't, nothing to do here + return nil + } + + // roleBinding exists but shouldn't, so it should be deleted + log.Info(fmt.Sprintf("Deleting roleBinding %s as notifications is disabled", existingRoleBinding.Name)) + 
return r.Client.Delete(context.TODO(), existingRoleBinding) +} + +// deleteNotificationsSecret deletes the argocd-notifications-secret if notifications is disabled in the CR +func (r *ReconcileArgoCD) deleteNotificationsSecret(cr *argoprojv1a1.ArgoCD) error { + desiredSecret := argoutil.NewSecretWithName(cr, "argocd-notifications-secret") + existingSecret := &corev1.Secret{} + if err := argoutil.FetchObject(r.Client, cr.Namespace, desiredSecret.Name, existingSecret); err != nil { + if !errors.IsNotFound(err) { + return err + } + // secret doesn't exist and shouldn't, nothing to do here + return nil + } + + log.Info(fmt.Sprintf("Deleting secret %s as notifications is disabled", existingSecret.Name)) + return r.Client.Delete(context.TODO(), existingSecret) +} + +// deleteNotificationsDeployment deletes the notifications-controller Deployment if notifications are disabled. +func (r *ReconcileArgoCD) deleteNotificationsDeployment(cr *argoprojv1a1.ArgoCD) error { + desiredDeployment := newDeploymentWithSuffix("notifications-controller", "controller", cr) + existingDeployment := &appsv1.Deployment{} + if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: desiredDeployment.Name, Namespace: cr.Namespace}, existingDeployment); err != nil { + if !errors.IsNotFound(err) { + return err + } + // deployment does not exist and shouldn't, nothing to do here + return nil + } + + // Set controller reference before deleting the deployment + if err := controllerutil.SetControllerReference(cr, existingDeployment, r.Scheme); err != nil { + return err + } + + log.Info(fmt.Sprintf("Deleting deployment %s as notifications is disabled", existingDeployment.Name)) + return r.Client.Delete(context.TODO(), existingDeployment) +} + +func (r *ReconcileArgoCD) deleteNotificationsServiceAccount(cr *argoprojv1a1.ArgoCD, sa *corev1.ServiceAccount) error { + deployment := &appsv1.Deployment{} + roleBinding := &rbacv1.RoleBinding{} + + deploymentName := fmt.Sprintf("%s-%s", cr.Name, "notifications-controller") + roleBindingName := fmt.Sprintf("%s-%s", cr.Name, common.ArgoCDNotificationsControllerComponent) + + depErr := r.Client.Get(context.TODO(), types.NamespacedName{Name: deploymentName, Namespace: sa.Namespace}, deployment) + rbErr := r.Client.Get(context.TODO(), types.NamespacedName{Name: roleBindingName, Namespace: sa.Namespace}, roleBinding) + + // Check if either the Deployment or RoleBinding still exist + if errors.IsNotFound(depErr) && errors.IsNotFound(rbErr) { + // If both do not exist, delete the ServiceAccount + log.Info(fmt.Sprintf("Deleting ServiceAccount %s as notifications are disabled", sa.Name)) + return r.Client.Delete(context.TODO(), sa) + } else if depErr != nil && !errors.IsNotFound(depErr) { + // If Deployment still exists, don't delete the ServiceAccount and requeue the reconcile request + log.Info(fmt.Sprintf("Not deleting ServiceAccount %s because it is still used by Deployment: %s", sa.Name, deploymentName)) + + // Deployment still exists, requeue the reconcile request + return fmt.Errorf("deployment %s still exists, requeue the reconcile request", deploymentName) + } else if rbErr != nil && !errors.IsNotFound(rbErr) { + // If RoleBinding still exists, don't delete the ServiceAccount and requeue the reconcile request + log.Info(fmt.Sprintf("Not deleting ServiceAccount %s because it is still used by RoleBinding: %s", sa.Name, roleBindingName)) + + // RoleBinding still exists, requeue the reconcile request + return fmt.Errorf("roleBinding %s still exists, requeue the reconcile request", 
roleBindingName) + } + + // The ServiceAccount is not used by any resources, no action needed + return nil +} diff --git a/controllers/argocd/notifications_test.go b/controllers/argocd/notifications_test.go index 2648f5a0c..8a998b6bf 100644 --- a/controllers/argocd/notifications_test.go +++ b/controllers/argocd/notifications_test.go @@ -41,10 +41,55 @@ func TestReconcileNotifications_CreateRoles(t *testing.T) { assert.Equal(t, desiredPolicyRules, testRole.Rules) - a.Spec.Notifications.Enabled = false + // Modify the rules of the existing role + testRole.Rules = append(testRole.Rules, rbacv1.PolicyRule{ + Verbs: []string{"get", "list"}, + APIGroups: []string{"", "apps", "extensions", "argoproj.io"}, + Resources: []string{"configmaps", "secrets"}, + }) + + // Update the role with the modified rules + assert.NoError(t, r.Client.Update(context.TODO(), testRole)) + + // Reconcile again to correct the state _, err = r.reconcileNotificationsRole(a) assert.NoError(t, err) + // Fetch the updated role + updatedRole := &rbacv1.Role{} + assert.NoError(t, r.Client.Get(context.TODO(), types.NamespacedName{ + Name: generateResourceName(common.ArgoCDNotificationsControllerComponent, a), + Namespace: a.Namespace, + }, updatedRole)) + + // Assert that the rules have been updated back to match the desired state + assert.Equal(t, desiredPolicyRules, updatedRole.Rules) +} + +func TestReconcileNotifications_DeleteRoles(t *testing.T) { + logf.SetLogger(ZapLogger(true)) + a := makeTestArgoCD(func(a *argoprojv1alpha1.ArgoCD) { + a.Spec.Notifications.Enabled = true + }) + + r := makeTestReconciler(t, a) + + // Initially create the Role + _, err := r.reconcileNotificationsRole(a) + assert.NoError(t, err) + + testRole := &rbacv1.Role{} + assert.NoError(t, r.Client.Get(context.TODO(), types.NamespacedName{ + Name: generateResourceName(common.ArgoCDNotificationsControllerComponent, a), + Namespace: a.Namespace, + }, testRole)) + + // Disable notifications, which should trigger deletion of the Role + a.Spec.Notifications.Enabled = false + err = r.deleteNotificationsRole(a) + assert.NoError(t, err) + + // Check that the Role no longer exists err = r.Client.Get(context.TODO(), types.NamespacedName{ Name: generateResourceName(common.ArgoCDNotificationsControllerComponent, a), Namespace: a.Namespace, @@ -70,17 +115,39 @@ func TestReconcileNotifications_CreateServiceAccount(t *testing.T) { }, testSa)) assert.Equal(t, testSa.Name, desiredSa.Name) +} - a.Spec.Notifications.Enabled = false - _, err = r.reconcileNotificationsServiceAccount(a) +func TestReconcileNotifications_DeleteServiceAccount(t *testing.T) { + logf.SetLogger(ZapLogger(true)) + a := makeTestArgoCD(func(a *argoprojv1alpha1.ArgoCD) { + a.Spec.Notifications.Enabled = false + }) + + r := makeTestReconciler(t, a) + + sa := newServiceAccountWithName(common.ArgoCDNotificationsControllerComponent, a) + + // Create the ServiceAccount + assert.NoError(t, r.Client.Create(context.TODO(), sa)) + + // Ensure the ServiceAccount is created + testSa := &corev1.ServiceAccount{} + assert.NoError(t, r.Client.Get(context.TODO(), types.NamespacedName{ + Name: generateResourceName(common.ArgoCDNotificationsControllerComponent, a), + Namespace: a.Namespace, + }, testSa)) + assert.Equal(t, testSa.Name, sa.Name) + + // Call the deleteNotificationsServiceAccount function + err := r.deleteNotificationsServiceAccount(a, sa) assert.NoError(t, err) + // Ensure the ServiceAccount is deleted err = r.Client.Get(context.TODO(), types.NamespacedName{ Name: 
generateResourceName(common.ArgoCDNotificationsControllerComponent, a), Namespace: a.Namespace, }, testSa) assert.True(t, errors.IsNotFound(err)) - } func TestReconcileNotifications_CreateRoleBinding(t *testing.T) { @@ -107,11 +174,40 @@ func TestReconcileNotifications_CreateRoleBinding(t *testing.T) { assert.Equal(t, roleBinding.RoleRef.Name, role.Name) assert.Equal(t, roleBinding.Subjects[0].Name, sa.Name) +} - a.Spec.Notifications.Enabled = false - err = r.reconcileNotificationsRoleBinding(a, role, sa) +func TestReconcileNotifications_DeleteRoleBinding(t *testing.T) { + logf.SetLogger(ZapLogger(true)) + a := makeTestArgoCD(func(a *argoprojv1alpha1.ArgoCD) { + a.Spec.Notifications.Enabled = true + }) + r := makeTestReconciler(t, a) + + role := &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: "role-name"}} + sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "sa-name"}} + + // Assuming the role binding has been reconciled and exists + err := r.reconcileNotificationsRoleBinding(a, role, sa) assert.NoError(t, err) + roleBinding := &rbacv1.RoleBinding{} + assert.NoError(t, r.Client.Get( + context.TODO(), + types.NamespacedName{ + Name: generateResourceName(common.ArgoCDNotificationsControllerComponent, a), + Namespace: a.Namespace, + }, + roleBinding)) + + // Confirm that the role binding exists + assert.Equal(t, roleBinding.RoleRef.Name, role.Name) + assert.Equal(t, roleBinding.Subjects[0].Name, sa.Name) + + // Now delete the role binding + err = r.deleteNotificationsRoleBinding(a) + assert.NoError(t, err) + + // Confirm the role binding has been deleted err = r.Client.Get(context.TODO(), types.NamespacedName{ Name: generateResourceName(common.ArgoCDNotificationsControllerComponent, a), Namespace: a.Namespace, @@ -218,9 +314,32 @@ func TestReconcileNotifications_CreateDeployments(t *testing.T) { if diff := cmp.Diff(expectedSelector, deployment.Spec.Selector); diff != "" { t.Fatalf("failed to reconcile notifications-controller label selector:\n%s", diff) } +} +func TestReconcileNotifications_DeleteDeployments(t *testing.T) { + logf.SetLogger(ZapLogger(true)) + a := makeTestArgoCD(func(a *argoprojv1alpha1.ArgoCD) { + a.Spec.Notifications.Enabled = true + }) + r := makeTestReconciler(t, a) + sa := corev1.ServiceAccount{} + assert.NoError(t, r.reconcileNotificationsDeployment(a, &sa)) + + deployment := &appsv1.Deployment{} + assert.NoError(t, r.Client.Get( + context.TODO(), + types.NamespacedName{ + Name: a.Name + "-notifications-controller", + Namespace: a.Namespace, + }, + deployment)) + + // Ensure the created Deployment has the expected properties + assert.Equal(t, deployment.Spec.Template.Spec.ServiceAccountName, sa.ObjectMeta.Name) + + // Ensure the deployment is deleted a.Spec.Notifications.Enabled = false - err := r.reconcileNotificationsDeployment(a, &sa) + err := r.deleteNotificationsDeployment(a) assert.NoError(t, err) err = r.Client.Get(context.TODO(), types.NamespacedName{ @@ -232,6 +351,7 @@ func TestReconcileNotifications_CreateDeployments(t *testing.T) { func TestReconcileNotifications_CreateSecret(t *testing.T) { logf.SetLogger(ZapLogger(true)) + a := makeTestArgoCD(func(a *argoprojv1alpha1.ArgoCD) { a.Spec.Notifications.Enabled = true }) @@ -246,12 +366,25 @@ func TestReconcileNotifications_CreateSecret(t *testing.T) { Name: "argocd-notifications-secret", Namespace: a.Namespace, }, testSecret)) +} - a.Spec.Notifications.Enabled = false - err = r.reconcileNotificationsSecret(a) +func TestDeleteNotifications_DeleteSecret(t *testing.T) { + 
logf.SetLogger(ZapLogger(true)) + + a := makeTestArgoCD(func(a *argoprojv1alpha1.ArgoCD) { + a.Spec.Notifications.Enabled = false + }) + + r := makeTestReconciler(t, a) + + err := r.deleteNotificationsSecret(a) assert.NoError(t, err) + secret := &corev1.Secret{} - err = r.Client.Get(context.TODO(), types.NamespacedName{Name: "argocd-notifications-secret", Namespace: a.Namespace}, secret) + err = r.Client.Get(context.TODO(), types.NamespacedName{ + Name: "argocd-notifications-secret", + Namespace: a.Namespace, + }, secret) assertNotFound(t, err) } @@ -273,13 +406,35 @@ func TestReconcileNotifications_CreateConfigMap(t *testing.T) { }, testCm)) assert.True(t, len(testCm.Data) > 0) +} - a.Spec.Notifications.Enabled = false - err = r.reconcileNotificationsConfigMap(a) +func TestReconcileNotifications_DeleteConfigMap(t *testing.T) { + logf.SetLogger(ZapLogger(true)) + a := makeTestArgoCD(func(a *argoprojv1alpha1.ArgoCD) { + a.Spec.Notifications.Enabled = false + }) + + r := makeTestReconciler(t, a) + + // Create the existing ConfigMap + existingCm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-notifications-cm", + Namespace: a.Namespace, + }, + } + assert.NoError(t, r.Client.Create(context.TODO(), existingCm)) + + err := r.deleteNotificationsConfigMap(a) assert.NoError(t, err) - testCm = &corev1.ConfigMap{} - err = r.Client.Get(context.TODO(), types.NamespacedName{Name: "argocd-notifications-cm", Namespace: a.Namespace}, testCm) - assertNotFound(t, err) + + // Check if the ConfigMap was deleted + cm := &corev1.ConfigMap{} + err = r.Client.Get(context.TODO(), types.NamespacedName{ + Name: "argocd-notifications-cm", + Namespace: a.Namespace, + }, cm) + assert.True(t, errors.IsNotFound(err)) } func TestReconcileNotifications_testEnvVars(t *testing.T) { diff --git a/controllers/argocd/util.go b/controllers/argocd/util.go index 5e45170dd..88e0e6d62 100644 --- a/controllers/argocd/util.go +++ b/controllers/argocd/util.go @@ -709,6 +709,9 @@ func (r *ReconcileArgoCD) redisShouldUseTLS(cr *argoprojv1a1.ArgoCD) bool { // reconcileResources will reconcile common ArgoCD resources. 
func (r *ReconcileArgoCD) reconcileResources(cr *argoprojv1a1.ArgoCD) error { + log.Info("*** in reconcileResources !") + log.Info("cr.Spec.Notifications.Enabled: %s", cr.Spec.Notifications.Enabled) + // reconcile SSO first, because dex resources get reconciled through other function calls as well, not just through reconcileSSO (this is important // so that dex resources can be appropriately cleaned up when DISABLE_DEX is set to true and the operator pod restarts but doesn't enter // dex reconciliation again because dex is disabled, thus leaving hanging resources around if they are not also cleaned up in the main loop) From 077edfcf1778e1523b231b9025fd96033b5c5059 Mon Sep 17 00:00:00 2001 From: mortya Date: Tue, 6 Jun 2023 17:01:31 -0400 Subject: [PATCH 05/19] fix: replace kube-rbac-proxy image tag v0.8.0 with its digest equivalent (#510) (#919) Signed-off-by: Morty Abzug Co-authored-by: Morty Abzug Signed-off-by: Yi Cai --- bundle/manifests/argocd-operator.clusterserviceversion.yaml | 2 +- config/default/manager_auth_proxy_patch.yaml | 2 +- .../0.7.0/argocd-operator.v0.7.0.clusterserviceversion.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bundle/manifests/argocd-operator.clusterserviceversion.yaml b/bundle/manifests/argocd-operator.clusterserviceversion.yaml index 0befcbd04..166a5cee9 100644 --- a/bundle/manifests/argocd-operator.clusterserviceversion.yaml +++ b/bundle/manifests/argocd-operator.clusterserviceversion.yaml @@ -1085,7 +1085,7 @@ spec: - --upstream=http://127.0.0.1:8080/ - --logtostderr=true - --v=10 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + image: gcr.io/kubebuilder/kube-rbac-proxy@sha256:db06cc4c084dd0253134f156dddaaf53ef1c3fb3cc809e5d81711baa4029ea4c name: kube-rbac-proxy ports: - containerPort: 8443 diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index bb9222c1a..8ce6ca8ed 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kube-rbac-proxy - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + image: gcr.io/kubebuilder/kube-rbac-proxy@sha256:db06cc4c084dd0253134f156dddaaf53ef1c3fb3cc809e5d81711baa4029ea4c args: - "--secure-listen-address=0.0.0.0:8443" - "--upstream=http://127.0.0.1:8080/" diff --git a/deploy/olm-catalog/argocd-operator/0.7.0/argocd-operator.v0.7.0.clusterserviceversion.yaml b/deploy/olm-catalog/argocd-operator/0.7.0/argocd-operator.v0.7.0.clusterserviceversion.yaml index 0befcbd04..166a5cee9 100644 --- a/deploy/olm-catalog/argocd-operator/0.7.0/argocd-operator.v0.7.0.clusterserviceversion.yaml +++ b/deploy/olm-catalog/argocd-operator/0.7.0/argocd-operator.v0.7.0.clusterserviceversion.yaml @@ -1085,7 +1085,7 @@ spec: - --upstream=http://127.0.0.1:8080/ - --logtostderr=true - --v=10 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + image: gcr.io/kubebuilder/kube-rbac-proxy@sha256:db06cc4c084dd0253134f156dddaaf53ef1c3fb3cc809e5d81711baa4029ea4c name: kube-rbac-proxy ports: - containerPort: 8443 From 3184a1b80f104582e68232643a9a182b04c3d21a Mon Sep 17 00:00:00 2001 From: Yi Cai Date: Wed, 7 Jun 2023 15:49:29 -0400 Subject: [PATCH 06/19] Minor fix for log Signed-off-by: Yi Cai --- controllers/argocd/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/argocd/util.go b/controllers/argocd/util.go index 88e0e6d62..34aa9e75b 100644 --- a/controllers/argocd/util.go +++ b/controllers/argocd/util.go @@ -710,7 +710,7 @@ func (r 
*ReconcileArgoCD) redisShouldUseTLS(cr *argoprojv1a1.ArgoCD) bool { func (r *ReconcileArgoCD) reconcileResources(cr *argoprojv1a1.ArgoCD) error { log.Info("*** in reconcileResources !") - log.Info("cr.Spec.Notifications.Enabled: %s", cr.Spec.Notifications.Enabled) + log.Info("cr.Spec.Notifications.Enabled: %v", cr.Spec.Notifications.Enabled) // reconcile SSO first, because dex resources get reconciled through other function calls as well, not just through reconcileSSO (this is important // so that dex resources can be appropriately cleaned up when DISABLE_DEX is set to true and the operator pod restarts but doesn't enter From f89841cf88a39d14ca55f820c2a035704394bb10 Mon Sep 17 00:00:00 2001 From: Yi Cai Date: Wed, 7 Jun 2023 16:07:40 -0400 Subject: [PATCH 07/19] Minor log fix p2 Signed-off-by: Yi Cai --- controllers/argocd/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/argocd/util.go b/controllers/argocd/util.go index 34aa9e75b..c8232548c 100644 --- a/controllers/argocd/util.go +++ b/controllers/argocd/util.go @@ -710,7 +710,7 @@ func (r *ReconcileArgoCD) redisShouldUseTLS(cr *argoprojv1a1.ArgoCD) bool { func (r *ReconcileArgoCD) reconcileResources(cr *argoprojv1a1.ArgoCD) error { log.Info("*** in reconcileResources !") - log.Info("cr.Spec.Notifications.Enabled: %v", cr.Spec.Notifications.Enabled) + log.Info(fmt.Sprintf("cr.Spec.Notifications.Enabled: %v", cr.Spec.Notifications.Enabled)) // reconcile SSO first, because dex resources get reconciled through other function calls as well, not just through reconcileSSO (this is important // so that dex resources can be appropriately cleaned up when DISABLE_DEX is set to true and the operator pod restarts but doesn't enter From c89159ef7ba79654215395235673c4e70627b980 Mon Sep 17 00:00:00 2001 From: Yi Cai Date: Wed, 7 Jun 2023 18:25:17 -0400 Subject: [PATCH 08/19] Fixed unit tests Signed-off-by: Yi Cai --- controllers/argocd/argocd_controller_test.go | 4 +++- controllers/argocd/route_test.go | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/controllers/argocd/argocd_controller_test.go b/controllers/argocd/argocd_controller_test.go index 18a655d97..6700d3b03 100644 --- a/controllers/argocd/argocd_controller_test.go +++ b/controllers/argocd/argocd_controller_test.go @@ -75,7 +75,9 @@ func TestReconcileArgoCD_Reconcile_with_deleted(t *testing.T) { func TestReconcileArgoCD_Reconcile(t *testing.T) { logf.SetLogger(ZapLogger(true)) - a := makeTestArgoCD() + a := makeTestArgoCD(func(a *argov1alpha1.ArgoCD) { + a.Spec.Notifications.Enabled = true + }) r := makeTestReconciler(t, a) assert.NoError(t, createNamespace(r, a.Namespace, "")) diff --git a/controllers/argocd/route_test.go b/controllers/argocd/route_test.go index 2272335ee..3fae546ff 100644 --- a/controllers/argocd/route_test.go +++ b/controllers/argocd/route_test.go @@ -28,6 +28,7 @@ func TestReconcileRouteSetLabels(t *testing.T) { ctx := context.Background() logf.SetLogger(ZapLogger(true)) argoCD := makeArgoCD(func(a *argov1alpha1.ArgoCD) { + a.Spec.Notifications.Enabled = true a.Spec.Server.Route.Enabled = true labels := make(map[string]string) labels["my-key"] = "my-value" @@ -64,6 +65,7 @@ func TestReconcileRouteSetsInsecure(t *testing.T) { logf.SetLogger(ZapLogger(true)) argoCD := makeArgoCD(func(a *argov1alpha1.ArgoCD) { a.Spec.Server.Route.Enabled = true + a.Spec.Notifications.Enabled = true }) objs := []runtime.Object{ argoCD, @@ -136,6 +138,7 @@ func TestReconcileRouteUnsetsInsecure(t *testing.T) { argoCD := makeArgoCD(func(a 
*argov1alpha1.ArgoCD) { a.Spec.Server.Route.Enabled = true a.Spec.Server.Insecure = true + a.Spec.Notifications.Enabled = true }) objs := []runtime.Object{ argoCD, From d2faf6e858fb92b3a23bd87898e68cfdf5ceb79f Mon Sep 17 00:00:00 2001 From: Yi Cai Date: Wed, 7 Jun 2023 21:54:04 -0400 Subject: [PATCH 09/19] code cleanup Signed-off-by: Yi Cai --- controllers/argocd/util.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/controllers/argocd/util.go b/controllers/argocd/util.go index c8232548c..5ef930eb7 100644 --- a/controllers/argocd/util.go +++ b/controllers/argocd/util.go @@ -709,9 +709,6 @@ func (r *ReconcileArgoCD) redisShouldUseTLS(cr *argoprojv1a1.ArgoCD) bool { // reconcileResources will reconcile common ArgoCD resources. func (r *ReconcileArgoCD) reconcileResources(cr *argoprojv1a1.ArgoCD) error { - log.Info("*** in reconcileResources !") - log.Info(fmt.Sprintf("cr.Spec.Notifications.Enabled: %v", cr.Spec.Notifications.Enabled)) - // reconcile SSO first, because dex resources get reconciled through other function calls as well, not just through reconcileSSO (this is important // so that dex resources can be appropriately cleaned up when DISABLE_DEX is set to true and the operator pod restarts but doesn't enter // dex reconciliation again because dex is disabled, thus leaving hanging resources around if they are not also cleaned up in the main loop) @@ -1035,13 +1032,6 @@ func (r *ReconcileArgoCD) setResourceWatches(bldr *builder.Builder, clusterResou if !ok { return false } - // if oldCR.Spec.Notifications.Enabled && !newCR.Spec.Notifications.Enabled { - // err := r.deleteNotificationsResources(newCR) - // if err != nil { - // log.Error(err, fmt.Sprintf("Failed to delete notifications controller resources for ArgoCD %s in namespace %s", - // newCR.Name, newCR.Namespace)) - // } - // } return oldCR.Spec.Notifications.Enabled && !newCR.Spec.Notifications.Enabled }, } From bf00243ecb2585f3686257e060ca52336c440640 Mon Sep 17 00:00:00 2001 From: Regina Scott <50851526+reginapizza@users.noreply.github.com> Date: Wed, 7 Jun 2023 23:27:11 -0400 Subject: [PATCH 10/19] docs; Add system-level configuration docs and fix minor typo (#825) Signed-off-by: Regina Scott Signed-off-by: Yi Cai --- docs/reference/argocd.md | 169 ++++++++++++++---- ...rgocd-with-resource-ignore-difference.yaml | 2 +- .../01-assert.yaml | 2 +- 3 files changed, 134 insertions(+), 39 deletions(-) diff --git a/docs/reference/argocd.md b/docs/reference/argocd.md index a6f5eb296..36f3a539f 100644 --- a/docs/reference/argocd.md +++ b/docs/reference/argocd.md @@ -1014,35 +1014,44 @@ There are two ways to customize resource behavior- the first way, only available !!! warning `resourceCustomizations` is being deprecated, and support will be removed in Argo CD Operator v0.8.0 so is encouraged to use `resourceHealthChecks`, `resourceIgnoreDifferences`, and `resourceActions` instead. It is the user's responsibility to not provide conflicting resources if they choose to use both methods of resource customizations. -### Resource Customizations (with subkeys) Example +### Resource Customizations (with subkeys) -Keys for `resourceHealthChecks`, `resourceIgnoreDifferences`, and `resourceActions` are in the form (respectively): `resource.customizations.health.`, `resource.customizations.ignoreDifferences.`, and `resource.customizations.actions.`. The following example defines a custom health check, custom action, and an ignoreDifferences config in the `argocd-cm` ConfigMap. 
Additionally, `.spec.resourceIgnoreDifferences.all` allows you to apply these specified settings to all resources managed by this Argo CD instance. +Keys for `resourceHealthChecks`, `resourceIgnoreDifferences`, and `resourceActions` are in the form (respectively): `resource.customizations.health.`, `resource.customizations.ignoreDifferences.`, and `resource.customizations.actions.`. -``` yaml -apiVersion: argoproj.io/v1alpha1 -kind: ArgoCD -metadata: - name: argocd +#### Application Level Configuration + +Argo CD allows ignoring differences at a specific JSON path, using [RFC6902 JSON patches](https://tools.ietf.org/html/rfc6902) and [JQ path expressions](https://stedolan.github.io/jq/manual/#path(path_expression)). It is also possible to ignore differences from fields owned by specific managers defined in `metadata.managedFields` in live resources. + +The following sample application is configured to ignore differences in `spec.replicas` for all deployments: + +```yaml spec: resourceIgnoreDifferences: - all: - jsonPointers: + resourceIdentifiers: + - group: apps + kind: Deployment + customization: + jsonPointers: - /spec/replicas - managedFieldsManagers: - - kube-controller-manager +``` + +Note that the `group` field relates to the [Kubernetes API group](https://kubernetes.io/docs/reference/using-api/#api-groups) without the version. + +To ignore elements of a list, you can use JQ path expressions to identify list items based on item content: +```yaml +spec: + resourceIgnoreDifferences: resourceIdentifiers: - - group: admissionregistration.k8s.io - kind: MutatingWebhookConfiguration - customization: - jqPathExpressions: - - '.webhooks[]?.clientConfig.caBundle' - - group: apps - kind: Deployment - customization: - managedFieldsManagers: - - kube-controller-manager - jsonPointers: - - /spec/replicas + - group: apps + kind: Deployment + customization: + jqPathExpressions: + - .spec.template.spec.initContainers[] | select(.name == "injected-init-container") +``` + +The following example defines a custom health check in the `argocd-cm` ConfigMap: +``` yaml +spec: resourceHealthChecks: - group: certmanager.k8s.io kind: Certificate @@ -1067,6 +1076,11 @@ spec: hs.status = "Progressing" hs.message = "Waiting for certificate" return hs +``` + +The following example defines a custom action in the `argocd-cm` ConfigMap: +``` yaml +spec: resourceActions: - group: apps kind: Deployment @@ -1089,24 +1103,15 @@ spec: obj.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = os.date("!%Y-%m-%dT%XZ") return obj ``` - After applying these changes your `argocd-cm` Configmap should contain the following fields: -``` -resource.customizations.ignoreDifferences.all: | - jsonPointers: - - /spec/replicas - managedFieldsManagers: - - kube-controller-manager +After applying these changes your `argocd-cm` Configmap should contain the following fields: -resource.customizations.ignoreDifferences.admissionregistration.k8s.io_MutatingWebhookConfiguration: | - jqpathexpressions: - - '.webhooks[]?.clientConfig.caBundle' - -resource.customizations.ignoreDifferences.apps_deployments: | - managedFieldsManagers: - - kube-controller-manager +``` +resource.customizations.ignoreDifferences.apps_Deployment: | jsonPointers: - /spec/replicas + jqPathExpressions: + - .spec.template.spec.initContainers[] | select(.name == "injected-init-container") resource.customizations.health.certmanager.k8s.io_Certificate: | hs = {} @@ -1150,6 +1155,63 @@ resource.customizations.actions.apps_Deployment: | return obj ``` +#### 
System-Level Configuration +The comparison of resources with well-known issues can be customized at a system level. Ignored differences can be configured for a specified group and kind in `resource.customizations` key of `argocd-cm` ConfigMap. Following is an example of a customization which ignores the `caBundle` field of a `MutatingWebhookConfiguration` webhooks: + +```yaml +spec: + resourceIgnoreDifferences: + resourceIdentifiers: + - group: admissionregistration.k8s.io + kind: MutatingWebhookConfiguration + customization: + jqPathExpressions: + - '.webhooks[]?.clientConfig.caBundle' +``` + +Resource customization can also be configured to ignore all differences made by a `managedField.manager` at the system level. The example bellow shows how to configure ArgoCD to ignore changes made by `kube-controller-manager` in `Deployment` resources. + +```yaml +spec: + resourceIgnoreDifferences: + resourceIdentifiers: + - group: apps + kind: Deployment + customization: + managedFieldsManagers: + - kube-controller-manager +``` + +It is possible to configure ignoreDifferences to be applied to all resources in every Application managed by an ArgoCD instance. In order to do so, resource customizations can be configured like in the example below: + +```yaml +spec: + resourceIgnoreDifferences: + all: + managedFieldsManagers: + - kube-controller-manager + jsonPointers: + - /spec/replicas +``` + +After applying these changes your `argocd-cm` Configmap should contain the following fields: + +``` +resource.customizations.ignoreDifferences.admissionregistration.k8s.io_MutatingWebhookConfiguration: | + jqPathExpressions: + - '.webhooks[]?.clientConfig.caBundle' + +resource.customizations.ignoreDifferences.apps_Deployment: | + managedFieldsManagers: + - kube-controller-manager + +resource.customizations.ignoreDifferences.all: | + managedFieldsManagers: + - kube-controller-manager + jsonPointers: + - /spec/replicas +``` + ### Resource Customizations Example !!! warning @@ -1469,6 +1531,39 @@ Resources | `Requests`: CPU=500m, Mem=512Mi, `Limits`: CPU=1000m, Mem=1024Mi | T VerifyTLS | true | Whether to enforce strict TLS checking when communicating with Keycloak service. Version | OpenShift - `sha256:720a7e4c4926c41c1219a90daaea3b971a3d0da5a152a96fed4fb544d80f52e3` (7.5.1)
Kubernetes - `sha256:64fb81886fde61dee55091e6033481fa5ccdac62ae30a4fd29b54eb5e97df6a9` (15.0.2) | The tag to use with the keycloak container image. +## System-Level Configuration + +The comparison of resources with well-known issues can be customized at a system level. Ignored differences can be configured for a specified group and kind +in `resource.customizations` key of `argocd-cm` ConfigMap. Following is an example of a customization which ignores the `caBundle` field +of a `MutatingWebhookConfiguration` webhooks: + +```yaml +data: + resource.customizations.ignoreDifferences.admissionregistration.k8s.io_MutatingWebhookConfiguration: | + jqPathExpressions: + - '.webhooks[]?.clientConfig.caBundle' +``` + +Resource customization can also be configured to ignore all differences made by a `managedFieldsManager` at the system level. The example bellow shows how to configure ArgoCD to ignore changes made by `kube-controller-manager` in `Deployment` resources. + +```yaml +data: + resource.customizations.ignoreDifferences.apps_Deployment: | + managedFieldsManagers: + - kube-controller-manager +``` + +It is possible to configure ignoreDifferences to be applied to all resources in every Application managed by an ArgoCD instance. In order to do so, resource customizations can be configured like in the example bellow: + +```yaml +data: + resource.customizations.ignoreDifferences.all: | + managedFieldsManagers: + - kube-controller-manager + jsonPointers: + - /spec/replicas +``` + ## TLS Options The following properties are available for configuring the TLS settings. diff --git a/tests/k8s/1-025_validate_resource_ignore_differences/01-argocd-with-resource-ignore-difference.yaml b/tests/k8s/1-025_validate_resource_ignore_differences/01-argocd-with-resource-ignore-difference.yaml index f15ef2f70..97e295d60 100644 --- a/tests/k8s/1-025_validate_resource_ignore_differences/01-argocd-with-resource-ignore-difference.yaml +++ b/tests/k8s/1-025_validate_resource_ignore_differences/01-argocd-with-resource-ignore-difference.yaml @@ -16,7 +16,7 @@ spec: - abc resourceIdentifiers: - group: apps - kind: deployments + kind: Deployment customization: jqPathExpressions: - xyz diff --git a/tests/k8s/1-025_validate_resource_ignore_differences/01-assert.yaml b/tests/k8s/1-025_validate_resource_ignore_differences/01-assert.yaml index 1eedf0037..c149a8449 100644 --- a/tests/k8s/1-025_validate_resource_ignore_differences/01-assert.yaml +++ b/tests/k8s/1-025_validate_resource_ignore_differences/01-assert.yaml @@ -24,7 +24,7 @@ data: managedfieldsmanagers: - xyz - abc - resource.customizations.ignoreDifferences.apps_deployments: | + resource.customizations.ignoreDifferences.apps_Deployment: | jqpathexpressions: - xyz - abc From df83e752ebc5edaeb0a65782eccc45b118d169c8 Mon Sep 17 00:00:00 2001 From: mortya Date: Wed, 7 Jun 2023 23:28:12 -0400 Subject: [PATCH 11/19] fix: add reference doc for Extra Config feature (#858) (#921) Signed-off-by: Morty Abzug Co-authored-by: Morty Abzug Signed-off-by: Yi Cai --- docs/reference/argocd.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/docs/reference/argocd.md b/docs/reference/argocd.md index 36f3a539f..aaf9eb2a7 100644 --- a/docs/reference/argocd.md +++ b/docs/reference/argocd.md @@ -17,6 +17,7 @@ Name | Default | Description [**Controller**](#controller-options) | [Object] | Argo CD Application Controller options. [**Dex**](#dex-options) | [Object] | Dex configuration options. [**DisableAdmin**](#disable-admin) | `false` | Disable the admin user. 
+[**ExtraConfig**](#extra-config) | [Empty] | A catch-all mechanism to populate the argocd-cm configmap. [**GATrackingID**](#ga-tracking-id) | [Empty] | The google analytics tracking ID to use. [**GAAnonymizeUsers**](#ga-anonymize-users) | `false` | Enable hashed usernames sent to google analytics. [**Grafana**](#grafana-options) | [Object] | Grafana configuration options. @@ -310,6 +311,28 @@ spec: disableAdmin: true ``` +## Extra Config + +This is a generic mechanism to add new or otherwise-unsupported +features to the argocd-cm configmap. Manual edits to the argocd-cm +configmap will otherwise be automatically reverted. + +This defaults to empty. + +## Extra Config Example + +``` yaml +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: example-argocd +spec: + extraConfig: + "accounts.argocd-devops": "apiKey" + "ping": "pong" // The same entry is reflected in Argo CD Configmap. +``` + + ## GA Tracking ID The google analytics tracking ID to use. This property maps directly to the `ga.trackingid` field in the `argocd-cm` ConfigMap. From 53b7b260b5715bbcc8ba542b4161aaf60c0103fd Mon Sep 17 00:00:00 2001 From: Yi Cai Date: Thu, 8 Jun 2023 12:41:28 -0400 Subject: [PATCH 12/19] Fixed kuttl test 022 Signed-off-by: Yi Cai --- controllers/argocd/notifications.go | 16 +------ .../01-assert.yaml | 43 +++++++++++++++++++ .../02-assert.yaml | 6 +++ .../02-update-notifications-cm.yaml | 5 +-- .../03-assert.yaml | 14 ++++++ .../03-create-app.yaml | 3 +- .../04-delete-app.yaml | 4 +- 7 files changed, 70 insertions(+), 21 deletions(-) create mode 100644 tests/k8s/1-022_validate_notifications/01-assert.yaml create mode 100644 tests/k8s/1-022_validate_notifications/02-assert.yaml create mode 100644 tests/k8s/1-022_validate_notifications/03-assert.yaml diff --git a/controllers/argocd/notifications.go b/controllers/argocd/notifications.go index 57325d6dc..d4e29d505 100644 --- a/controllers/argocd/notifications.go +++ b/controllers/argocd/notifications.go @@ -411,24 +411,12 @@ func (r *ReconcileArgoCD) reconcileNotificationsConfigMap(cr *argoprojv1a1.ArgoC } log.Info(fmt.Sprintf("Creating configmap %s", desiredConfigMap.Name)) - err := r.Client.Create(context.TODO(), desiredConfigMap) - if err != nil { - return err - } - - return nil - } - - // ConfigMap exists, reconcile if changed - if !reflect.DeepEqual(existingConfigMap.Data, desiredConfigMap.Data) { - existingConfigMap.Data = desiredConfigMap.Data - if err := controllerutil.SetControllerReference(cr, existingConfigMap, r.Scheme); err != nil { + if err := r.Client.Create(context.TODO(), desiredConfigMap); err != nil { return err } - - return r.Client.Update(context.TODO(), existingConfigMap) } + // If the ConfigMap already exists, do nothing. 
return nil } diff --git a/tests/k8s/1-022_validate_notifications/01-assert.yaml b/tests/k8s/1-022_validate_notifications/01-assert.yaml new file mode 100644 index 000000000..6cb0e0da1 --- /dev/null +++ b/tests/k8s/1-022_validate_notifications/01-assert.yaml @@ -0,0 +1,43 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: argocd +status: + phase: Available + notificationsController: Running +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: argocd-notifications-controller +status: + conditions: + - type: Available + status: 'True' + - type: Progressing + status: 'True' +--- +kind: Secret +apiVersion: v1 +metadata: + name: argocd-notifications-secret +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: argocd-notifications-cm +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: argocd-argocd-notifications-controller +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: argocd-argocd-notifications-controller +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: argocd-argocd-notifications-controller diff --git a/tests/k8s/1-022_validate_notifications/02-assert.yaml b/tests/k8s/1-022_validate_notifications/02-assert.yaml new file mode 100644 index 000000000..46d87bdf3 --- /dev/null +++ b/tests/k8s/1-022_validate_notifications/02-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + service.email.gmail: "{host: smtp4dev, port: 2525, from: fake@email.com}" diff --git a/tests/k8s/1-022_validate_notifications/02-update-notifications-cm.yaml b/tests/k8s/1-022_validate_notifications/02-update-notifications-cm.yaml index 1b7c948b9..47b2083af 100644 --- a/tests/k8s/1-022_validate_notifications/02-update-notifications-cm.yaml +++ b/tests/k8s/1-022_validate_notifications/02-update-notifications-cm.yaml @@ -1,10 +1,7 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: -- script: sleep 15 - script: | set -e - kubectl patch cm argocd-notifications-cm -n $NAMESPACE --type merge -p '{"data": {"service.email.gmail": "{host: smtp4dev, port: 2525, from: fake@email.com }" }}' -- script: sleep 5 - + kubectl patch cm argocd-notifications-cm -n $NAMESPACE --type merge -p '{"data": {"service.email.gmail": "{host: smtp4dev, port: 2525, from: fake@email.com}" }}' \ No newline at end of file diff --git a/tests/k8s/1-022_validate_notifications/03-assert.yaml b/tests/k8s/1-022_validate_notifications/03-assert.yaml new file mode 100644 index 000000000..a71debccf --- /dev/null +++ b/tests/k8s/1-022_validate_notifications/03-assert.yaml @@ -0,0 +1,14 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: my-app-3 + annotations: + "notifications.argoproj.io/subscribe.on-created.gmail": "jdfake@email.com" +spec: + destination: + server: https://kubernetes.default.svc + project: default + source: + repoURL: https://github.com/redhat-developer/gitops-operator + path: test/examples/nginx + targetRevision: HEAD \ No newline at end of file diff --git a/tests/k8s/1-022_validate_notifications/03-create-app.yaml b/tests/k8s/1-022_validate_notifications/03-create-app.yaml index d9a436f7c..d942591ef 100644 --- a/tests/k8s/1-022_validate_notifications/03-create-app.yaml +++ b/tests/k8s/1-022_validate_notifications/03-create-app.yaml @@ -21,5 +21,4 @@ commands: repoURL: https://github.com/redhat-developer/gitops-operator path: test/examples/nginx targetRevision: HEAD - EOF -- script: sleep 5 \ No newline at end of file + EOF \ No newline at end of file diff 
--git a/tests/k8s/1-022_validate_notifications/04-delete-app.yaml b/tests/k8s/1-022_validate_notifications/04-delete-app.yaml index 47052970a..8d8f911b7 100644 --- a/tests/k8s/1-022_validate_notifications/04-delete-app.yaml +++ b/tests/k8s/1-022_validate_notifications/04-delete-app.yaml @@ -5,4 +5,6 @@ commands: set -e kubectl delete -n $NAMESPACE application.argoproj.io my-app-3 -- script: sleep 5 \ No newline at end of file +- script: sleep 5 +- script: | + ! kubectl get -n $NAMESPACE application.argoproj.io my-app-3 \ No newline at end of file From ebe642c279fcc408a690c993dec182c3b4996295 Mon Sep 17 00:00:00 2001 From: Yi Cai Date: Thu, 8 Jun 2023 13:40:42 -0400 Subject: [PATCH 13/19] Updated 022 test step 4 verification Signed-off-by: Yi Cai --- tests/k8s/1-022_validate_notifications/04-delete-app.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/k8s/1-022_validate_notifications/04-delete-app.yaml b/tests/k8s/1-022_validate_notifications/04-delete-app.yaml index 8d8f911b7..c7b6c781a 100644 --- a/tests/k8s/1-022_validate_notifications/04-delete-app.yaml +++ b/tests/k8s/1-022_validate_notifications/04-delete-app.yaml @@ -7,4 +7,8 @@ commands: kubectl delete -n $NAMESPACE application.argoproj.io my-app-3 - script: sleep 5 - script: | - ! kubectl get -n $NAMESPACE application.argoproj.io my-app-3 \ No newline at end of file + output=$(kubectl get -n $NAMESPACE application.argoproj.io my-app-3 --no-headers 2>/dev/null) + if [ -n "$output" ]; then + echo "my-app-3 still exists" + exit 1 + fi \ No newline at end of file From 0f55fd8a3a8f13e58496d0dce82912346b38ea60 Mon Sep 17 00:00:00 2001 From: Ishita Sequeira <46771830+ishitasequeira@users.noreply.github.com> Date: Thu, 8 Jun 2023 17:10:31 -0400 Subject: [PATCH 14/19] feat: Add dynamic sharding for argocd controller (#910) * Add sharding configuration options for dynamic scaling of application controller --------- Signed-off-by: ishitasequeira Signed-off-by: Yi Cai --- api/v1alpha1/argocd_types.go | 14 ++++ api/v1alpha1/zz_generated.deepcopy.go | 7 +- bundle/manifests/argoproj.io_argocds.yaml | 21 ++++++ config/crd/bases/argoproj.io_argocds.yaml | 21 ++++++ controllers/argocd/argocd_controller.go | 2 +- controllers/argocd/custommapper.go | 29 ++++++++ controllers/argocd/secret.go | 29 +++++--- controllers/argocd/statefulset.go | 53 +++++++++++++- controllers/argocd/statefulset_test.go | 72 +++++++++++++++++++ controllers/argocd/util.go | 10 ++- .../0.7.0/argoproj.io_argocds.yaml | 21 ++++++ docs/reference/argocd.md | 62 +++++++++++++--- .../01-assert.yaml | 6 ++ .../01-install.yaml | 16 +++++ .../02-add-cluster-secrets.yaml | 26 +++++++ .../02-assert.yaml | 17 +++++ .../03-assert.yaml | 27 +++++++ .../03-cluster-secrets-exceed-maxShards.yaml | 48 +++++++++++++ .../04-assert.yaml | 13 ++++ .../04-delete-cluster-secret.yaml | 7 ++ 20 files changed, 476 insertions(+), 25 deletions(-) create mode 100644 tests/k8s/1-032_validate_dynamic_scaling/01-assert.yaml create mode 100644 tests/k8s/1-032_validate_dynamic_scaling/01-install.yaml create mode 100644 tests/k8s/1-032_validate_dynamic_scaling/02-add-cluster-secrets.yaml create mode 100644 tests/k8s/1-032_validate_dynamic_scaling/02-assert.yaml create mode 100644 tests/k8s/1-032_validate_dynamic_scaling/03-assert.yaml create mode 100644 tests/k8s/1-032_validate_dynamic_scaling/03-cluster-secrets-exceed-maxShards.yaml create mode 100644 tests/k8s/1-032_validate_dynamic_scaling/04-assert.yaml create mode 100644 
tests/k8s/1-032_validate_dynamic_scaling/04-delete-cluster-secret.yaml diff --git a/api/v1alpha1/argocd_types.go b/api/v1alpha1/argocd_types.go index 8e257ddbe..72fdb5e3c 100644 --- a/api/v1alpha1/argocd_types.go +++ b/api/v1alpha1/argocd_types.go @@ -115,6 +115,20 @@ type ArgoCDApplicationControllerShardSpec struct { // Replicas defines the number of replicas to run in the Application controller shard. Replicas int32 `json:"replicas,omitempty"` + + // DynamicScalingEnabled defines whether dynamic scaling should be enabled for Application Controller component + DynamicScalingEnabled *bool `json:"dynamicScalingEnabled,omitempty"` + + // MinShards defines the minimum number of shards at any given point + // +kubebuilder:validation:Minimum=1 + MinShards int32 `json:"minShards,omitempty"` + + // MaxShards defines the maximum number of shards at any given point + MaxShards int32 `json:"maxShards,omitempty"` + + // ClustersPerShard defines the maximum number of clusters managed by each argocd shard + // +kubebuilder:validation:Minimum=1 + ClustersPerShard int32 `json:"clustersPerShard,omitempty"` } // ArgoCDApplicationSet defines whether the Argo CD ApplicationSet controller should be installed. diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 2e7bdc700..7ea277a89 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -75,6 +75,11 @@ func (in *ArgoCDApplicationControllerProcessorsSpec) DeepCopy() *ArgoCDApplicati // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ArgoCDApplicationControllerShardSpec) DeepCopyInto(out *ArgoCDApplicationControllerShardSpec) { *out = *in + if in.DynamicScalingEnabled != nil { + in, out := &in.DynamicScalingEnabled, &out.DynamicScalingEnabled + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArgoCDApplicationControllerShardSpec. @@ -101,7 +106,7 @@ func (in *ArgoCDApplicationControllerSpec) DeepCopyInto(out *ArgoCDApplicationCo *out = new(metav1.Duration) **out = **in } - out.Sharding = in.Sharding + in.Sharding.DeepCopyInto(&out.Sharding) if in.Env != nil { in, out := &in.Env, &out.Env *out = make([]v1.EnvVar, len(*in)) diff --git a/bundle/manifests/argoproj.io_argocds.yaml b/bundle/manifests/argoproj.io_argocds.yaml index 9b86bd949..d47d3cabf 100644 --- a/bundle/manifests/argoproj.io_argocds.yaml +++ b/bundle/manifests/argoproj.io_argocds.yaml @@ -537,10 +537,31 @@ spec: description: Sharding contains the options for the Application Controller sharding configuration. properties: + clustersPerShard: + description: ClustersPerShard defines the maximum number of + clusters managed by each argocd shard + format: int32 + minimum: 1 + type: integer + dynamicScalingEnabled: + description: DynamicScalingEnabled defines whether dynamic + scaling should be enabled for Application Controller component + type: boolean enabled: description: Enabled defines whether sharding should be enabled on the Application Controller component. type: boolean + maxShards: + description: MaxShards defines the maximum number of shards + at any given point + format: int32 + type: integer + minShards: + description: MinShards defines the minimum number of shards + at any given point + format: int32 + minimum: 1 + type: integer replicas: description: Replicas defines the number of replicas to run in the Application controller shard. 
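The four new sharding fields above only describe intent; the replica math they imply (divide the number of managed clusters by `clustersPerShard`, then clamp the result to the `[minShards, maxShards]` range) is implemented later in this series in `getApplicationControllerReplicaCount`. The standalone Go sketch below restates that rule outside the operator so the clamping behaviour is easy to follow; the type and function names here are illustrative and are not part of the patch.

```go
package main

import "fmt"

// shardSpec mirrors the new sharding fields added to the ArgoCD CRD in this
// patch (illustrative copy; the real type lives in api/v1alpha1/argocd_types.go).
type shardSpec struct {
	DynamicScalingEnabled bool
	MinShards             int32
	MaxShards             int32
	ClustersPerShard      int32
}

// desiredReplicas applies the documented rule: clusters / clustersPerShard,
// clamped to [minShards, maxShards], after normalising invalid values.
func desiredReplicas(s shardSpec, clusterCount int32) int32 {
	if !s.DynamicScalingEnabled {
		return 1 // simplified static default when dynamic scaling is off
	}
	lo, hi, per := s.MinShards, s.MaxShards, s.ClustersPerShard
	if lo < 1 {
		lo = 1
	}
	if hi < lo {
		hi = lo
	}
	if per < 1 {
		per = 1
	}
	replicas := clusterCount / per
	if replicas < lo {
		replicas = lo
	}
	if replicas > hi {
		replicas = hi
	}
	return replicas
}

func main() {
	s := shardSpec{DynamicScalingEnabled: true, MinShards: 2, MaxShards: 4, ClustersPerShard: 1}
	fmt.Println(desiredReplicas(s, 3)) // prints 3: one shard per cluster, within [2, 4]
}
```

With 3 cluster secrets, `minShards: 2`, `maxShards: 4` and `clustersPerShard: 1`, this yields 3 replicas, which matches the expectation in the unit test added further down in this patch.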
diff --git a/config/crd/bases/argoproj.io_argocds.yaml b/config/crd/bases/argoproj.io_argocds.yaml index 27ddb0cb4..a870120c2 100644 --- a/config/crd/bases/argoproj.io_argocds.yaml +++ b/config/crd/bases/argoproj.io_argocds.yaml @@ -539,10 +539,31 @@ spec: description: Sharding contains the options for the Application Controller sharding configuration. properties: + clustersPerShard: + description: ClustersPerShard defines the maximum number of + clusters managed by each argocd shard + format: int32 + minimum: 1 + type: integer + dynamicScalingEnabled: + description: DynamicScalingEnabled defines whether dynamic + scaling should be enabled for Application Controller component + type: boolean enabled: description: Enabled defines whether sharding should be enabled on the Application Controller component. type: boolean + maxShards: + description: MaxShards defines the maximum number of shards + at any given point + format: int32 + type: integer + minShards: + description: MinShards defines the minimum number of shards + at any given point + format: int32 + minimum: 1 + type: integer replicas: description: Replicas defines the number of replicas to run in the Application controller shard. diff --git a/controllers/argocd/argocd_controller.go b/controllers/argocd/argocd_controller.go index 0e8baeb78..a8cadae70 100644 --- a/controllers/argocd/argocd_controller.go +++ b/controllers/argocd/argocd_controller.go @@ -150,6 +150,6 @@ func (r *ReconcileArgoCD) Reconcile(ctx context.Context, request ctrl.Request) ( // SetupWithManager sets up the controller with the Manager. func (r *ReconcileArgoCD) SetupWithManager(mgr ctrl.Manager) error { bldr := ctrl.NewControllerManagedBy(mgr) - r.setResourceWatches(bldr, r.clusterResourceMapper, r.tlsSecretMapper, r.namespaceResourceMapper) + r.setResourceWatches(bldr, r.clusterResourceMapper, r.tlsSecretMapper, r.namespaceResourceMapper, r.clusterSecretResourceMapper) return bldr.Complete(r) } diff --git a/controllers/argocd/custommapper.go b/controllers/argocd/custommapper.go index eba5a3356..d1c11a949 100644 --- a/controllers/argocd/custommapper.go +++ b/controllers/argocd/custommapper.go @@ -153,3 +153,32 @@ func (r *ReconcileArgoCD) namespaceResourceMapper(o client.Object) []reconcile.R return result } + +// clusterSecretResourceMapper maps a watch event on a namespace, back to the +// ArgoCD object that we want to reconcile. 
+func (r *ReconcileArgoCD) clusterSecretResourceMapper(o client.Object) []reconcile.Request { + var result = []reconcile.Request{} + + labels := o.GetLabels() + if v, ok := labels[common.ArgoCDSecretTypeLabel]; ok && v == "cluster" { + argocds := &argoprojv1alpha1.ArgoCDList{} + if err := r.Client.List(context.TODO(), argocds, &client.ListOptions{Namespace: o.GetNamespace()}); err != nil { + return result + } + + if len(argocds.Items) != 1 { + return result + } + + argocd := argocds.Items[0] + namespacedName := client.ObjectKey{ + Name: argocd.Name, + Namespace: argocd.Namespace, + } + result = []reconcile.Request{ + {NamespacedName: namespacedName}, + } + } + + return result +} diff --git a/controllers/argocd/secret.go b/controllers/argocd/secret.go index 90843fad2..f47d083b3 100644 --- a/controllers/argocd/secret.go +++ b/controllers/argocd/secret.go @@ -460,17 +460,11 @@ func (r *ReconcileArgoCD) reconcileClusterPermissionsSecret(cr *argoprojv1a1.Arg clusterConfigInstance = true } - clusterSecrets := &corev1.SecretList{} - opts := &client.ListOptions{ - LabelSelector: labels.SelectorFromSet(map[string]string{ - common.ArgoCDSecretTypeLabel: "cluster", - }), - Namespace: cr.Namespace, - } - - if err := r.Client.List(context.TODO(), clusterSecrets, opts); err != nil { + clusterSecrets, err := r.getClusterSecrets(cr) + if err != nil { return err } + for _, s := range clusterSecrets.Items { // check if cluster secret with default server address exists if string(s.Data["server"]) == common.ArgoCDDefaultServer { @@ -686,3 +680,20 @@ func (r *ReconcileArgoCD) reconcileSecrets(cr *argoprojv1a1.ArgoCD) error { return nil } + +func (r *ReconcileArgoCD) getClusterSecrets(cr *argoprojv1a1.ArgoCD) (*corev1.SecretList, error) { + + clusterSecrets := &corev1.SecretList{} + opts := &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + common.ArgoCDSecretTypeLabel: "cluster", + }), + Namespace: cr.Namespace, + } + + if err := r.Client.List(context.TODO(), clusterSecrets, opts); err != nil { + return nil, err + } + + return clusterSecrets, nil +} diff --git a/controllers/argocd/statefulset.go b/controllers/argocd/statefulset.go index 319e95878..005a91121 100644 --- a/controllers/argocd/statefulset.go +++ b/controllers/argocd/statefulset.go @@ -451,13 +451,60 @@ func getArgoControllerContainerEnv(cr *argoprojv1a1.ArgoCD) []corev1.EnvVar { return env } -func (r *ReconcileArgoCD) reconcileApplicationControllerStatefulSet(cr *argoprojv1a1.ArgoCD, useTLSForRedis bool) error { +func (r *ReconcileArgoCD) getApplicationControllerReplicaCount(cr *argoprojv1a1.ArgoCD) int32 { var replicas int32 = common.ArgocdApplicationControllerDefaultReplicas + var minShards int32 = cr.Spec.Controller.Sharding.MinShards + var maxShards int32 = cr.Spec.Controller.Sharding.MaxShards + + if cr.Spec.Controller.Sharding.DynamicScalingEnabled != nil && *cr.Spec.Controller.Sharding.DynamicScalingEnabled { + + // TODO: add the same validations to Validation Webhook once webhook has been introduced + if minShards < 1 { + log.Info("Minimum number of shards cannot be less than 1. Setting default value to 1") + minShards = 1 + } + + if maxShards < minShards { + log.Info("Maximum number of shards cannot be less than minimum number of shards. Setting maximum shards same as minimum shards") + maxShards = minShards + } + + clustersPerShard := cr.Spec.Controller.Sharding.ClustersPerShard + if clustersPerShard < 1 { + log.Info("clustersPerShard cannot be less than 1. 
Defaulting to 1.") + clustersPerShard = 1 + } + + clusterSecrets, err := r.getClusterSecrets(cr) + if err != nil { + // If we were not able to query cluster secrets, return the default count of replicas (ArgocdApplicationControllerDefaultReplicas) + log.Error(err, "Error retreiving cluster secrets for ArgoCD instance %s", cr.Name) + return replicas + } + + replicas = int32(len(clusterSecrets.Items)) / clustersPerShard - if cr.Spec.Controller.Sharding.Replicas != 0 && cr.Spec.Controller.Sharding.Enabled { - replicas = cr.Spec.Controller.Sharding.Replicas + if replicas < minShards { + replicas = minShards + } + + if replicas > maxShards { + replicas = maxShards + } + + return replicas + + } else if cr.Spec.Controller.Sharding.Replicas != 0 && cr.Spec.Controller.Sharding.Enabled { + return cr.Spec.Controller.Sharding.Replicas } + return replicas +} + +func (r *ReconcileArgoCD) reconcileApplicationControllerStatefulSet(cr *argoprojv1a1.ArgoCD, useTLSForRedis bool) error { + + replicas := r.getApplicationControllerReplicaCount(cr) + ss := newStatefulSetWithSuffix("application-controller", "application-controller", cr) ss.Spec.Replicas = &replicas controllerEnv := cr.Spec.Controller.Env diff --git a/controllers/argocd/statefulset_test.go b/controllers/argocd/statefulset_test.go index 406e56622..e1b027591 100644 --- a/controllers/argocd/statefulset_test.go +++ b/controllers/argocd/statefulset_test.go @@ -10,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" argoprojv1alpha1 "github.com/argoproj-labs/argocd-operator/api/v1alpha1" + "github.com/argoproj-labs/argocd-operator/controllers/argoutil" "github.com/google/go-cmp/cmp" appsv1 "k8s.io/api/apps/v1" @@ -450,3 +451,74 @@ func Test_ContainsValidImage(t *testing.T) { } } + +func TestReconcileArgoCD_reconcileApplicationController_withDynamicSharding(t *testing.T) { + logf.SetLogger(ZapLogger(true)) + + tests := []struct { + sharding argoprojv1alpha1.ArgoCDApplicationControllerShardSpec + expectedReplicas int32 + vars []corev1.EnvVar + }{ + { + sharding: argoprojv1alpha1.ArgoCDApplicationControllerShardSpec{ + Enabled: false, + Replicas: 1, + DynamicScalingEnabled: boolPtr(true), + MinShards: 2, + MaxShards: 4, + ClustersPerShard: 1, + }, + expectedReplicas: 3, + }, + { + // Replicas less than minimum shards + sharding: argoprojv1alpha1.ArgoCDApplicationControllerShardSpec{ + Enabled: false, + Replicas: 1, + DynamicScalingEnabled: boolPtr(true), + MinShards: 1, + MaxShards: 4, + ClustersPerShard: 3, + }, + expectedReplicas: 1, + }, + { + // Replicas more than maximum shards + sharding: argoprojv1alpha1.ArgoCDApplicationControllerShardSpec{ + Enabled: false, + Replicas: 1, + DynamicScalingEnabled: boolPtr(true), + MinShards: 1, + MaxShards: 2, + ClustersPerShard: 1, + }, + expectedReplicas: 2, + }, + } + + for _, st := range tests { + a := makeTestArgoCD(func(a *argoprojv1alpha1.ArgoCD) { + a.Spec.Controller.Sharding = st.sharding + }) + + clusterSecret1 := argoutil.NewSecretWithSuffix(a, "cluster1") + clusterSecret1.Labels = map[string]string{common.ArgoCDSecretTypeLabel: "cluster"} + + clusterSecret2 := argoutil.NewSecretWithSuffix(a, "cluster2") + clusterSecret2.Labels = map[string]string{common.ArgoCDSecretTypeLabel: "cluster"} + + clusterSecret3 := argoutil.NewSecretWithSuffix(a, "cluster3") + clusterSecret3.Labels = map[string]string{common.ArgoCDSecretTypeLabel: "cluster"} + + r := makeTestReconciler(t, a) + assert.NoError(t, r.Client.Create(context.TODO(), clusterSecret1)) + assert.NoError(t, r.Client.Create(context.TODO(), 
clusterSecret2)) + assert.NoError(t, r.Client.Create(context.TODO(), clusterSecret3)) + + replicas := r.getApplicationControllerReplicaCount(a) + + assert.Equal(t, int32(st.expectedReplicas), replicas) + + } +} diff --git a/controllers/argocd/util.go b/controllers/argocd/util.go index 5ef930eb7..4e835892c 100644 --- a/controllers/argocd/util.go +++ b/controllers/argocd/util.go @@ -944,7 +944,7 @@ func removeString(slice []string, s string) []string { } // setResourceWatches will register Watches for each of the supported Resources. -func (r *ReconcileArgoCD) setResourceWatches(bldr *builder.Builder, clusterResourceMapper, tlsSecretMapper, namespaceResourceMapper handler.MapFunc) *builder.Builder { +func (r *ReconcileArgoCD) setResourceWatches(bldr *builder.Builder, clusterResourceMapper, tlsSecretMapper, namespaceResourceMapper, clusterSecretResourceMapper handler.MapFunc) *builder.Builder { deploymentConfigPred := predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { @@ -1060,6 +1060,8 @@ func (r *ReconcileArgoCD) setResourceWatches(bldr *builder.Builder, clusterResou clusterResourceHandler := handler.EnqueueRequestsFromMapFunc(clusterResourceMapper) + clusterSecretResourceHandler := handler.EnqueueRequestsFromMapFunc(clusterSecretResourceMapper) + tlsSecretHandler := handler.EnqueueRequestsFromMapFunc(tlsSecretMapper) bldr.Watches(&source.Kind{Type: &v1.ClusterRoleBinding{}}, clusterResourceHandler) @@ -1069,6 +1071,12 @@ func (r *ReconcileArgoCD) setResourceWatches(bldr *builder.Builder, clusterResou // Watch for secrets of type TLS that might be created by external processes bldr.Watches(&source.Kind{Type: &corev1.Secret{Type: corev1.SecretTypeTLS}}, tlsSecretHandler) + // Watch for cluster secrets added to the argocd instance + bldr.Watches(&source.Kind{Type: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + common.ArgoCDManagedByClusterArgoCDLabel: "cluster", + }}}}, clusterSecretResourceHandler) + // Watch for changes to Secret sub-resources owned by ArgoCD instances. bldr.Owns(&appsv1.StatefulSet{}) diff --git a/deploy/olm-catalog/argocd-operator/0.7.0/argoproj.io_argocds.yaml b/deploy/olm-catalog/argocd-operator/0.7.0/argoproj.io_argocds.yaml index 9b86bd949..d47d3cabf 100644 --- a/deploy/olm-catalog/argocd-operator/0.7.0/argoproj.io_argocds.yaml +++ b/deploy/olm-catalog/argocd-operator/0.7.0/argoproj.io_argocds.yaml @@ -537,10 +537,31 @@ spec: description: Sharding contains the options for the Application Controller sharding configuration. properties: + clustersPerShard: + description: ClustersPerShard defines the maximum number of + clusters managed by each argocd shard + format: int32 + minimum: 1 + type: integer + dynamicScalingEnabled: + description: DynamicScalingEnabled defines whether dynamic + scaling should be enabled for Application Controller component + type: boolean enabled: description: Enabled defines whether sharding should be enabled on the Application Controller component. type: boolean + maxShards: + description: MaxShards defines the maximum number of shards + at any given point + format: int32 + type: integer + minShards: + description: MinShards defines the minimum number of shards + at any given point + format: int32 + minimum: 1 + type: integer replicas: description: Replicas defines the number of replicas to run in the Application controller shard. 
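The new watch registered in `setResourceWatches` above enqueues a reconcile whenever a Secret changes, and the check for the `argocd.argoproj.io/secret-type: cluster` label happens inside `clusterSecretResourceMapper`. Passing an object with labels to `source.Kind` does not by itself filter events, since controller-runtime only uses the object's type to pick the informer. A predicate is another common way to express the same filter; the sketch below uses the same controller-runtime APIs that already appear in this patch, with illustrative function and variable names that are not part of the PR.

```go
package argocd

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// addClusterSecretWatch wires a Secret watch to mapFn, but only lets events
// for Argo CD cluster secrets through. Names are illustrative.
func addClusterSecretWatch(bldr *builder.Builder, mapFn handler.MapFunc) *builder.Builder {
	onlyClusterSecrets := predicate.NewPredicateFuncs(func(o client.Object) bool {
		// Same label the test fixtures in this patch set on their cluster Secrets.
		return o.GetLabels()["argocd.argoproj.io/secret-type"] == "cluster"
	})
	return bldr.Watches(
		&source.Kind{Type: &corev1.Secret{}},
		handler.EnqueueRequestsFromMapFunc(mapFn),
		builder.WithPredicates(onlyClusterSecrets),
	)
}
```

Either way, the namespace lookup and single-instance check stay in the map function (as the patch does) to resolve which ArgoCD CR to enqueue.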
diff --git a/docs/reference/argocd.md b/docs/reference/argocd.md
index aaf9eb2a7..1b11c86ce 100644
--- a/docs/reference/argocd.md
+++ b/docs/reference/argocd.md
@@ -146,16 +146,20 @@ spec:
 The following properties are available for configuring the Argo CD Application Controller component.
 
-Name | Default | Description
---- | --- | ---
-Processors.Operation | 10 | The number of operation processors.
-Processors.Status | 20 | The number of status processors.
-Resources | [Empty] | The container compute resources.
-LogLevel | info | The log level to be used by the ArgoCD Application Controller component. Valid options are debug, info, error, and warn.
-AppSync | 3m | AppSync is used to control the sync frequency of ArgoCD Applications
-Sharding.enabled | false | Whether to enable sharding on the ArgoCD Application Controller component. Useful when managing a large number of clusters to relieve memory pressure on the controller component.
-Sharding.replicas | 1 | The number of replicas that will be used to support sharding of the ArgoCD Application Controller.
-Env | [Empty] | Environment to set for the application controller workloads
+Name | Default | Description | Validation Criteria |
+--- | --- | --- | ---
+Processors.Operation | 10 | The number of operation processors. | |
+Processors.Status | 20 | The number of status processors. | |
+Resources | [Empty] | The container compute resources. | |
+LogLevel | info | The log level to be used by the ArgoCD Application Controller component. | Valid options are debug, info, error, and warn. |
+AppSync | 3m | AppSync is used to control the sync frequency of ArgoCD Applications | |
+Sharding.enabled | false | Whether to enable sharding on the ArgoCD Application Controller component. Useful when managing a large number of clusters to relieve memory pressure on the controller component. | |
+Sharding.replicas | 1 | The number of replicas that will be used to support sharding of the ArgoCD Application Controller. | Must be greater than 0 |
+Env | [Empty] | Environment to set for the application controller workloads | |
+Sharding.dynamicScalingEnabled | true | Whether to enable dynamic scaling of the ArgoCD Application Controller component. This will ignore the configuration of `Sharding.enabled` and `Sharding.replicas` | |
+Sharding.minShards | 1 | The minimum number of replicas of the ArgoCD Application Controller component. | Must be greater than 0 |
+Sharding.maxShards | 1 | The maximum number of replicas of the ArgoCD Application Controller component. | Must be greater than `Sharding.minShards` |
+Sharding.clustersPerShard | 1 | The number of clusters that need to be handled by each shard. In case the replica count has reached the maxShards, the shards will manage more than one cluster. | Must be greater than 0 |
 
 ### Controller Example
 
@@ -192,6 +196,44 @@ spec:
         value: '120'
 ```
 
+The following example shows how to set multiple replicas of the Argo CD Application Controller. This example will scale the Argo CD Application Controller up or down based on the `clustersPerShard` parameter. The number of replicas will be set between `minShards` and `maxShards`.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ArgoCD
+metadata:
+  name: example-argocd
+  labels:
+    example: controller
+spec:
+  controller:
+    sharding:
+      dynamicScalingEnabled: true
+      minShards: 2
+      maxShards: 5
+      clustersPerShard: 10
+```
+
+!!! note
+    In case the number of replicas required is less than the minShards, the number of replicas will be set as minShards.
Similarly, if the required number of replicas exceeds maxShards, the replica count will be set as maxShards. + + +The following example shows how to enable dynamic scaling of the ArgoCD Application Controller component. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: example-argocd + labels: + example: controller +spec: + controller: + sharding: + enabled: true + replicas: 5 +``` + ## Dex Options !!! warning diff --git a/tests/k8s/1-032_validate_dynamic_scaling/01-assert.yaml b/tests/k8s/1-032_validate_dynamic_scaling/01-assert.yaml new file mode 100644 index 000000000..dc419f168 --- /dev/null +++ b/tests/k8s/1-032_validate_dynamic_scaling/01-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: argocd +status: + phase: Available \ No newline at end of file diff --git a/tests/k8s/1-032_validate_dynamic_scaling/01-install.yaml b/tests/k8s/1-032_validate_dynamic_scaling/01-install.yaml new file mode 100644 index 000000000..b70c30661 --- /dev/null +++ b/tests/k8s/1-032_validate_dynamic_scaling/01-install.yaml @@ -0,0 +1,16 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: argocd +spec: + controller: + sharding: + dynamicScalingEnabled: true + minShards: 1 + maxShards: 4 + clustersPerShard: 1 +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: sleep 30s \ No newline at end of file diff --git a/tests/k8s/1-032_validate_dynamic_scaling/02-add-cluster-secrets.yaml b/tests/k8s/1-032_validate_dynamic_scaling/02-add-cluster-secrets.yaml new file mode 100644 index 000000000..92e4d7e56 --- /dev/null +++ b/tests/k8s/1-032_validate_dynamic_scaling/02-add-cluster-secrets.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cluster-1 + labels: + "argocd.argoproj.io/secret-type": "cluster" +stringData: + name: mycluster-1.com + server: https://mycluster-1.com +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: cluster-2 + labels: + "argocd.argoproj.io/secret-type": "cluster" +type: Opaque +stringData: + name: mycluster-2.com + server: https://mycluster-2.com +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: sleep 30s diff --git a/tests/k8s/1-032_validate_dynamic_scaling/02-assert.yaml b/tests/k8s/1-032_validate_dynamic_scaling/02-assert.yaml new file mode 100644 index 000000000..2a7afd31e --- /dev/null +++ b/tests/k8s/1-032_validate_dynamic_scaling/02-assert.yaml @@ -0,0 +1,17 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 720 +--- +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: argocd +status: + phase: Available +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: argocd-application-controller +status: + replicas: 3 diff --git a/tests/k8s/1-032_validate_dynamic_scaling/03-assert.yaml b/tests/k8s/1-032_validate_dynamic_scaling/03-assert.yaml new file mode 100644 index 000000000..cd383a7ac --- /dev/null +++ b/tests/k8s/1-032_validate_dynamic_scaling/03-assert.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: argocd +status: + phase: Available +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: argocd-application-controller +status: + replicas: 4 +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 720 +commands: +- script: | + stsReplicas=$(kubectl get sts argocd-application-controller -n $NAMESPACE -o jsonpath='{.status.replicas}') + echo "$stsReplicas" + if test "$stsReplicas" != "4"; then + echo "FAILED! 
Number of replicas not equal to maxShards" + exit 1 + fi + exit 0 diff --git a/tests/k8s/1-032_validate_dynamic_scaling/03-cluster-secrets-exceed-maxShards.yaml b/tests/k8s/1-032_validate_dynamic_scaling/03-cluster-secrets-exceed-maxShards.yaml new file mode 100644 index 000000000..53d1e14a1 --- /dev/null +++ b/tests/k8s/1-032_validate_dynamic_scaling/03-cluster-secrets-exceed-maxShards.yaml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cluster-3 + labels: + "argocd.argoproj.io/secret-type": "cluster" +stringData: + name: mycluster-3.com + server: https://mycluster-3.com +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + name: cluster-4 + labels: + "argocd.argoproj.io/secret-type": "cluster" +type: Opaque +stringData: + name: mycluster-4.com + server: https://mycluster-4.com +--- +apiVersion: v1 +kind: Secret +metadata: + name: cluster-5 + labels: + "argocd.argoproj.io/secret-type": "cluster" +type: Opaque +stringData: + name: mycluster-5.com + server: https://mycluster-5.com +--- +apiVersion: v1 +kind: Secret +metadata: + name: cluster-6 + labels: + "argocd.argoproj.io/secret-type": "cluster" +type: Opaque +stringData: + name: mycluster-6.com + server: https://mycluster-6.com +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: sleep 60s \ No newline at end of file diff --git a/tests/k8s/1-032_validate_dynamic_scaling/04-assert.yaml b/tests/k8s/1-032_validate_dynamic_scaling/04-assert.yaml new file mode 100644 index 000000000..df1df20ca --- /dev/null +++ b/tests/k8s/1-032_validate_dynamic_scaling/04-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: argocd +status: + phase: Available +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: argocd-application-controller +status: + replicas: 2 diff --git a/tests/k8s/1-032_validate_dynamic_scaling/04-delete-cluster-secret.yaml b/tests/k8s/1-032_validate_dynamic_scaling/04-delete-cluster-secret.yaml new file mode 100644 index 000000000..fd0de8408 --- /dev/null +++ b/tests/k8s/1-032_validate_dynamic_scaling/04-delete-cluster-secret.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: sleep 60s + - command: kubectl delete Secret cluster-2 cluster-3 cluster-4 cluster-5 cluster-6 -n $NAMESPACE + # Sleep to allow resources to be completely deleted + - command: sleep 30s From 7d597811656a172c9ceb5a434a8aada43990dbc2 Mon Sep 17 00:00:00 2001 From: Kevin Huber Date: Tue, 13 Jun 2023 14:12:29 +0200 Subject: [PATCH 15/19] Read the reconciliation timeout from the appSync setting (#926) Signed-off-by: ItsKev Signed-off-by: Yi Cai --- api/v1alpha1/argocd_types.go | 2 +- api/v1alpha1/zz_generated.deepcopy.go | 6 ----- controllers/argocd/deployment_test.go | 6 ----- controllers/argocd/statefulset.go | 7 ++++++ controllers/argocd/statefulset_test.go | 32 ++++++++++++++++++++++++++ controllers/argocd/util.go | 4 ---- controllers/argocd/util_test.go | 24 ------------------- 7 files changed, 40 insertions(+), 41 deletions(-) diff --git a/api/v1alpha1/argocd_types.go b/api/v1alpha1/argocd_types.go index 72fdb5e3c..925492e30 100644 --- a/api/v1alpha1/argocd_types.go +++ b/api/v1alpha1/argocd_types.go @@ -98,7 +98,7 @@ type ArgoCDApplicationControllerSpec struct { // Set this to a duration, e.g. 10m or 600s to control the synchronisation // frequency. 
// +optional - AppSync *metav1.Duration `json:"appSync,omitempty"` + AppSync string `json:"appSync,omitempty"` // Sharding contains the options for the Application Controller sharding configuration. Sharding ArgoCDApplicationControllerShardSpec `json:"sharding,omitempty"` diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 7ea277a89..ad0a78075 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -26,7 +26,6 @@ import ( autoscalingv1 "k8s.io/api/autoscaling/v1" "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -101,11 +100,6 @@ func (in *ArgoCDApplicationControllerSpec) DeepCopyInto(out *ArgoCDApplicationCo *out = new(v1.ResourceRequirements) (*in).DeepCopyInto(*out) } - if in.AppSync != nil { - in, out := &in.AppSync, &out.AppSync - *out = new(metav1.Duration) - **out = **in - } in.Sharding.DeepCopyInto(&out.Sharding) if in.Env != nil { in, out := &in.Env, &out.Env diff --git a/controllers/argocd/deployment_test.go b/controllers/argocd/deployment_test.go index d2fdc36c4..1b217840e 100644 --- a/controllers/argocd/deployment_test.go +++ b/controllers/argocd/deployment_test.go @@ -5,7 +5,6 @@ import ( "reflect" "strings" "testing" - "time" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -1359,11 +1358,6 @@ func operationProcessors(n int32) argoCDOpt { } } -func appSync(d time.Duration) argoCDOpt { - return func(a *argoprojv1alpha1.ArgoCD) { - a.Spec.Controller.AppSync = &metav1.Duration{Duration: d} - } -} func Test_UpdateNodePlacement(t *testing.T) { deployment := &appsv1.Deployment{ diff --git a/controllers/argocd/statefulset.go b/controllers/argocd/statefulset.go index 005a91121..cd7c5a5c6 100644 --- a/controllers/argocd/statefulset.go +++ b/controllers/argocd/statefulset.go @@ -448,6 +448,13 @@ func getArgoControllerContainerEnv(cr *argoprojv1a1.ArgoCD) []corev1.EnvVar { }) } + if cr.Spec.Controller.AppSync != "" { + env = append(env, corev1.EnvVar{ + Name: "ARGOCD_RECONCILIATION_TIMEOUT", + Value: cr.Spec.Controller.AppSync, + }) + } + return env } diff --git a/controllers/argocd/statefulset_test.go b/controllers/argocd/statefulset_test.go index e1b027591..8f7c95392 100644 --- a/controllers/argocd/statefulset_test.go +++ b/controllers/argocd/statefulset_test.go @@ -372,6 +372,38 @@ func TestReconcileArgoCD_reconcileApplicationController_withSharding(t *testing. 
} } +func TestReconcileArgoCD_reconcileApplicationController_withAppSync(t *testing.T) { + + expectedEnv := []corev1.EnvVar{ + {Name: "ARGOCD_RECONCILIATION_TIMEOUT", Value: "10m"}, + {Name: "HOME", Value: "/home/argocd"}, + } + + a := makeTestArgoCD(func(a *argoprojv1alpha1.ArgoCD) { + a.Spec.Controller.AppSync = "10m" + }) + r := makeTestReconciler(t, a) + + assert.NoError(t, r.reconcileApplicationControllerStatefulSet(a, false)) + + ss := &appsv1.StatefulSet{} + assert.NoError(t, r.Client.Get( + context.TODO(), + types.NamespacedName{ + Name: "argocd-application-controller", + Namespace: a.Namespace, + }, + ss)) + + env := ss.Spec.Template.Spec.Containers[0].Env + + diffEnv := cmp.Diff(env, expectedEnv) + + if diffEnv != "" { + t.Fatalf("Reconciliation of EnvVars failed:\n%s", diffEnv) + } +} + func Test_UpdateNodePlacementStateful(t *testing.T) { ss := &appsv1.StatefulSet{ diff --git a/controllers/argocd/util.go b/controllers/argocd/util.go index 4e835892c..119f23b05 100644 --- a/controllers/argocd/util.go +++ b/controllers/argocd/util.go @@ -141,10 +141,6 @@ func getArgoApplicationControllerCommand(cr *argoprojv1a1.ArgoCD, useTLSForRedis cmd = append(cmd, "--application-namespaces", fmt.Sprint(strings.Join(cr.Spec.SourceNamespaces, ","))) } - if cr.Spec.Controller.AppSync != nil { - cmd = append(cmd, "--app-resync", strconv.FormatInt(int64(cr.Spec.Controller.AppSync.Seconds()), 10)) - } - cmd = append(cmd, "--loglevel") cmd = append(cmd, getLogLevel(cr.Spec.Controller.LogLevel)) diff --git a/controllers/argocd/util_test.go b/controllers/argocd/util_test.go index 4ac28f618..bce11586a 100644 --- a/controllers/argocd/util_test.go +++ b/controllers/argocd/util_test.go @@ -6,7 +6,6 @@ import ( "reflect" "strings" "testing" - "time" "github.com/stretchr/testify/assert" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -358,29 +357,6 @@ func TestGetArgoApplicationControllerCommand(t *testing.T) { "text", }, }, - { - "configured appSync", - []argoCDOpt{appSync(time.Minute * 10)}, - []string{ - "argocd-application-controller", - "--operation-processors", - "10", - "--redis", - "argocd-redis.argocd.svc.cluster.local:6379", - "--repo-server", - "argocd-repo-server.argocd.svc.cluster.local:8081", - "--status-processors", - "20", - "--kubectl-parallelism-limit", - "10", - "--app-resync", - "600", - "--loglevel", - "info", - "--logformat", - "text", - }, - }, { "configured parallelism limit", []argoCDOpt{parallelismLimit(30)}, From 0a44b96260751133d9f4f4d3e3b5bed21c6de90e Mon Sep 17 00:00:00 2001 From: Jaideep Rao Date: Tue, 13 Jun 2023 08:30:14 -0400 Subject: [PATCH 16/19] feat: add e2e-test to verify hpa handling behavior (#904) * upgrade golangci-lint Signed-off-by: Jaideep Rao * add kuttl test to verify hpa handling behavior Signed-off-by: Jaideep Rao * fix typo in assert file name Signed-off-by: Jaideep Rao --------- Signed-off-by: Jaideep Rao Signed-off-by: Yi Cai --- .../1-032_validate_server_hpa/01-assert.yaml | 19 +++++++++++++++++++ .../1-032_validate_server_hpa/01-install.yaml | 7 +++++++ .../02-enable-server-autoscale.yaml | 17 +++++++++++++++++ .../03-check-server-replicas.yaml | 14 ++++++++++++++ .../1-032_validate_server_hpa/04-assert.yaml | 18 ++++++++++++++++++ .../04-update-hpa.yaml | 17 +++++++++++++++++ 6 files changed, 92 insertions(+) create mode 100644 tests/k8s/1-032_validate_server_hpa/01-assert.yaml create mode 100644 tests/k8s/1-032_validate_server_hpa/01-install.yaml create mode 100644 tests/k8s/1-032_validate_server_hpa/02-enable-server-autoscale.yaml create mode 100644 
tests/k8s/1-032_validate_server_hpa/03-check-server-replicas.yaml create mode 100644 tests/k8s/1-032_validate_server_hpa/04-assert.yaml create mode 100644 tests/k8s/1-032_validate_server_hpa/04-update-hpa.yaml diff --git a/tests/k8s/1-032_validate_server_hpa/01-assert.yaml b/tests/k8s/1-032_validate_server_hpa/01-assert.yaml new file mode 100644 index 000000000..c0f46d127 --- /dev/null +++ b/tests/k8s/1-032_validate_server_hpa/01-assert.yaml @@ -0,0 +1,19 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: example-argocd +status: + phase: Available +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: example-argocd-server +spec: + replicas: 2 +status: + conditions: + - type: Available + status: 'True' + - type: Progressing + status: 'True' \ No newline at end of file diff --git a/tests/k8s/1-032_validate_server_hpa/01-install.yaml b/tests/k8s/1-032_validate_server_hpa/01-install.yaml new file mode 100644 index 000000000..8823faff2 --- /dev/null +++ b/tests/k8s/1-032_validate_server_hpa/01-install.yaml @@ -0,0 +1,7 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: example-argocd +spec: + server: + replicas: 2 \ No newline at end of file diff --git a/tests/k8s/1-032_validate_server_hpa/02-enable-server-autoscale.yaml b/tests/k8s/1-032_validate_server_hpa/02-enable-server-autoscale.yaml new file mode 100644 index 000000000..a94c883c7 --- /dev/null +++ b/tests/k8s/1-032_validate_server_hpa/02-enable-server-autoscale.yaml @@ -0,0 +1,17 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: example-argocd +spec: + server: + replicas: 2 + autoscale: + enabled: true + hpa: + minReplicas: 4 + maxReplicas: 7 + targetCPUUtilizationPercentage: 50 + scaleTargetRef: + kind: deployment + apiVersion: apps/v1 + name: example-argocd-server \ No newline at end of file diff --git a/tests/k8s/1-032_validate_server_hpa/03-check-server-replicas.yaml b/tests/k8s/1-032_validate_server_hpa/03-check-server-replicas.yaml new file mode 100644 index 000000000..02cce48e2 --- /dev/null +++ b/tests/k8s/1-032_validate_server_hpa/03-check-server-replicas.yaml @@ -0,0 +1,14 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- script: sleep 45 +- script: | + set -e + serverReplicas=$(kubectl get -n $NAMESPACE deployment/example-argocd-server -o jsonpath='{.spec.replicas}') + + if [ "$serverReplicas" -ge 4 ] && [ "$serverReplicas" -le 7 ]; then + echo "server replica count matches expectation" + exit 0 + fi + exit 1 + done \ No newline at end of file diff --git a/tests/k8s/1-032_validate_server_hpa/04-assert.yaml b/tests/k8s/1-032_validate_server_hpa/04-assert.yaml new file mode 100644 index 000000000..3fafb2f0b --- /dev/null +++ b/tests/k8s/1-032_validate_server_hpa/04-assert.yaml @@ -0,0 +1,18 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: example-argocd-server +spec: + maxReplicas: 12 + metrics: + - resource: + name: cpu + target: + averageUtilization: 50 + type: Utilization + type: Resource + minReplicas: 8 + scaleTargetRef: + apiVersion: apps/v1 + kind: deployment + name: example-argocd-server diff --git a/tests/k8s/1-032_validate_server_hpa/04-update-hpa.yaml b/tests/k8s/1-032_validate_server_hpa/04-update-hpa.yaml new file mode 100644 index 000000000..bbccdccc6 --- /dev/null +++ b/tests/k8s/1-032_validate_server_hpa/04-update-hpa.yaml @@ -0,0 +1,17 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ArgoCD +metadata: + name: example-argocd +spec: + server: + replicas: 2 + autoscale: + enabled: true + hpa: + minReplicas: 8 
+ maxReplicas: 12 + targetCPUUtilizationPercentage: 50 + scaleTargetRef: + kind: deployment + apiVersion: apps/v1 + name: example-argocd-server \ No newline at end of file From db0bb53fed0bb03ad1e55da601e56782d947ef7f Mon Sep 17 00:00:00 2001 From: Kevin Huber Date: Tue, 13 Jun 2023 18:50:32 +0200 Subject: [PATCH 17/19] fix: Revert appSync type implemented in #926 (#935) Signed-off-by: ItsKev Signed-off-by: Yi Cai --- api/v1alpha1/argocd_types.go | 2 +- api/v1alpha1/zz_generated.deepcopy.go | 6 ++++++ controllers/argocd/statefulset.go | 5 +++-- controllers/argocd/statefulset_test.go | 5 +++-- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/api/v1alpha1/argocd_types.go b/api/v1alpha1/argocd_types.go index 925492e30..72fdb5e3c 100644 --- a/api/v1alpha1/argocd_types.go +++ b/api/v1alpha1/argocd_types.go @@ -98,7 +98,7 @@ type ArgoCDApplicationControllerSpec struct { // Set this to a duration, e.g. 10m or 600s to control the synchronisation // frequency. // +optional - AppSync string `json:"appSync,omitempty"` + AppSync *metav1.Duration `json:"appSync,omitempty"` // Sharding contains the options for the Application Controller sharding configuration. Sharding ArgoCDApplicationControllerShardSpec `json:"sharding,omitempty"` diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index ad0a78075..7ea277a89 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -26,6 +26,7 @@ import ( autoscalingv1 "k8s.io/api/autoscaling/v1" "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -100,6 +101,11 @@ func (in *ArgoCDApplicationControllerSpec) DeepCopyInto(out *ArgoCDApplicationCo *out = new(v1.ResourceRequirements) (*in).DeepCopyInto(*out) } + if in.AppSync != nil { + in, out := &in.AppSync, &out.AppSync + *out = new(metav1.Duration) + **out = **in + } in.Sharding.DeepCopyInto(&out.Sharding) if in.Env != nil { in, out := &in.Env, &out.Env diff --git a/controllers/argocd/statefulset.go b/controllers/argocd/statefulset.go index cd7c5a5c6..d8d1776d3 100644 --- a/controllers/argocd/statefulset.go +++ b/controllers/argocd/statefulset.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "reflect" + "strconv" "time" appsv1 "k8s.io/api/apps/v1" @@ -448,10 +449,10 @@ func getArgoControllerContainerEnv(cr *argoprojv1a1.ArgoCD) []corev1.EnvVar { }) } - if cr.Spec.Controller.AppSync != "" { + if cr.Spec.Controller.AppSync != nil { env = append(env, corev1.EnvVar{ Name: "ARGOCD_RECONCILIATION_TIMEOUT", - Value: cr.Spec.Controller.AppSync, + Value: strconv.FormatInt(int64(cr.Spec.Controller.AppSync.Seconds()), 10) + "s", }) } diff --git a/controllers/argocd/statefulset_test.go b/controllers/argocd/statefulset_test.go index 8f7c95392..4a454b4ee 100644 --- a/controllers/argocd/statefulset_test.go +++ b/controllers/argocd/statefulset_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "testing" + "time" resourcev1 "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -375,12 +376,12 @@ func TestReconcileArgoCD_reconcileApplicationController_withSharding(t *testing. 
func TestReconcileArgoCD_reconcileApplicationController_withAppSync(t *testing.T) { expectedEnv := []corev1.EnvVar{ - {Name: "ARGOCD_RECONCILIATION_TIMEOUT", Value: "10m"}, + {Name: "ARGOCD_RECONCILIATION_TIMEOUT", Value: "600s"}, {Name: "HOME", Value: "/home/argocd"}, } a := makeTestArgoCD(func(a *argoprojv1alpha1.ArgoCD) { - a.Spec.Controller.AppSync = "10m" + a.Spec.Controller.AppSync = &metav1.Duration{Duration: time.Minute * 10} }) r := makeTestReconciler(t, a) From 0602da7fbb6074cf14b0f76742c2c9259eb20ac5 Mon Sep 17 00:00:00 2001 From: Arthur Vardevanyan Date: Wed, 14 Jun 2023 07:50:11 -0400 Subject: [PATCH 18/19] Update Operator Role for Monitoring Flag (#925) Signed-off-by: Arthur Signed-off-by: Yi Cai --- bundle/manifests/argocd-operator.clusterserviceversion.yaml | 1 + config/rbac/role.yaml | 1 + controllers/argocd/argocd_controller.go | 2 +- .../0.7.0/argocd-operator.v0.7.0.clusterserviceversion.yaml | 1 + 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/bundle/manifests/argocd-operator.clusterserviceversion.yaml b/bundle/manifests/argocd-operator.clusterserviceversion.yaml index 166a5cee9..980787b9c 100644 --- a/bundle/manifests/argocd-operator.clusterserviceversion.yaml +++ b/bundle/manifests/argocd-operator.clusterserviceversion.yaml @@ -1004,6 +1004,7 @@ spec: - monitoring.coreos.com resources: - prometheuses + - prometheusrules - servicemonitors verbs: - '*' diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 3dab5fd13..b0e378d1c 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -99,6 +99,7 @@ rules: - monitoring.coreos.com resources: - prometheuses + - prometheusrules - servicemonitors verbs: - '*' diff --git a/controllers/argocd/argocd_controller.go b/controllers/argocd/argocd_controller.go index a8cadae70..e2e7abb00 100644 --- a/controllers/argocd/argocd_controller.go +++ b/controllers/argocd/argocd_controller.go @@ -57,7 +57,7 @@ var log = logr.Log.WithName("controller_argocd") //+kubebuilder:rbac:groups=batch,resources=cronjobs;jobs,verbs=* //+kubebuilder:rbac:groups=config.openshift.io,resources=clusterversions,verbs=get;list;watch //+kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=* -//+kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheuses;servicemonitors,verbs=* +//+kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheuses;prometheusrules;servicemonitors,verbs=* //+kubebuilder:rbac:groups=route.openshift.io,resources=routes;routes/custom-host,verbs=* //+kubebuilder:rbac:groups=argoproj.io,resources=applications;appprojects,verbs=* //+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=*,verbs=* diff --git a/deploy/olm-catalog/argocd-operator/0.7.0/argocd-operator.v0.7.0.clusterserviceversion.yaml b/deploy/olm-catalog/argocd-operator/0.7.0/argocd-operator.v0.7.0.clusterserviceversion.yaml index 166a5cee9..980787b9c 100644 --- a/deploy/olm-catalog/argocd-operator/0.7.0/argocd-operator.v0.7.0.clusterserviceversion.yaml +++ b/deploy/olm-catalog/argocd-operator/0.7.0/argocd-operator.v0.7.0.clusterserviceversion.yaml @@ -1004,6 +1004,7 @@ spec: - monitoring.coreos.com resources: - prometheuses + - prometheusrules - servicemonitors verbs: - '*' From 1e74381e878eb3107f115b6d6b79a61679322ad2 Mon Sep 17 00:00:00 2001 From: Yi Cai Date: Tue, 20 Jun 2023 17:46:12 -0400 Subject: [PATCH 19/19] Fix failed e2e tests Signed-off-by: Yi Cai --- .../01-assert.yaml | 4 ---- .../04-assert.yaml | 4 ---- .../02-check-deployment.yaml | 12 ++++++---- .../04-check-deployment.yaml | 12 ++++++---- 
.../06-check-deployment.yaml | 22 ++++++++++++------- .../05-verify-email.yaml | 15 +++---------- .../03-delete-argocd.yaml | 12 ++++++---- .../99-delete.yaml | 2 -- .../99-errors.yaml | 14 ++++++++++++ 9 files changed, 55 insertions(+), 42 deletions(-) create mode 100644 tests/k8s/1-024_validate_apps_in_any_namespace/99-errors.yaml diff --git a/tests/ha/1-020_validate_redis_ha_nonha/01-assert.yaml b/tests/ha/1-020_validate_redis_ha_nonha/01-assert.yaml index 77e2f772f..3fdf539dc 100644 --- a/tests/ha/1-020_validate_redis_ha_nonha/01-assert.yaml +++ b/tests/ha/1-020_validate_redis_ha_nonha/01-assert.yaml @@ -1,7 +1,3 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -timeout: 720 ---- apiVersion: v1 kind: Service metadata: diff --git a/tests/ha/1-020_validate_redis_ha_nonha/04-assert.yaml b/tests/ha/1-020_validate_redis_ha_nonha/04-assert.yaml index 2d48ff872..c48877c14 100644 --- a/tests/ha/1-020_validate_redis_ha_nonha/04-assert.yaml +++ b/tests/ha/1-020_validate_redis_ha_nonha/04-assert.yaml @@ -1,7 +1,3 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -timeout: 720 ---- apiVersion: argoproj.io/v1alpha1 kind: ArgoCD metadata: diff --git a/tests/k8s/1-014_validate_parallelism_limit/02-check-deployment.yaml b/tests/k8s/1-014_validate_parallelism_limit/02-check-deployment.yaml index b60954eb0..d2b23a0bb 100644 --- a/tests/k8s/1-014_validate_parallelism_limit/02-check-deployment.yaml +++ b/tests/k8s/1-014_validate_parallelism_limit/02-check-deployment.yaml @@ -1,13 +1,17 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: -- script: sleep 30 - script: | set -e - expected=10 - wlCommand=$(kubectl get -n $NAMESPACE statefulset/argocd-application-controller -o jsonpath='{.spec.template.spec.containers[0].command}'| jq -r '.[]' ) - if ! echo "$wlCommand" | grep -qPz -- "--kubectl-parallelism-limit\\n${expected}(\$|\\n)"; then + expected="10" + wlCommand=$(kubectl get -n $NAMESPACE statefulset/argocd-application-controller -o jsonpath='{.spec.template.spec.containers[0].command}'| jq -r '.[]') + if ! echo "$wlCommand" | grep -Fxq -- "--kubectl-parallelism-limit"; then echo "Incorrect or missing --kubectl-parallelism-limit detected." echo "$wlCommand" exit 1 + fi + if ! echo "$wlCommand" | grep -Fxq -- "$expected"; then + echo "Incorrect value for --kubectl-parallelism-limit detected." + echo "$wlCommand" + exit 1 fi \ No newline at end of file diff --git a/tests/k8s/1-014_validate_parallelism_limit/04-check-deployment.yaml b/tests/k8s/1-014_validate_parallelism_limit/04-check-deployment.yaml index edbdbe5eb..8c710be9b 100644 --- a/tests/k8s/1-014_validate_parallelism_limit/04-check-deployment.yaml +++ b/tests/k8s/1-014_validate_parallelism_limit/04-check-deployment.yaml @@ -1,13 +1,17 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: -- script: sleep 30 - script: | set -e - expected=20 - wlCommand=$(kubectl get -n $NAMESPACE statefulset/argocd-application-controller -o jsonpath='{.spec.template.spec.containers[0].command}'| jq -r '.[]' ) - if ! echo "$wlCommand" | grep -qPz -- "--kubectl-parallelism-limit\\n${expected}(\$|\\n)"; then + expected="20" + wlCommand=$(kubectl get -n $NAMESPACE statefulset/argocd-application-controller -o jsonpath='{.spec.template.spec.containers[0].command}'| jq -r '.[]') + if ! echo "$wlCommand" | grep -Fxq -- "--kubectl-parallelism-limit"; then echo "Incorrect or missing --kubectl-parallelism-limit detected." echo "$wlCommand" exit 1 + fi + if ! 
echo "$wlCommand" | grep -Fxq -- "$expected"; then + echo "Incorrect value for --kubectl-parallelism-limit detected." + echo "$wlCommand" + exit 1 fi \ No newline at end of file diff --git a/tests/k8s/1-014_validate_parallelism_limit/06-check-deployment.yaml b/tests/k8s/1-014_validate_parallelism_limit/06-check-deployment.yaml index 753984460..f5eed3237 100644 --- a/tests/k8s/1-014_validate_parallelism_limit/06-check-deployment.yaml +++ b/tests/k8s/1-014_validate_parallelism_limit/06-check-deployment.yaml @@ -1,16 +1,22 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep -commands: -- script: | - set -e +wait: +- condition: command-exit-code + timeout: 30s + command: | kubectl patch -n $NAMESPACE argocds/argocd --type=json --patch '[{"op": "remove", "path": "/spec/controller/parallelismLimit"}]' -- script: sleep 30 + successCodes: [0] - script: | set -e - expected=10 - wlCommand=$(kubectl get -n $NAMESPACE statefulset/argocd-application-controller -o jsonpath='{.spec.template.spec.containers[0].command}'| jq -r '.[]' ) - if ! echo "$wlCommand" | grep -qPz -- "--kubectl-parallelism-limit\\n${expected}(\$|\\n)"; then + expected="10" + wlCommand=$(kubectl get -n $NAMESPACE statefulset/argocd-application-controller -o jsonpath='{.spec.template.spec.containers[0].command}'| jq -r '.[]') + if ! echo "$wlCommand" | grep -Fxq -- "--kubectl-parallelism-limit"; then echo "Incorrect or missing --kubectl-parallelism-limit detected." echo "$wlCommand" exit 1 - fi \ No newline at end of file + fi + if ! echo "$wlCommand" | grep -Fxq -- "$expected"; then + echo "Incorrect value for --kubectl-parallelism-limit detected." + echo "$wlCommand" + exit 1 + fi diff --git a/tests/k8s/1-022_validate_notifications/05-verify-email.yaml b/tests/k8s/1-022_validate_notifications/05-verify-email.yaml index 239621b07..97f5c7381 100644 --- a/tests/k8s/1-022_validate_notifications/05-verify-email.yaml +++ b/tests/k8s/1-022_validate_notifications/05-verify-email.yaml @@ -2,16 +2,7 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - script: | - set -e - smtp4dev_pod=$(kubectl get pod -l=app=smtp4dev -o NAME -n $NAMESPACE) - exit_code=$(kubectl -n $NAMESPACE exec -i --stdin "${smtp4dev_pod}" -- /bin/bash \ - -c 'if [[ $(grep -rnw /tmp -e "Subject: Application my-app-3 has been created.") ]]; then - exit 0; else - exit 1; - fi') + set -euo pipefail - if [ $exit_code=0 ]; then - exit 0 - else - exit 1 - fi \ No newline at end of file + smtp4dev_pod=$(kubectl get pod -l=app=smtp4dev -o jsonpath='{.items[0].metadata.name}' -n $NAMESPACE) + kubectl -n $NAMESPACE exec -i "${smtp4dev_pod}" -- /bin/bash -c 'grep -rnw /tmp -e "Subject: Application my-app-3 has been created."' diff --git a/tests/k8s/1-024_validate_apps_in_any_namespace/03-delete-argocd.yaml b/tests/k8s/1-024_validate_apps_in_any_namespace/03-delete-argocd.yaml index 8f02301dc..fe41808c5 100644 --- a/tests/k8s/1-024_validate_apps_in_any_namespace/03-delete-argocd.yaml +++ b/tests/k8s/1-024_validate_apps_in_any_namespace/03-delete-argocd.yaml @@ -1,4 +1,3 @@ ---- apiVersion: kuttl.dev/v1beta1 kind: TestStep delete: @@ -6,6 +5,11 @@ delete: kind: ArgoCD name: example-argocd namespace: central-argocd -commands: - # Sleep to allow resources to be completely deleted - - command: sleep 30s \ No newline at end of file +wait: +- condition: delete + timeout: 30s + selector: + apiVersion: argoproj.io/v1alpha1 + kind: ArgoCD + name: example-argocd + namespace: central-argocd diff --git a/tests/k8s/1-024_validate_apps_in_any_namespace/99-delete.yaml 
b/tests/k8s/1-024_validate_apps_in_any_namespace/99-delete.yaml index 0d87c9379..b0dd05839 100644 --- a/tests/k8s/1-024_validate_apps_in_any_namespace/99-delete.yaml +++ b/tests/k8s/1-024_validate_apps_in_any_namespace/99-delete.yaml @@ -11,5 +11,3 @@ delete: - apiVersion: v1 kind: Namespace name: central-argocd -commands: - - command: sleep 30s \ No newline at end of file diff --git a/tests/k8s/1-024_validate_apps_in_any_namespace/99-errors.yaml b/tests/k8s/1-024_validate_apps_in_any_namespace/99-errors.yaml new file mode 100644 index 000000000..05de49f4c --- /dev/null +++ b/tests/k8s/1-024_validate_apps_in_any_namespace/99-errors.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: test-1-24-custom +--- +apiVersion: v1 +kind: Namespace +metadata: + name: test-2-24-custom +--- +apiVersion: v1 +kind: Namespace +metadata: + name: central-argocd \ No newline at end of file
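The final patch above replaces fixed `sleep` steps with checks that either assert on state (kuttl assert/errors files) or poll until a condition holds. For Go-based tests, the equivalent poll-instead-of-sleep pattern is usually written with the apimachinery wait helpers; the sketch below is illustrative only and is not part of this series, and the helper name and timeouts are assumptions.

```go
package e2e

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitUntilDeleted polls until the object is gone instead of relying on a
// fixed sleep, mirroring the intent of the kuttl changes above.
func waitUntilDeleted(ctx context.Context, c client.Client, key client.ObjectKey, obj client.Object) error {
	return wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
		err := c.Get(ctx, key, obj)
		if apierrors.IsNotFound(err) {
			return true, nil // object is gone, stop waiting
		}
		if err != nil {
			return false, err // unexpected error, stop polling and report it
		}
		return false, nil // still present, keep polling
	})
}
```

Compared to a fixed sleep, the poll returns as soon as the object disappears and fails with a clear timeout error otherwise.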