Skip to content

Commit

Permalink
Add HA support for Argo Rollouts
Browse files Browse the repository at this point in the history
Signed-off-by: Rizwana777 <[email protected]>
  • Loading branch information
Rizwana777 committed Sep 26, 2024
1 parent b3a99a6 commit 780422f
Show file tree
Hide file tree
Showing 7 changed files with 216 additions and 0 deletions.
8 changes: 8 additions & 0 deletions api/v1alpha1/argorollouts_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,14 @@ type RolloutManagerSpec struct {

// SkipNotificationSecretDeployment lets you specify if the argo notification secret should be deployed
SkipNotificationSecretDeployment bool `json:"skipNotificationSecretDeployment,omitempty"`

// HA options for High Availability support for Rollouts.
HA RolloutManagerHASpec `json:"ha,omitempty"`
}

// RolloutManagerHASpec defines the High Availability (HA) options for the
// Argo Rollouts controller managed by this RolloutManager.
type RolloutManagerHASpec struct {
	// Enabled will toggle HA support globally for RolloutManager.
	Enabled bool `json:"enabled"`
}

// ArgoRolloutsNodePlacementSpec is used to specify NodeSelector and Tolerations for Rollouts workloads
Expand Down
16 changes: 16 additions & 0 deletions api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

9 changes: 9 additions & 0 deletions config/crd/bases/argoproj.io_rolloutmanagers.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,15 @@ spec:
items:
type: string
type: array
ha:
description: HA options for High Availability support for Rollouts.
properties:
enabled:
description: Enabled will toggle HA support globally for RolloutManager.
type: boolean
required:
- enabled
type: object
image:
description: Image defines Argo Rollouts controller image (optional)
type: string
Expand Down
4 changes: 4 additions & 0 deletions controllers/default.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,4 +35,8 @@ const (
// NamespaceScopedArgoRolloutsController is an environment variable that can be used to configure scope of Argo Rollouts controller
// Set true to allow only namespace-scoped Argo Rollouts controller deployment and false for cluster-scoped
NamespaceScopedArgoRolloutsController = "NAMESPACE_SCOPED_ARGO_ROLLOUTS"

KubernetesHostnameLabel = "kubernetes.io/hostname"

TopologyKubernetesZoneLabel = "topology.kubernetes.io/zone"
)
77 changes: 77 additions & 0 deletions controllers/deployment.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,14 @@ func generateDesiredRolloutsDeployment(cr rolloutsmanagerv1alpha1.RolloutManager
}
}

// Default number of replicas is 1, update it to 2 if HA is enabled
var replicas int32 = 1
if cr.Spec.HA.Enabled {
replicas = 2
}

desiredDeployment.Spec = appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Expand All @@ -63,6 +70,32 @@ func generateDesiredRolloutsDeployment(cr rolloutsmanagerv1alpha1.RolloutManager
},
}

if cr.Spec.HA.Enabled {
desiredDeployment.Spec.Template.Spec.Affinity = &corev1.Affinity{
PodAntiAffinity: &corev1.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
{
PodAffinityTerm: corev1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: labels,
},
TopologyKey: TopologyKubernetesZoneLabel,
},
Weight: int32(100),
},
},
RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: labels,
},
TopologyKey: KubernetesHostnameLabel,
},
},
},
}
}

if cr.Spec.NodePlacement != nil {
desiredDeployment.Spec.Template.Spec.NodeSelector = appendStringMap(
desiredDeployment.Spec.Template.Spec.NodeSelector, cr.Spec.NodePlacement.NodeSelector)
Expand Down Expand Up @@ -159,6 +192,9 @@ func (r *RolloutManagerReconciler) reconcileRolloutsDeployment(ctx context.Conte
actualDeployment.Spec.Template.Spec.Containers = desiredDeployment.Spec.Template.Spec.Containers
actualDeployment.Spec.Template.Spec.ServiceAccountName = desiredDeployment.Spec.Template.Spec.ServiceAccountName

actualDeployment.Spec.Replicas = desiredDeployment.Spec.Replicas
actualDeployment.Spec.Template.Spec.Affinity = desiredDeployment.Spec.Template.Spec.Affinity

actualDeployment.Labels = combineStringMaps(actualDeployment.Labels, desiredDeployment.Labels)
actualDeployment.Annotations = combineStringMaps(actualDeployment.Annotations, desiredDeployment.Annotations)

Expand Down Expand Up @@ -236,6 +272,10 @@ func identifyDeploymentDifference(x appsv1.Deployment, y appsv1.Deployment) stri
return "Spec.Template.Spec.Volumes"
}

if !reflect.DeepEqual(x.Spec.Replicas, y.Spec.Replicas) {
return "Spec.Replicas"
}

return ""
}

Expand Down Expand Up @@ -389,7 +429,14 @@ func normalizeDeployment(inputParam appsv1.Deployment, cr rolloutsmanagerv1alpha
return appsv1.Deployment{}, fmt.Errorf("missing .spec.template.spec.volumes")
}

// Default number of replicas is 1, update it to 2 if HA is enabled
var replicas int32 = 1
if cr.Spec.HA.Enabled {
replicas = 2
}

res.Spec = appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: normalizeMap(input.Spec.Selector.MatchLabels),
},
Expand Down Expand Up @@ -462,6 +509,32 @@ func normalizeDeployment(inputParam appsv1.Deployment, cr rolloutsmanagerv1alpha
inputContainer.Env = make([]corev1.EnvVar, 0)
}

if cr.Spec.HA.Enabled {
res.Spec.Template.Spec.Affinity = &corev1.Affinity{
PodAntiAffinity: &corev1.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
{
PodAffinityTerm: corev1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: normalizeMap(input.Spec.Selector.MatchLabels),
},
TopologyKey: TopologyKubernetesZoneLabel,
},
Weight: int32(100),
},
},
RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: normalizeMap(input.Spec.Selector.MatchLabels),
},
TopologyKey: KubernetesHostnameLabel,
},
},
},
}
}

res.Spec.Template.Spec.Containers = []corev1.Container{{
Args: inputContainer.Args,
Env: inputContainer.Env,
Expand Down Expand Up @@ -575,6 +648,10 @@ func getRolloutsCommandArgs(cr rolloutsmanagerv1alpha1.RolloutManager) []string
args = append(args, "--namespaced")
}

if cr.Spec.HA.Enabled {
args = append(args, "--leader-elect", "true")
}

extraArgs := cr.Spec.ExtraCommandArgs
err := isMergable(extraArgs, args)
if err != nil {
Expand Down
43 changes: 43 additions & 0 deletions controllers/deployment_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -416,6 +416,44 @@ var _ = Describe("Deployment Test", func() {
}),
)

// Unit test: with HA enabled, the reconciled Rollouts Deployment must carry
// two replicas, pass "--leader-elect true" to the controller, and declare
// both the soft (zone) and hard (hostname) pod anti-affinity rules.
It("should contain two replicas and the --leader-elect argument set to true, and verify that the anti-affinity rule is added by default when HA is enabled", func() {
	a.Spec.HA.Enabled = true
	expectedReplicas := int32(2)

	By("calling reconcileRolloutsDeployment to create the initial set of rollout resources")
	Expect(r.reconcileRolloutsDeployment(ctx, a, *sa)).To(Succeed())

	By("fetch the Deployment")
	fetched := &appsv1.Deployment{}
	Expect(fetchObject(ctx, r.Client, a.Namespace, DefaultArgoRolloutsResourceName, fetched)).To(Succeed())

	expected := deploymentCR(DefaultArgoRolloutsResourceName, a.Namespace, DefaultArgoRolloutsResourceName, []string{"plugin-bin", "tmp"}, "linux", DefaultArgoRolloutsResourceName, a)

	By("verify that the fetched Deployment matches the desired one")
	Expect(fetched.Name).To(Equal(expected.Name))
	Expect(fetched.Labels).To(Equal(expected.Labels))
	Expect(fetched.Spec.Replicas).To(Equal(&expectedReplicas))
	Expect(fetched.Spec.Template.Spec.Containers[0].Args).To(ContainElements("--leader-elect", "true"))

	By("verifying that the anti-affinity rules are set correctly")
	affinity := fetched.Spec.Template.Spec.Affinity
	Expect(affinity).NotTo(BeNil())
	Expect(affinity.PodAntiAffinity).NotTo(BeNil())
	selectorLabels := normalizeMap(fetched.Spec.Selector.MatchLabels)

	By("Verify PreferredDuringSchedulingIgnoredDuringExecution")
	soft := affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution
	Expect(soft).To(HaveLen(1))
	softTerm := soft[0]
	Expect(softTerm.Weight).To(Equal(int32(100)))
	Expect(softTerm.PodAffinityTerm.TopologyKey).To(Equal(TopologyKubernetesZoneLabel))
	Expect(softTerm.PodAffinityTerm.LabelSelector.MatchLabels).To(Equal(selectorLabels))

	By("Verify RequiredDuringSchedulingIgnoredDuringExecution")
	hard := affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
	Expect(hard).To(HaveLen(1))
	hardTerm := hard[0]
	Expect(hardTerm.TopologyKey).To(Equal(KubernetesHostnameLabel))
	Expect(hardTerm.LabelSelector.MatchLabels).To(Equal(selectorLabels))
})

})

})
Expand All @@ -429,7 +467,12 @@ func deploymentCR(name string, namespace string, rolloutsSelectorLabel string, v
},
}
setRolloutsLabelsAndAnnotationsToObject(&deploymentCR.ObjectMeta, rolloutManager)
replicas := int32(1)
if rolloutManager.Spec.HA.Enabled {
replicas = 2
}
deploymentCR.Spec = appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
DefaultRolloutsSelectorKey: rolloutsSelectorLabel,
Expand Down
59 changes: 59 additions & 0 deletions tests/e2e/cluster-scoped/cluster_scoped_rollouts_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import (

controllers "github.com/argoproj-labs/argo-rollouts-manager/controllers"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
Expand Down Expand Up @@ -324,5 +325,63 @@ var _ = Describe("Cluster-scoped RolloutManager tests", func() {
By("2nd RM: Verify that Status.Condition is having success condition.")
Eventually(rolloutsManagerCl2, "1m", "1s").Should(rmFixture.HaveSuccessCondition())
})

// E2E test: creating a cluster-scoped RolloutManager with HA enabled must
// produce a Rollouts Deployment with two replicas, leader election turned on,
// and both the preferred (zone) and required (hostname) anti-affinity rules.
It("should contain two replicas and the '--leader-elect' argument set to true, and verify that the anti-affinity rule is added by default when HA is enabled", func() {
	By("Create cluster-scoped RolloutManager in a namespace.")
	rolloutsManager := rmv1alpha1.RolloutManager{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-rollouts-manager",
			Namespace: fixture.TestE2ENamespace,
		},
		Spec: rmv1alpha1.RolloutManagerSpec{
			NamespaceScoped: false,
			HA: rmv1alpha1.RolloutManagerHASpec{
				Enabled: true,
			},
		},
	}

	// Succeed() already asserts the Create error is nil; the original,
	// additional `Expect(err).ToNot(HaveOccurred())` checked an `err`
	// variable this block never assigns, so it is removed.
	Expect(k8sClient.Create(ctx, &rolloutsManager)).To(Succeed())

	By("Verify that RolloutManager is successfully created.")
	Eventually(rolloutsManager, "1m", "1s").Should(rmFixture.HavePhase(rmv1alpha1.PhaseAvailable))

	By("Verify that Status.Condition is having success condition.")
	Eventually(rolloutsManager, "1m", "1s").Should(rmFixture.HaveSuccessCondition())

	// Wait for the operator to create the Rollouts controller Deployment.
	depl := appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      controllers.DefaultArgoRolloutsResourceName,
			Namespace: fixture.TestE2ENamespace,
		},
	}
	Eventually(&depl, "10s", "1s").Should(k8s.ExistByName(k8sClient))

	// HA implies two controller replicas and leader election.
	replicas := int32(2)
	Expect(depl.Spec.Replicas).To(Equal(&replicas))
	Expect(depl.Spec.Template.Spec.Containers[0].Args).To(ContainElements("--leader-elect", "true"))

	By("verifying that the anti-affinity rules are set correctly")
	affinity := depl.Spec.Template.Spec.Affinity
	Expect(affinity).NotTo(BeNil())
	Expect(affinity.PodAntiAffinity).NotTo(BeNil())

	By("Verify PreferredDuringSchedulingIgnoredDuringExecution")
	// Soft rule: prefer spreading replicas across availability zones.
	preferred := affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution
	Expect(preferred).To(HaveLen(1))
	Expect(preferred[0].Weight).To(Equal(int32(100)))
	Expect(preferred[0].PodAffinityTerm.TopologyKey).To(Equal(controllers.TopologyKubernetesZoneLabel))
	Expect(preferred[0].PodAffinityTerm.LabelSelector.MatchLabels).To(Equal(depl.Spec.Selector.MatchLabels))

	By("Verify RequiredDuringSchedulingIgnoredDuringExecution")
	// Hard rule: never schedule two replicas on the same node.
	required := affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
	Expect(required).To(HaveLen(1))
	Expect(required[0].TopologyKey).To(Equal(controllers.KubernetesHostnameLabel))
	Expect(required[0].LabelSelector.MatchLabels).To(Equal(depl.Spec.Selector.MatchLabels))

	By("Delete RolloutManager.")
	Expect(k8sClient.Delete(ctx, &rolloutsManager)).To(Succeed())
})
})
})

0 comments on commit 780422f

Please sign in to comment.