From 7b22d0ebb52e50ad533429abc2344cadc7283535 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes Date: Mon, 24 Nov 2025 14:48:08 -0600 Subject: [PATCH 1/9] rough draft of Start/Stop API Implementation Signed-off-by: Britania Rodriguez Reyes --- pkg/controllers/updaterun/controller.go | 106 ++++- .../updaterun/controller_integration_test.go | 54 ++- pkg/controllers/updaterun/execution.go | 63 ++- .../updaterun/execution_integration_test.go | 431 +++++++++++++++++- .../initialization_integration_test.go | 50 ++ pkg/controllers/updaterun/validation.go | 11 +- .../updaterun/validation_integration_test.go | 3 +- pkg/utils/condition/reason.go | 6 + test/e2e/actuals_test.go | 374 ++++++++++++++- test/e2e/cluster_staged_updaterun_test.go | 179 +++++++- test/e2e/staged_updaterun_test.go | 164 ++++++- 11 files changed, 1313 insertions(+), 128 deletions(-) diff --git a/pkg/controllers/updaterun/controller.go b/pkg/controllers/updaterun/controller.go index 4e8fa695c..d4040538b 100644 --- a/pkg/controllers/updaterun/controller.go +++ b/pkg/controllers/updaterun/controller.go @@ -104,11 +104,26 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim // Emit the update run status metric based on status conditions in the updateRun. defer emitUpdateRunStatusMetric(updateRun) + state := updateRun.GetUpdateRunSpec().State + switch state { // Early check for abandoned state - this is a terminal state, no initialization needed. + case placementv1beta1.StateAbandoned: + klog.V(2).InfoS("The updateRun is abandoned, terminating", "state", state, "updateRun", runObjRef) + return runtime.Result{}, r.recordUpdateRunAbandoned(ctx, updateRun) + case placementv1beta1.StateStopped: // Early check for stopped state - pause the update run if needed. 
+ klog.V(2).InfoS("The updateRun is paused, waiting to resume", "state", state, "updateRun", runObjRef) + return runtime.Result{}, r.recordUpdateRunPaused(ctx, updateRun) + } + var updatingStageIndex int var toBeUpdatedBindings, toBeDeletedBindings []placementv1beta1.BindingObj updateRunStatus := updateRun.GetUpdateRunStatus() initCond := meta.FindStatusCondition(updateRunStatus.Conditions, string(placementv1beta1.StagedUpdateRunConditionInitialized)) - if !condition.IsConditionStatusTrue(initCond, updateRun.GetGeneration()) { + // Check if initialized regardless of generation. + // The updateRun spec fields are immutable except for the state field. When the state changes, + // the update run generation increments, but we don't need to reinitialize since initialization is a one-time setup. + isInitialized := initCond != nil && initCond.Status == metav1.ConditionTrue + if !isInitialized { + // Check if initialization failed for the current generation. if condition.IsConditionStatusFalse(initCond, updateRun.GetGeneration()) { klog.V(2).InfoS("The updateRun has failed to initialize", "errorMsg", initCond.Message, "updateRun", runObjRef) return runtime.Result{}, nil @@ -122,7 +137,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim } return runtime.Result{}, initErr } - updatingStageIndex = 0 // start from the first stage. + updatingStageIndex = 0 // start from the first stage (typically for NotStarted or Started states). klog.V(2).InfoS("Initialized the updateRun", "updateRun", runObjRef) } else { klog.V(2).InfoS("The updateRun is initialized", "updateRun", runObjRef) @@ -134,6 +149,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim } var validateErr error // Validate the updateRun status to ensure the update can be continued and get the updating stage index and cluster indices. + // For Stopped → Started transition, this will resume from where it left off. 
if updatingStageIndex, toBeUpdatedBindings, toBeDeletedBindings, validateErr = r.validate(ctx, updateRun); validateErr != nil { // errStagedUpdatedAborted cannot be retried. if errors.Is(validateErr, errStagedUpdatedAborted) { @@ -151,28 +167,32 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim } // Execute the updateRun. - klog.V(2).InfoS("Continue to execute the updateRun", "updatingStageIndex", updatingStageIndex, "updateRun", runObjRef) - finished, waitTime, execErr := r.execute(ctx, updateRun, updatingStageIndex, toBeUpdatedBindings, toBeDeletedBindings) - if errors.Is(execErr, errStagedUpdatedAborted) { - // errStagedUpdatedAborted cannot be retried. - return runtime.Result{}, r.recordUpdateRunFailed(ctx, updateRun, execErr.Error()) - } + if state == placementv1beta1.StateStarted { + klog.V(2).InfoS("Continue to execute the updateRun", "updatingStageIndex", updatingStageIndex, "updateRun", runObjRef) + finished, waitTime, execErr := r.execute(ctx, updateRun, updatingStageIndex, toBeUpdatedBindings, toBeDeletedBindings) + if errors.Is(execErr, errStagedUpdatedAborted) { + // errStagedUpdatedAborted cannot be retried. + return runtime.Result{}, r.recordUpdateRunFailed(ctx, updateRun, execErr.Error()) + } - if finished { - klog.V(2).InfoS("The updateRun is completed", "updateRun", runObjRef) - return runtime.Result{}, r.recordUpdateRunSucceeded(ctx, updateRun) - } + if finished { + klog.V(2).InfoS("The updateRun is completed", "updateRun", runObjRef) + return runtime.Result{}, r.recordUpdateRunSucceeded(ctx, updateRun) + } - // The execution is not finished yet or it encounters a retriable error. - // We need to record the status and requeue. 
- if updateErr := r.recordUpdateRunStatus(ctx, updateRun); updateErr != nil { - return runtime.Result{}, updateErr - } - klog.V(2).InfoS("The updateRun is not finished yet", "requeueWaitTime", waitTime, "execErr", execErr, "updateRun", runObjRef) - if execErr != nil { - return runtime.Result{}, execErr + // The execution is not finished yet or it encounters a retriable error. + // We need to record the status and requeue. + if updateErr := r.recordUpdateRunStatus(ctx, updateRun); updateErr != nil { + return runtime.Result{}, updateErr + } + klog.V(2).InfoS("The updateRun is not finished yet", "requeueWaitTime", waitTime, "execErr", execErr, "updateRun", runObjRef) + if execErr != nil { + return runtime.Result{}, execErr + } + return runtime.Result{RequeueAfter: waitTime}, nil } - return runtime.Result{Requeue: true, RequeueAfter: waitTime}, nil + klog.V(2).InfoS("The updateRun is not started, waiting to be started", "state", state, "updateRun", runObjRef) + return runtime.Result{}, nil } // handleDelete handles the deletion of the updateRun object. @@ -265,6 +285,50 @@ func (r *Reconciler) recordUpdateRunFailed(ctx context.Context, updateRun placem return nil } +// recordUpdateRunPaused records the progressing condition as paused in the updateRun status. +func (r *Reconciler) recordUpdateRunPaused(ctx context.Context, updateRun placementv1beta1.UpdateRunObj) error { + updateRunStatus := updateRun.GetUpdateRunStatus() + meta.SetStatusCondition(&updateRunStatus.Conditions, metav1.Condition{ + Type: string(placementv1beta1.StagedUpdateRunConditionProgressing), + Status: metav1.ConditionFalse, + ObservedGeneration: updateRun.GetGeneration(), + Reason: condition.UpdateRunPausedReason, + Message: "The update run is paused", + }) + if updateErr := r.Client.Status().Update(ctx, updateRun); updateErr != nil { + klog.ErrorS(updateErr, "Failed to update the updateRun status as paused", "updateRun", klog.KObj(updateRun)) + // updateErr can be retried. 
+ return controller.NewUpdateIgnoreConflictError(updateErr) + } + return nil +} + +// recordUpdateRunAbandoned records the succeeded and progressing condition as abandoned in the updateRun status. +func (r *Reconciler) recordUpdateRunAbandoned(ctx context.Context, updateRun placementv1beta1.UpdateRunObj) error { + updateRunStatus := updateRun.GetUpdateRunStatus() + meta.SetStatusCondition(&updateRunStatus.Conditions, metav1.Condition{ + Type: string(placementv1beta1.StagedUpdateRunConditionProgressing), + Status: metav1.ConditionFalse, + ObservedGeneration: updateRun.GetGeneration(), + Reason: condition.UpdateRunAbandonedReason, + Message: "The stages are aborted due to abandonment", + }) + meta.SetStatusCondition(&updateRunStatus.Conditions, metav1.Condition{ + Type: string(placementv1beta1.StagedUpdateRunConditionSucceeded), + Status: metav1.ConditionFalse, + ObservedGeneration: updateRun.GetGeneration(), + Reason: condition.UpdateRunAbandonedReason, + Message: "The update run has been abandoned", + }) + + if updateErr := r.Client.Status().Update(ctx, updateRun); updateErr != nil { + klog.ErrorS(updateErr, "Failed to update the updateRun status as abandoned", "updateRun", klog.KObj(updateRun)) + // updateErr can be retried. + return controller.NewUpdateIgnoreConflictError(updateErr) + } + return nil +} + // recordUpdateRunStatus records the updateRun status. 
func (r *Reconciler) recordUpdateRunStatus(ctx context.Context, updateRun placementv1beta1.UpdateRunObj) error { if updateErr := r.Client.Status().Update(ctx, updateRun); updateErr != nil { diff --git a/pkg/controllers/updaterun/controller_integration_test.go b/pkg/controllers/updaterun/controller_integration_test.go index d33b132b3..c424c2ff9 100644 --- a/pkg/controllers/updaterun/controller_integration_test.go +++ b/pkg/controllers/updaterun/controller_integration_test.go @@ -272,6 +272,16 @@ func generateMetricsLabels( } } +func generateInitializationSucceededMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { + return &prometheusclientmodel.Metric{ + Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionInitialized), + string(metav1.ConditionTrue), condition.UpdateRunInitializeSucceededReason), + Gauge: &prometheusclientmodel.Gauge{ + Value: ptr.To(float64(time.Now().UnixNano()) / 1e9), + }, + } +} + func generateInitializationFailedMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { return &prometheusclientmodel.Metric{ Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionInitialized), @@ -312,6 +322,26 @@ func generateStuckMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *pr } } +func generatePausedMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { + return &prometheusclientmodel.Metric{ + Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionProgressing), + string(metav1.ConditionFalse), condition.UpdateRunPausedReason), + Gauge: &prometheusclientmodel.Gauge{ + Value: ptr.To(float64(time.Now().UnixNano()) / 1e9), + }, + } +} + +func generateAbandonedMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { + return &prometheusclientmodel.Metric{ + Label: generateMetricsLabels(updateRun, 
string(placementv1beta1.StagedUpdateRunConditionSucceeded), + string(metav1.ConditionFalse), condition.UpdateRunAbandonedReason), + Gauge: &prometheusclientmodel.Gauge{ + Value: ptr.To(float64(time.Now().UnixNano()) / 1e9), + }, + } +} + func generateFailedMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { return &prometheusclientmodel.Metric{ Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionSucceeded), @@ -341,6 +371,7 @@ func generateTestClusterStagedUpdateRun() *placementv1beta1.ClusterStagedUpdateR PlacementName: testCRPName, ResourceSnapshotIndex: testResourceSnapshotIndex, StagedUpdateStrategyName: testUpdateStrategyName, + State: placementv1beta1.StateStarted, }, } } @@ -807,23 +838,14 @@ func generateFalseCondition(obj client.Object, condType any) metav1.Condition { } } -func generateFalseProgressingCondition(obj client.Object, condType any, succeeded bool) metav1.Condition { +func generateFalseProgressingCondition(obj client.Object, condType any, reason string) metav1.Condition { + falseCond := generateFalseCondition(obj, condType) + falseCond.Reason = reason + return falseCond +} + +func generateFalseSucceededCondition(obj client.Object, condType any, reason string) metav1.Condition { falseCond := generateFalseCondition(obj, condType) - reason := "" - switch condType { - case placementv1beta1.StagedUpdateRunConditionProgressing: - if succeeded { - reason = condition.UpdateRunSucceededReason - } else { - reason = condition.UpdateRunFailedReason - } - case placementv1beta1.StageUpdatingConditionProgressing: - if succeeded { - reason = condition.StageUpdatingSucceededReason - } else { - reason = condition.StageUpdatingFailedReason - } - } falseCond.Reason = reason return falseCond } diff --git a/pkg/controllers/updaterun/execution.go b/pkg/controllers/updaterun/execution.go index 074c23739..b998abd95 100644 --- a/pkg/controllers/updaterun/execution.go +++ 
b/pkg/controllers/updaterun/execution.go @@ -166,13 +166,13 @@ func (r *Reconciler) executeUpdatingStage( for i := 0; i < len(updatingStageStatus.Clusters) && clusterUpdatingCount < maxConcurrency; i++ { clusterStatus := &updatingStageStatus.Clusters[i] clusterUpdateSucceededCond := meta.FindStatusCondition(clusterStatus.Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)) - if condition.IsConditionStatusTrue(clusterUpdateSucceededCond, updateRun.GetGeneration()) { + if clusterUpdateSucceededCond != nil && clusterUpdateSucceededCond.Status == metav1.ConditionTrue { // The cluster has been updated successfully. finishedClusterCount++ continue } clusterUpdatingCount++ - if condition.IsConditionStatusFalse(clusterUpdateSucceededCond, updateRun.GetGeneration()) { + if clusterUpdateSucceededCond != nil && clusterUpdateSucceededCond.Status == metav1.ConditionFalse { // The cluster is marked as failed to update, this cluster is counted as updating cluster since it's not finished to avoid processing more clusters than maxConcurrency in this round. failedErr := fmt.Errorf("the cluster `%s` in the stage %s has failed", clusterStatus.ClusterName, updatingStageStatus.StageName) klog.ErrorS(failedErr, "The cluster has failed to be updated", "updateRun", updateRunRef) @@ -182,7 +182,7 @@ func (r *Reconciler) executeUpdatingStage( // The cluster needs to be processed. clusterStartedCond := meta.FindStatusCondition(clusterStatus.Conditions, string(placementv1beta1.ClusterUpdatingConditionStarted)) binding := toBeUpdatedBindingsMap[clusterStatus.ClusterName] - if !condition.IsConditionStatusTrue(clusterStartedCond, updateRun.GetGeneration()) { + if clusterStartedCond == nil || clusterStartedCond.Status != metav1.ConditionTrue { // The cluster has not started updating yet. 
if !isBindingSyncedWithClusterStatus(resourceSnapshotName, updateRun, binding, clusterStatus) { klog.V(2).InfoS("Found the first cluster that needs to be updated", "cluster", clusterStatus.ClusterName, "stage", updatingStageStatus.StageName, "updateRun", updateRunRef) @@ -285,31 +285,45 @@ func (r *Reconciler) executeUpdatingStage( } if finishedClusterCount == len(updatingStageStatus.Clusters) { - // All the clusters in the stage have been updated. - markUpdateRunWaiting(updateRun, fmt.Sprintf(condition.UpdateRunWaitingMessageFmt, "after-stage", updatingStageStatus.StageName)) - markStageUpdatingWaiting(updatingStageStatus, updateRun.GetGeneration(), "All clusters in the stage are updated, waiting for after-stage tasks to complete") - klog.V(2).InfoS("The stage has finished all cluster updating", "stage", updatingStageStatus.StageName, "updateRun", updateRunRef) - // Check if the after stage tasks are ready. - approved, waitTime, err := r.checkAfterStageTasksStatus(ctx, updatingStageIndex, updateRun) - if err != nil { - return 0, err - } - if approved { - markUpdateRunProgressing(updateRun) - markStageUpdatingSucceeded(updatingStageStatus, updateRun.GetGeneration()) - // No need to wait to get to the next stage. - return 0, nil - } - // The after stage tasks are not ready yet. - if waitTime < 0 { - waitTime = stageUpdatingWaitTime - } - return waitTime, nil + return r.handleStageCompletion(ctx, updatingStageIndex, updateRun, updatingStageStatus) } + // Some clusters are still updating. return clusterUpdatingWaitTime, nil } +// handleStageCompletion handles the completion logic when all clusters in a stage are finished. +// Returns the wait time and any error encountered. 
+func (r *Reconciler) handleStageCompletion( + ctx context.Context, + updatingStageIndex int, + updateRun placementv1beta1.UpdateRunObj, + updatingStageStatus *placementv1beta1.StageUpdatingStatus, +) (time.Duration, error) { + updateRunRef := klog.KObj(updateRun) + + // All the clusters in the stage have been updated. + markUpdateRunWaiting(updateRun, fmt.Sprintf(condition.UpdateRunWaitingMessageFmt, "after-stage", updatingStageStatus.StageName)) + markStageUpdatingWaiting(updatingStageStatus, updateRun.GetGeneration(), "All clusters in the stage are updated, waiting for after-stage tasks to complete") + klog.V(2).InfoS("The stage has finished all cluster updating", "stage", updatingStageStatus.StageName, "updateRun", updateRunRef) + // Check if the after stage tasks are ready. + approved, waitTime, err := r.checkAfterStageTasksStatus(ctx, updatingStageIndex, updateRun) + if err != nil { + return 0, err + } + if approved { + markUpdateRunProgressing(updateRun) + markStageUpdatingSucceeded(updatingStageStatus, updateRun.GetGeneration()) + // No need to wait to get to the next stage. + return 0, nil + } + // The after stage tasks are not ready yet. + if waitTime < 0 { + waitTime = stageUpdatingWaitTime + } + return waitTime, nil +} + // executeDeleteStage executes the delete stage by deleting the bindings. func (r *Reconciler) executeDeleteStage( ctx context.Context, @@ -337,7 +351,8 @@ func (r *Reconciler) executeDeleteStage( // In validation, we already check the binding must exist in the status. delete(existingDeleteStageClusterMap, bindingSpec.TargetCluster) // Make sure the cluster is not marked as deleted as the binding is still there. 
- if condition.IsConditionStatusTrue(meta.FindStatusCondition(curCluster.Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)), updateRun.GetGeneration()) { + clusterDeleteSucceededCond := meta.FindStatusCondition(curCluster.Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)) + if clusterDeleteSucceededCond != nil && clusterDeleteSucceededCond.Status == metav1.ConditionTrue { unexpectedErr := controller.NewUnexpectedBehaviorError(fmt.Errorf("the deleted cluster `%s` in the deleting stage still has a binding", bindingSpec.TargetCluster)) klog.ErrorS(unexpectedErr, "The cluster in the deleting stage is not removed yet but marked as deleted", "cluster", curCluster.ClusterName, "updateRun", updateRunRef) return false, fmt.Errorf("%w: %s", errStagedUpdatedAborted, unexpectedErr.Error()) diff --git a/pkg/controllers/updaterun/execution_integration_test.go b/pkg/controllers/updaterun/execution_integration_test.go index 61764f378..62b731939 100644 --- a/pkg/controllers/updaterun/execution_integration_test.go +++ b/pkg/controllers/updaterun/execution_integration_test.go @@ -24,6 +24,7 @@ import ( "github.com/google/go-cmp/cmp" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + io_prometheus_client "github.com/prometheus/client_model/go" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -344,7 +345,7 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions = append(wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionApprovalRequestApproved)) // 1st stage completed, mark progressing condition reason as succeeded and add succeeded condition. 
- wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // 2nd stage waiting for before stage tasks. wantStatus.StagesStatus[1].Conditions = append(wantStatus.StagesStatus[1].Conditions, generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)) @@ -548,7 +549,7 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionApprovalRequestApproved)) wantStatus.StagesStatus[1].AfterStageTaskStatus[1].Conditions = append(wantStatus.StagesStatus[1].AfterStageTaskStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionWaitTimeElapsed)) - wantStatus.StagesStatus[1].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[1].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[1].Conditions = append(wantStatus.StagesStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) meta.SetStatusCondition(&wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing)) @@ -605,10 +606,10 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { wantStatus.DeletionStageStatus.Clusters[i].Conditions = append(wantStatus.DeletionStageStatus.Clusters[i].Conditions, 
generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) } // Mark the stage progressing condition as false with succeeded reason and add succeeded condition. - wantStatus.DeletionStageStatus.Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.DeletionStageStatus.Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -691,9 +692,9 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { By("Validating the updateRun has failed") wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateFalseCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, false) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, 
condition.StageUpdatingFailedReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, false)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunFailedReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateFalseCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -864,13 +865,13 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { By("Validating the 3rd cluster has succeeded and stage waiting for AfterStageTasks") wantStatus.StagesStatus[0].Clusters[2].Conditions = append(wantStatus.StagesStatus[0].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) // 1st stage completed. - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. 
- wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -970,13 +971,13 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { wantStatus.StagesStatus[0].AfterStageTaskStatus[0].Conditions = append(wantStatus.StagesStatus[0].AfterStageTaskStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionWaitTimeElapsed)) // 1st stage completed. 
- wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. - wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. 
- meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -1102,13 +1103,13 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { wantStatus.StagesStatus[0].AfterStageTaskStatus[0].Conditions = append(wantStatus.StagesStatus[0].AfterStageTaskStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionApprovalRequestApproved)) // 1st stage completed. - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. 
- wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -1198,13 +1199,13 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { By("Validating the 3rd cluster has succeeded and stage waiting for AfterStageTasks") wantStatus.StagesStatus[0].Clusters[2].Conditions = append(wantStatus.StagesStatus[0].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) // 1st stage completed. 
- wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. - wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. 
- meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -1483,13 +1484,13 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { By("Validating the 1st stage has completed") wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions = append(wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionWaitTimeElapsed)) - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. 
- wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) // Need to have a longer wait time for the test to pass, because of the long wait time specified in the update strategy. 
timeout = time.Second * 90 @@ -1516,6 +1517,394 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { }, timeout, interval).Should(BeTrue(), "failed to ensure the approvalRequest is not recreated") }) }) + + Context("Cluster staged update run should NOT update clusters one by one - different states (NotStarted -> Abandoned)", Ordered, func() { + var wantMetrics []*io_prometheus_client.Metric + BeforeAll(func() { + By("Creating a new clusterStagedUpdateRun") + updateRun.Spec.State = placementv1beta1.StateNotStarted + Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) + + By("Validating the initialization succeeded and but not execution started") + wantStatus = generateSucceededInitializationStatusForSmallClusters(crp, updateRun, testResourceSnapshotIndex, policySnapshot, updateStrategy) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generateInitializationSucceededMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) 
+ }) + + It("Should not start execution when the state is NotStarted", func() { + By("Validating no execution has started") + Consistently(func() bool { + var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun + if err := k8sClient.Get(ctx, types.NamespacedName{Name: updateRun.Name}, &currentUpdateRun); err != nil { + return false + } + return meta.FindStatusCondition(currentUpdateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) == nil && + meta.FindStatusCondition(currentUpdateRun.Status.StagesStatus[0].Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)) == nil + }, timeout, interval).Should(BeTrue(), "execution has started unexpectedly") + + By("Validating the 1st clusterResourceBinding is updated to NOT Bound") + binding := resourceBindings[0] // cluster-0 + validateNotBindingState(ctx, binding) + }) + + It("Should not continue further after changing the state to Abandoned", func() { + By("Updating the updateRun state to Abandoned") + updateRun.Spec.State = placementv1beta1.StateAbandoned + Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") + + By("Validating the execution has not started") + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + validateUpdateRunMetricsEmitted(wantMetrics...) 
+ }) + }) + + Context("Cluster staged update run should update clusters one by one - different states (NotStarted -> Started-> Abandoned)", Ordered, func() { + var wantMetrics []*io_prometheus_client.Metric + BeforeAll(func() { + By("Creating a new clusterStagedUpdateRun") + updateRun.Spec.State = placementv1beta1.StateNotStarted + Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) + + By("Validating the initialization succeeded and but not execution started") + wantStatus = generateSucceededInitializationStatusForSmallClusters(crp, updateRun, testResourceSnapshotIndex, policySnapshot, updateStrategy) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generateInitializationSucceededMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) + }) + + It("Should not start execution when the state is NotStarted", func() { + By("Validating no execution has started") + Consistently(func() bool { + var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun + if err := k8sClient.Get(ctx, types.NamespacedName{Name: updateRun.Name}, &currentUpdateRun); err != nil { + return false + } + return meta.FindStatusCondition(currentUpdateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) == nil && + meta.FindStatusCondition(currentUpdateRun.Status.StagesStatus[0].Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)) == nil + }, timeout, interval).Should(BeTrue(), "execution has started unexpectedly") + + By("Validating the 1st clusterResourceBinding is updated to NOT Bound") + binding := resourceBindings[0] // cluster-0 + validateNotBindingState(ctx, binding) + }) + + It("Should start execution after changing the state to Started", func() { + By("Updating the updateRun state to Started") + updateRun.Spec.State = placementv1beta1.StateStarted + Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed 
to update the updateRun state") + + By("Validating the execution has started") + wantStatus = generateExecutionStartedStatus(updateRun, wantStatus) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generateProgressingMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) + }) + + It("Should mark the 1st cluster in the 1st stage as succeeded after marking the binding available", func() { + By("Validating the 1st clusterResourceBinding is updated to Bound") + binding := resourceBindings[0] // cluster-0 + validateBindingState(ctx, binding, resourceSnapshot.Name, updateRun, 0) + + By("Updating the 1st clusterResourceBinding to Available") + meta.SetStatusCondition(&binding.Status.Conditions, generateTrueCondition(binding, placementv1beta1.ResourceBindingAvailable)) + Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") + + By("Validating the 1st cluster has succeeded and 2nd cluster has started") + wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[0].Clusters[1].Conditions = append(wantStatus.StagesStatus[0].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Validating the 1st stage has startTime set") + Expect(updateRun.Status.StagesStatus[0].StartTime).ShouldNot(BeNil()) + + By("Checking update run status metrics are emitted") + validateUpdateRunMetricsEmitted(wantMetrics...) 
+ }) + + It("Should abandon execution after changing the state to Abandoned", func() { + By("Updating the updateRun state to Abandoned") + updateRun.Spec.State = placementv1beta1.StateAbandoned + Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") + + By("Validating the execution has been abandoned") + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunAbandonedReason)) + wantStatus.Conditions = append(wantStatus.Conditions, generateFalseSucceededCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded, condition.UpdateRunAbandonedReason)) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generateAbandonedMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) + }) + }) + + Context("Cluster staged update run should update clusters one by one - different states (NotStarted -> Started -> Stopped -> Abandoned)", Ordered, func() { + var wantMetrics []*io_prometheus_client.Metric + BeforeAll(func() { + By("Creating a new clusterStagedUpdateRun") + updateRun.Spec.State = placementv1beta1.StateNotStarted + Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) + + By("Validating the initialization succeeded and but not execution started") + wantStatus = generateSucceededInitializationStatusForSmallClusters(crp, updateRun, testResourceSnapshotIndex, policySnapshot, updateStrategy) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generateInitializationSucceededMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) 
+ }) + + It("Should not start execution when the state is NotStarted", func() { + By("Validating no execution has started") + Consistently(func() bool { + var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun + if err := k8sClient.Get(ctx, types.NamespacedName{Name: updateRun.Name}, &currentUpdateRun); err != nil { + return false + } + return meta.FindStatusCondition(currentUpdateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) == nil && + meta.FindStatusCondition(currentUpdateRun.Status.StagesStatus[0].Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)) == nil + }, timeout, interval).Should(BeTrue(), "execution has started unexpectedly") + + By("Validating the 1st clusterResourceBinding is updated to NOT Bound") + binding := resourceBindings[0] // cluster-0 + validateNotBindingState(ctx, binding) + }) + + It("Should start execution after changing the state to Started", func() { + By("Updating the updateRun state to Started") + updateRun.Spec.State = placementv1beta1.StateStarted + Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") + + By("Validating the execution has started") + wantStatus = generateExecutionStartedStatus(updateRun, wantStatus) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generateProgressingMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) 
+ }) + + It("Should mark the 1st cluster in the 1st stage as succeeded after marking the binding available", func() { + By("Validating the 1st clusterResourceBinding is updated to Bound") + binding := resourceBindings[0] // cluster-0 + validateBindingState(ctx, binding, resourceSnapshot.Name, updateRun, 0) + + By("Updating the 1st clusterResourceBinding to Available") + meta.SetStatusCondition(&binding.Status.Conditions, generateTrueCondition(binding, placementv1beta1.ResourceBindingAvailable)) + Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") + + By("Validating the 1st cluster has succeeded and 2nd cluster has started") + wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[0].Clusters[1].Conditions = append(wantStatus.StagesStatus[0].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Validating the 1st stage has startTime set") + Expect(updateRun.Status.StagesStatus[0].StartTime).ShouldNot(BeNil()) + + By("Checking update run status metrics are emitted") + validateUpdateRunMetricsEmitted(wantMetrics...) 
+ }) + + It("Should stop execution after changing the state to Stopped", func() { + By("Updating the updateRun state to Stopped") + updateRun.Spec.State = placementv1beta1.StateStopped + Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") + + By("Validating the execution has stopped at the 2nd cluster of the 1st stage") + generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionInitialized) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunPausedReason)) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generatePausedMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) + }) + + It("Should not continue execution when the state is Stopped", func() { + By("Validating no execution has started") + Consistently(func() bool { + var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun + if err := k8sClient.Get(ctx, types.NamespacedName{Name: updateRun.Name}, &currentUpdateRun); err != nil { + return false + } + updateRunStatusCond := meta.FindStatusCondition(currentUpdateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) + return condition.IsConditionStatusFalse(updateRunStatusCond, currentUpdateRun.Generation) + }, timeout, interval).Should(BeTrue(), "execution has started unexpectedly") + }) + + It("Should abandon execution after changing the state to Abandoned", func() { + By("Updating the updateRun state to Abandoned") + updateRun.Spec.State = placementv1beta1.StateAbandoned + Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") + + By("Validating the execution has been abandoned") + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, 
placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunAbandonedReason)) + wantStatus.Conditions = append(wantStatus.Conditions, generateFalseSucceededCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded, condition.UpdateRunAbandonedReason)) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generateAbandonedMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) + }) + }) + + Context("Cluster staged update run should update clusters one by one - different states (NotStarted -> Started -> Stopped -> Started -> Abandoned)", Ordered, func() { + var wantMetrics []*io_prometheus_client.Metric + BeforeAll(func() { + By("Creating a new clusterStagedUpdateRun") + updateRun.Spec.State = placementv1beta1.StateNotStarted + Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) + + By("Validating the initialization succeeded and but not execution started") + wantStatus = generateSucceededInitializationStatusForSmallClusters(crp, updateRun, testResourceSnapshotIndex, policySnapshot, updateStrategy) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generateInitializationSucceededMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) 
+ }) + + It("Should not start execution when the state is NotStarted", func() { + By("Validating no execution has started") + Consistently(func() bool { + var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun + if err := k8sClient.Get(ctx, types.NamespacedName{Name: updateRun.Name}, &currentUpdateRun); err != nil { + return false + } + return meta.FindStatusCondition(currentUpdateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) == nil && + meta.FindStatusCondition(currentUpdateRun.Status.StagesStatus[0].Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)) == nil + }, timeout, interval).Should(BeTrue(), "execution has started unexpectedly") + + By("Validating the 1st clusterResourceBinding is updated to NOT Bound") + binding := resourceBindings[0] // cluster-0 + validateNotBindingState(ctx, binding) + }) + + It("Should start execution after changing the state to Started", func() { + By("Updating the updateRun state to Started") + updateRun.Spec.State = placementv1beta1.StateStarted + Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") + + By("Validating the execution has started") + wantStatus = generateExecutionStartedStatus(updateRun, wantStatus) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generateProgressingMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) 
+ }) + + It("Should mark the 1st cluster in the 1st stage as succeeded after marking the binding available", func() { + By("Validating the 1st clusterResourceBinding is updated to Bound") + binding := resourceBindings[0] // cluster-0 + validateBindingState(ctx, binding, resourceSnapshot.Name, updateRun, 0) + + By("Updating the 1st clusterResourceBinding to Available") + meta.SetStatusCondition(&binding.Status.Conditions, generateTrueCondition(binding, placementv1beta1.ResourceBindingAvailable)) + Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") + + By("Validating the 1st cluster has succeeded and 2nd cluster has started") + wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[0].Clusters[1].Conditions = append(wantStatus.StagesStatus[0].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Validating the 1st stage has startTime set") + Expect(updateRun.Status.StagesStatus[0].StartTime).ShouldNot(BeNil()) + + By("Checking update run status metrics are emitted") + validateUpdateRunMetricsEmitted(wantMetrics...) 
+ }) + + It("Should stop execution after changing the state to Stopped", func() { + By("Updating the updateRun state to Stopped") + updateRun.Spec.State = placementv1beta1.StateStopped + Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") + + By("Validating the execution has stopped at the 2nd cluster of the 1st stage") + generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionInitialized) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunPausedReason)) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generatePausedMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) + }) + + It("Should not continue execution when the state is Stopped", func() { + By("Validating no execution has started") + Consistently(func() error { + var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun + if err := k8sClient.Get(ctx, types.NamespacedName{Name: updateRun.Name}, &currentUpdateRun); err != nil { + return err + } + updateRunStatusCond := meta.FindStatusCondition(currentUpdateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) + if condition.IsConditionStatusTrue(updateRunStatusCond, currentUpdateRun.Generation) { + return fmt.Errorf("update run progressing condition is true unexpectedly") + } + updateRunClusterStatusCond := meta.FindStatusCondition(currentUpdateRun.Status.StagesStatus[0].Clusters[1].Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)) + if updateRunClusterStatusCond != nil { + return fmt.Errorf("2nd cluster in 1st stage succeeded condition is set unexpectedly") + } + return nil + }, timeout, interval).Should(BeNil(), "execution has started unexpectedly") + }) + + It("Should continue execution after 
changing the state to Started", func() { + By("Updating the updateRun state to Started") + updateRun.Spec.State = placementv1beta1.StateStarted + Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") + + By("Validating the execution has started") + // UpdateRun is already initialized, so only need to set the progressing condition to true. + meta.SetStatusCondition(&wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing)) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generateProgressingMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) + }) + + It("Should mark the 2nd cluster in the 1st stage as succeeded after marking the binding available", func() { + By("Validating the 2nd clusterResourceBinding is updated to Bound") + binding := resourceBindings[1] // cluster-1 + validateBindingState(ctx, binding, resourceSnapshot.Name, updateRun, 0) + + By("Updating the 2nd clusterResourceBinding to Available") + meta.SetStatusCondition(&binding.Status.Conditions, generateTrueCondition(binding, placementv1beta1.ResourceBindingAvailable)) + Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") + + By("Validating the 2nd cluster has succeeded and 3rd cluster has started") + wantStatus.StagesStatus[0].Clusters[1].Conditions = append(wantStatus.StagesStatus[0].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[0].Clusters[2].Conditions = append(wantStatus.StagesStatus[0].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + 
validateUpdateRunMetricsEmitted(wantMetrics...) + }) + + It("Should abandon execution after changing the state to Abandoned", func() { + By("Updating the updateRun state to Abandoned") + updateRun.Spec.State = placementv1beta1.StateAbandoned + Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") + + By("Validating the execution has been abandoned") + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunAbandonedReason)) + wantStatus.Conditions = append(wantStatus.Conditions, generateFalseSucceededCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded, condition.UpdateRunAbandonedReason)) + validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + + By("Checking update run status metrics are emitted") + wantMetrics = append(wantMetrics, generateAbandonedMetric(updateRun)) + validateUpdateRunMetricsEmitted(wantMetrics...) 
+ }) + }) }) func validateBindingState(ctx context.Context, binding *placementv1beta1.ClusterResourceBinding, resourceSnapshotName string, updateRun *placementv1beta1.ClusterStagedUpdateRun, stage int) { diff --git a/pkg/controllers/updaterun/initialization_integration_test.go b/pkg/controllers/updaterun/initialization_integration_test.go index 0351b8a77..35c10eff5 100644 --- a/pkg/controllers/updaterun/initialization_integration_test.go +++ b/pkg/controllers/updaterun/initialization_integration_test.go @@ -941,6 +941,56 @@ var _ = Describe("Updaterun initialization tests", func() { validateUpdateRunMetricsEmitted(generateWaitingMetric(updateRun)) }) }) + + It("Should not initialize if updateRun is created with state Abandoned", func() { + By("Creating a new clusterStagedUpdateRun in Abandoned state") + updateRun.Spec.State = placementv1beta1.StateAbandoned + Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) + + By("Validating the updateRun is not initialized") + // Populate the cache first. 
+ Eventually(func() error { + if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil { + return err + } + return nil + }, timeout, interval).Should(Succeed(), "failed to get the updateRun") + Consistently(func() error { + if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil { + return err + } + initCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionInitialized)) + if initCond != nil { + return fmt.Errorf("got initialization condition: %v, want nil", initCond) + } + return nil + }, duration, interval).Should(Succeed(), "the abandoned updateRun should not be initialized") + }) + + It("Should not initialize if updateRun is created with state Stopped ", func() { + By("Creating a new clusterStagedUpdateRun in Stopped state") + updateRun.Spec.State = placementv1beta1.StateStopped + Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) + + By("Validating the updateRun is not initialized") + // Populate the cache first. 
+ Eventually(func() error { + if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil { + return err + } + return nil + }, timeout, interval).Should(Succeed(), "failed to get the updateRun") + Consistently(func() error { + if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil { + return err + } + initCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionInitialized)) + if initCond != nil { + return fmt.Errorf("got initialization condition: %v, want nil", initCond) + } + return nil + }, duration, interval).Should(Succeed(), "the stopped updateRun should not be initialized") + }) }) func validateFailedInitCondition(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun, message string) { diff --git a/pkg/controllers/updaterun/validation.go b/pkg/controllers/updaterun/validation.go index ffa5ea3c2..c92a946a9 100644 --- a/pkg/controllers/updaterun/validation.go +++ b/pkg/controllers/updaterun/validation.go @@ -22,6 +22,7 @@ import ( "reflect" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" @@ -185,7 +186,7 @@ func validateClusterUpdatingStatus( ) (int, int, error) { stageSucceedCond := meta.FindStatusCondition(stageStatus.Conditions, string(placementv1beta1.StageUpdatingConditionSucceeded)) stageStartedCond := meta.FindStatusCondition(stageStatus.Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)) - if condition.IsConditionStatusTrue(stageSucceedCond, updateRun.GetGeneration()) { + if stageSucceedCond != nil && stageSucceedCond.Status == metav1.ConditionTrue { // The stage has finished. if updatingStageIndex != -1 && curStage > updatingStageIndex { // The finished stage is after the updating stage. @@ -196,10 +197,8 @@ func validateClusterUpdatingStatus( // Make sure that all the clusters are updated. 
for curCluster := range stageStatus.Clusters { // Check if the cluster is still updating. - if !condition.IsConditionStatusTrue(meta.FindStatusCondition( - stageStatus.Clusters[curCluster].Conditions, - string(placementv1beta1.ClusterUpdatingConditionSucceeded)), - updateRun.GetGeneration()) { + clusterSucceededCond := meta.FindStatusCondition(stageStatus.Clusters[curCluster].Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)) + if clusterSucceededCond == nil || clusterSucceededCond.Status == metav1.ConditionFalse { // The clusters in the finished stage should all have finished too. unexpectedErr := controller.NewUnexpectedBehaviorError(fmt.Errorf("cluster `%s` in the finished stage `%s` has not succeeded", stageStatus.Clusters[curCluster].ClusterName, stageStatus.StageName)) klog.ErrorS(unexpectedErr, "The cluster in a finished stage is still updating", "updateRun", klog.KObj(updateRun)) @@ -214,7 +213,7 @@ func validateClusterUpdatingStatus( } // Record the last finished stage so we can continue from the next stage if no stage is updating. lastFinishedStageIndex = curStage - } else if condition.IsConditionStatusFalse(stageSucceedCond, updateRun.GetGeneration()) { + } else if stageSucceedCond != nil && stageSucceedCond.Status == metav1.ConditionFalse { // The stage has failed. 
failedErr := fmt.Errorf("the stage `%s` has failed, err: %s", stageStatus.StageName, stageSucceedCond.Message) klog.ErrorS(failedErr, "The stage has failed", "stageCond", stageSucceedCond, "updateRun", klog.KObj(updateRun)) diff --git a/pkg/controllers/updaterun/validation_integration_test.go b/pkg/controllers/updaterun/validation_integration_test.go index b1190601e..b0d55f5f2 100644 --- a/pkg/controllers/updaterun/validation_integration_test.go +++ b/pkg/controllers/updaterun/validation_integration_test.go @@ -34,6 +34,7 @@ import ( clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/utils" + "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" ) var _ = Describe("UpdateRun validation tests", func() { @@ -564,7 +565,7 @@ func generateFailedValidationStatus( updateRun *placementv1beta1.ClusterStagedUpdateRun, started *placementv1beta1.UpdateRunStatus, ) *placementv1beta1.UpdateRunStatus { - started.Conditions[1] = generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, false) + started.Conditions[1] = generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunFailedReason) started.Conditions = append(started.Conditions, generateFalseCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) return started } diff --git a/pkg/utils/condition/reason.go b/pkg/utils/condition/reason.go index 9566ee42e..14b778dea 100644 --- a/pkg/utils/condition/reason.go +++ b/pkg/utils/condition/reason.go @@ -161,9 +161,15 @@ const ( // UpdateRunProgressingReason is the reason string of condition if the staged update run is progressing. UpdateRunProgressingReason = "UpdateRunProgressing" + // UpdateRunPausedReason is the reason string of condition if the staged update run is paused. 
+ UpdateRunPausedReason = "UpdateRunPaused" + // UpdateRunFailedReason is the reason string of condition if the staged update run failed. UpdateRunFailedReason = "UpdateRunFailed" + // UpdateRunAbandonedReason is the reason string of condition if the staged update run is abandoned. + UpdateRunAbandonedReason = "UpdateRunAbandoned" + // UpdateRunStuckReason is the reason string of condition if the staged update run is stuck waiting for a cluster to be updated. UpdateRunStuckReason = "UpdateRunStuck" diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index fc171ea48..1bebd40c2 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -2040,6 +2040,17 @@ func updateRunStageRolloutSucceedConditions(generation int64) []metav1.Condition } } +func updateRunStageRolloutWaitingConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.StageUpdatingConditionProgressing), + Status: metav1.ConditionFalse, + Reason: condition.StageUpdatingWaitingReason, + ObservedGeneration: generation, + }, + } +} + func updateRunStageTaskSucceedConditions(generation int64, taskType placementv1beta1.StageTaskType) []metav1.Condition { if taskType == placementv1beta1.StageTaskTypeApproval { return []metav1.Condition{ @@ -2090,6 +2101,57 @@ func updateRunSucceedConditions(generation int64) []metav1.Condition { } } +func updateRunStoppedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.StagedUpdateRunConditionInitialized), + Status: metav1.ConditionTrue, + Reason: condition.UpdateRunInitializeSucceededReason, + ObservedGeneration: 1, + }, + { + Type: string(placementv1beta1.StagedUpdateRunConditionProgressing), + Status: metav1.ConditionFalse, + Reason: condition.UpdateRunPausedReason, + ObservedGeneration: generation, + }, + } +} + +func updateRunAbandonedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: 
string(placementv1beta1.StagedUpdateRunConditionInitialized), + Status: metav1.ConditionTrue, + Reason: condition.UpdateRunInitializeSucceededReason, + ObservedGeneration: 1, + }, + { + Type: string(placementv1beta1.StagedUpdateRunConditionProgressing), + Status: metav1.ConditionFalse, + Reason: condition.UpdateRunAbandonedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.StagedUpdateRunConditionSucceeded), + Status: metav1.ConditionFalse, + Reason: condition.UpdateRunAbandonedReason, + ObservedGeneration: generation, + }, + } +} + +func updateRunInitializedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.StagedUpdateRunConditionInitialized), + Status: metav1.ConditionTrue, + Reason: condition.UpdateRunInitializeSucceededReason, + ObservedGeneration: generation, + }, + } +} + func clusterStagedUpdateRunStatusSucceededActual( updateRunName string, wantResourceIndex string, @@ -2126,6 +2188,256 @@ func clusterStagedUpdateRunStatusSucceededActual( } } +func clusterStagedUpdateRunStatusAbandonedActual( + updateRunName string, + wantResourceIndex string, + wantPolicyIndex string, + wantClusterCount int, + wantApplyStrategy *placementv1beta1.ApplyStrategy, + wantStrategySpec *placementv1beta1.UpdateStrategySpec, + wantSelectedClusters [][]string, + wantUnscheduledClusters []string, + wantCROs map[string][]string, + wantROs map[string][]placementv1beta1.NamespacedName, +) func() error { + return func() error { + updateRun := &placementv1beta1.ClusterStagedUpdateRun{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName}, updateRun); err != nil { + return err + } + + wantStatus := placementv1beta1.UpdateRunStatus{ + PolicySnapshotIndexUsed: wantPolicyIndex, + ResourceSnapshotIndexUsed: wantResourceIndex, + PolicyObservedClusterCount: wantClusterCount, + ApplyStrategy: wantApplyStrategy.DeepCopy(), + UpdateStrategySnapshot: wantStrategySpec, + } + + 
wantStatus.StagesStatus = buildStageUpdatingStatusesWithGeneration(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun, updateRun.GetGeneration()-2) + wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) + wantStatus.Conditions = updateRunAbandonedConditions(updateRun.Generation) + if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { + return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) + } + return nil + } +} + +func clusterStagedUpdateRunStatusStoppedActual( + updateRunName string, + wantResourceIndex string, + wantPolicyIndex string, + wantClusterCount int, + wantApplyStrategy *placementv1beta1.ApplyStrategy, + wantStrategySpec *placementv1beta1.UpdateStrategySpec, + wantSelectedClusters [][]string, + wantUnscheduledClusters []string, + wantCROs map[string][]string, + wantROs map[string][]placementv1beta1.NamespacedName, +) func() error { + return func() error { + updateRun := &placementv1beta1.ClusterStagedUpdateRun{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName}, updateRun); err != nil { + return err + } + + wantStatus := placementv1beta1.UpdateRunStatus{ + PolicySnapshotIndexUsed: wantPolicyIndex, + ResourceSnapshotIndexUsed: wantResourceIndex, + PolicyObservedClusterCount: wantClusterCount, + ApplyStrategy: wantApplyStrategy.DeepCopy(), + UpdateStrategySnapshot: wantStrategySpec, + } + + wantStatus.StagesStatus = buildStageUpdatingStatusesWithGeneration(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun, updateRun.GetGeneration()-1) + wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) + wantStatus.Conditions = updateRunStoppedConditions(updateRun.Generation) + if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { + return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) + } + return nil + 
} +} + +func clusterStagedUpdateRunStatusNotStartedActual( + updateRunName string, + wantResourceIndex string, + wantPolicyIndex string, + wantClusterCount int, + wantApplyStrategy *placementv1beta1.ApplyStrategy, + wantStrategySpec *placementv1beta1.UpdateStrategySpec, + wantSelectedClusters [][]string, + wantUnscheduledClusters []string, + wantCROs map[string][]string, + wantROs map[string][]placementv1beta1.NamespacedName, +) func() error { + return func() error { + updateRun := &placementv1beta1.ClusterStagedUpdateRun{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName}, updateRun); err != nil { + return err + } + + wantStatus := placementv1beta1.UpdateRunStatus{ + PolicySnapshotIndexUsed: wantPolicyIndex, + ResourceSnapshotIndexUsed: wantResourceIndex, + PolicyObservedClusterCount: wantClusterCount, + ApplyStrategy: wantApplyStrategy.DeepCopy(), + UpdateStrategySnapshot: wantStrategySpec, + } + + stagesStatus := make([]placementv1beta1.StageUpdatingStatus, len(wantStrategySpec.Stages)) + for i, stage := range wantStrategySpec.Stages { + stagesStatus[i].StageName = stage.Name + stagesStatus[i].Clusters = make([]placementv1beta1.ClusterUpdatingStatus, len(wantSelectedClusters[i])) + for j := range stagesStatus[i].Clusters { + stagesStatus[i].Clusters[j].ClusterName = wantSelectedClusters[i][j] + stagesStatus[i].Clusters[j].ClusterResourceOverrideSnapshots = wantCROs[wantSelectedClusters[i][j]] + stagesStatus[i].Clusters[j].ResourceOverrideSnapshots = wantROs[wantSelectedClusters[i][j]] + } + stagesStatus[i].AfterStageTaskStatus = make([]placementv1beta1.StageTaskStatus, len(stage.AfterStageTasks)) + for j, task := range stage.AfterStageTasks { + stagesStatus[i].AfterStageTaskStatus[j].Type = task.Type + if task.Type == placementv1beta1.StageTaskTypeApproval { + stagesStatus[i].AfterStageTaskStatus[j].ApprovalRequestName = fmt.Sprintf(placementv1beta1.ApprovalTaskNameFmt, updateRun.GetName(), stage.Name) + } + } + } + wantStatus.StagesStatus 
= stagesStatus + wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) + wantStatus.Conditions = updateRunInitializedConditions(updateRun.Generation) + if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { + return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) + } + return nil + } +} + +func stagedUpdateRunStatusAbandonedActual( + updateRunName, namespace string, + wantResourceIndex string, + wantPolicyIndex string, + wantClusterCount int, + wantApplyStrategy *placementv1beta1.ApplyStrategy, + wantStrategySpec *placementv1beta1.UpdateStrategySpec, + wantSelectedClusters [][]string, + wantUnscheduledClusters []string, + wantCROs map[string][]string, + wantROs map[string][]placementv1beta1.NamespacedName, +) func() error { + return func() error { + updateRun := &placementv1beta1.StagedUpdateRun{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName, Namespace: namespace}, updateRun); err != nil { + return err + } + + wantStatus := placementv1beta1.UpdateRunStatus{ + PolicySnapshotIndexUsed: wantPolicyIndex, + ResourceSnapshotIndexUsed: wantResourceIndex, + PolicyObservedClusterCount: wantClusterCount, + ApplyStrategy: wantApplyStrategy.DeepCopy(), + UpdateStrategySnapshot: wantStrategySpec, + } + + wantStatus.StagesStatus = buildStageUpdatingStatusesWithGeneration(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun, updateRun.GetGeneration()-2) + wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) + wantStatus.Conditions = updateRunAbandonedConditions(updateRun.Generation) + if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { + return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) + } + return nil + } +} + +func stagedUpdateRunStatusNotStartedActual( + updateRunName, namespace string, + wantResourceIndex string, + 
wantPolicyIndex string, + wantClusterCount int, + wantApplyStrategy *placementv1beta1.ApplyStrategy, + wantStrategySpec *placementv1beta1.UpdateStrategySpec, + wantSelectedClusters [][]string, + wantUnscheduledClusters []string, + wantCROs map[string][]string, + wantROs map[string][]placementv1beta1.NamespacedName, +) func() error { + return func() error { + updateRun := &placementv1beta1.StagedUpdateRun{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName, Namespace: namespace}, updateRun); err != nil { + return err + } + + wantStatus := placementv1beta1.UpdateRunStatus{ + PolicySnapshotIndexUsed: wantPolicyIndex, + ResourceSnapshotIndexUsed: wantResourceIndex, + PolicyObservedClusterCount: wantClusterCount, + ApplyStrategy: wantApplyStrategy.DeepCopy(), + UpdateStrategySnapshot: wantStrategySpec, + } + + stagesStatus := make([]placementv1beta1.StageUpdatingStatus, len(wantStrategySpec.Stages)) + for i, stage := range wantStrategySpec.Stages { + stagesStatus[i].StageName = stage.Name + stagesStatus[i].Clusters = make([]placementv1beta1.ClusterUpdatingStatus, len(wantSelectedClusters[i])) + for j := range stagesStatus[i].Clusters { + stagesStatus[i].Clusters[j].ClusterName = wantSelectedClusters[i][j] + stagesStatus[i].Clusters[j].ClusterResourceOverrideSnapshots = wantCROs[wantSelectedClusters[i][j]] + stagesStatus[i].Clusters[j].ResourceOverrideSnapshots = wantROs[wantSelectedClusters[i][j]] + } + stagesStatus[i].AfterStageTaskStatus = make([]placementv1beta1.StageTaskStatus, len(stage.AfterStageTasks)) + for j, task := range stage.AfterStageTasks { + stagesStatus[i].AfterStageTaskStatus[j].Type = task.Type + if task.Type == placementv1beta1.StageTaskTypeApproval { + stagesStatus[i].AfterStageTaskStatus[j].ApprovalRequestName = fmt.Sprintf(placementv1beta1.ApprovalTaskNameFmt, updateRun.GetName(), stage.Name) + } + } + } + wantStatus.StagesStatus = stagesStatus + wantStatus.DeletionStageStatus = 
buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) + wantStatus.Conditions = updateRunInitializedConditions(updateRun.Generation) + if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { + return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) + } + return nil + } +} + +func stagedUpdateRunStatusStoppedActual( + updateRunName, namespace string, + wantResourceIndex string, + wantPolicyIndex string, + wantClusterCount int, + wantApplyStrategy *placementv1beta1.ApplyStrategy, + wantStrategySpec *placementv1beta1.UpdateStrategySpec, + wantSelectedClusters [][]string, + wantUnscheduledClusters []string, + wantCROs map[string][]string, + wantROs map[string][]placementv1beta1.NamespacedName, +) func() error { + return func() error { + updateRun := &placementv1beta1.StagedUpdateRun{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName, Namespace: namespace}, updateRun); err != nil { + return err + } + + wantStatus := placementv1beta1.UpdateRunStatus{ + PolicySnapshotIndexUsed: wantPolicyIndex, + ResourceSnapshotIndexUsed: wantResourceIndex, + PolicyObservedClusterCount: wantClusterCount, + ApplyStrategy: wantApplyStrategy.DeepCopy(), + UpdateStrategySnapshot: wantStrategySpec, + } + + wantStatus.StagesStatus = buildStageUpdatingStatusesWithGeneration(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun, updateRun.GetGeneration()-1) + wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) + wantStatus.Conditions = updateRunStoppedConditions(updateRun.Generation) + if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { + return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) + } + return nil + } +} + func stagedUpdateRunStatusSucceededActual( updateRunName, namespace string, wantResourceIndex, wantPolicyIndex string, @@ -2161,6 +2473,58 @@ func 
stagedUpdateRunStatusSucceededActual( } } +func buildStageUpdatingStatusesWithGeneration( + wantStrategySpec *placementv1beta1.UpdateStrategySpec, + wantSelectedClusters [][]string, + wantCROs map[string][]string, + wantROs map[string][]placementv1beta1.NamespacedName, + updateRun placementv1beta1.UpdateRunObj, + generation int64, +) []placementv1beta1.StageUpdatingStatus { + stagesStatus := make([]placementv1beta1.StageUpdatingStatus, len(wantStrategySpec.Stages)) + for i, stage := range wantStrategySpec.Stages { + stagesStatus[i].StageName = stage.Name + stagesStatus[i].Clusters = make([]placementv1beta1.ClusterUpdatingStatus, len(wantSelectedClusters[i])) + for j := range stagesStatus[i].Clusters { + stagesStatus[i].Clusters[j].ClusterName = wantSelectedClusters[i][j] + stagesStatus[i].Clusters[j].ClusterResourceOverrideSnapshots = wantCROs[wantSelectedClusters[i][j]] + stagesStatus[i].Clusters[j].ResourceOverrideSnapshots = wantROs[wantSelectedClusters[i][j]] + if i == 0 { + stagesStatus[i].Clusters[j].Conditions = updateRunClusterRolloutSucceedConditions(generation) + } + } + stagesStatus[i].AfterStageTaskStatus = make([]placementv1beta1.StageTaskStatus, len(stage.AfterStageTasks)) + for j, task := range stage.AfterStageTasks { + stagesStatus[i].AfterStageTaskStatus[j].Type = task.Type + if task.Type == placementv1beta1.StageTaskTypeApproval { + stagesStatus[i].AfterStageTaskStatus[j].ApprovalRequestName = fmt.Sprintf(placementv1beta1.ApprovalTaskNameFmt, updateRun.GetName(), stage.Name) + } + if i == 0 { + if task.Type == placementv1beta1.StageTaskTypeApproval { + stagesStatus[i].AfterStageTaskStatus[j].Conditions = append(stagesStatus[i].AfterStageTaskStatus[j].Conditions, metav1.Condition{ + Type: string(placementv1beta1.StageTaskConditionApprovalRequestCreated), + Status: metav1.ConditionTrue, + Reason: condition.AfterStageTaskApprovalRequestCreatedReason, + ObservedGeneration: generation, + }) + } + if task.Type == placementv1beta1.StageTaskTypeTimedWait 
{ + stagesStatus[i].AfterStageTaskStatus[j].Conditions = append(stagesStatus[i].AfterStageTaskStatus[j].Conditions, metav1.Condition{ + Type: string(placementv1beta1.StageTaskConditionWaitTimeElapsed), + Status: metav1.ConditionTrue, + Reason: condition.AfterStageTaskWaitTimeElapsedReason, + ObservedGeneration: generation, + }) + } + } + } + if i == 0 { + stagesStatus[i].Conditions = updateRunStageRolloutWaitingConditions(generation) + } + } + return stagesStatus +} + func buildStageUpdatingStatuses( wantStrategySpec *placementv1beta1.UpdateStrategySpec, wantSelectedClusters [][]string, @@ -2202,6 +2566,15 @@ func buildStageUpdatingStatuses( func buildDeletionStageStatus( wantUnscheduledClusters []string, updateRun placementv1beta1.UpdateRunObj, +) *placementv1beta1.StageUpdatingStatus { + deleteStageStatus := buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) + deleteStageStatus.Conditions = updateRunStageRolloutSucceedConditions(updateRun.GetGeneration()) + return deleteStageStatus +} + +func buildDeletionStatusWithoutConditions( + wantUnscheduledClusters []string, + updateRun placementv1beta1.UpdateRunObj, ) *placementv1beta1.StageUpdatingStatus { deleteStageStatus := &placementv1beta1.StageUpdatingStatus{ StageName: "kubernetes-fleet.io/deleteStage", @@ -2211,7 +2584,6 @@ func buildDeletionStageStatus( deleteStageStatus.Clusters[i].ClusterName = wantUnscheduledClusters[i] deleteStageStatus.Clusters[i].Conditions = updateRunClusterRolloutSucceedConditions(updateRun.GetGeneration()) } - deleteStageStatus.Conditions = updateRunStageRolloutSucceedConditions(updateRun.GetGeneration()) return deleteStageStatus } diff --git a/test/e2e/cluster_staged_updaterun_test.go b/test/e2e/cluster_staged_updaterun_test.go index 5bae5faa9..432871b34 100644 --- a/test/e2e/cluster_staged_updaterun_test.go +++ b/test/e2e/cluster_staged_updaterun_test.go @@ -27,6 +27,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 
"k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -313,7 +314,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -377,7 +378,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a new cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateStarted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -425,7 +426,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a new staged update run with old resourceSnapshotIndex successfully to rollback", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should rollback resources to member-cluster-2 only and completes stage canary", func() { @@ -538,7 +539,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, 
resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -592,7 +593,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should still have resources on member-cluster-1 and member-cluster-2 only and completes stage canary", func() { @@ -647,7 +648,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should still have resources on all member clusters and complete stage canary", func() { @@ -742,7 +743,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should not rollout any resources to member clusters and complete stage canary", func() { @@ -795,7 +796,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - 
createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should still have resources on member-cluster-2 and member-cluster-3 only and completes stage canary", func() { @@ -849,7 +850,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should still have resources on all member clusters and complete stage canary", func() { @@ -1025,7 +1026,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -1134,7 +1135,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should report diff for member-cluster-2 only and completes stage canary", func() { @@ -1250,7 +1251,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Create a staged update run with new resourceSnapshotIndex 
and verify rollout happens", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex2nd, strategyName) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateStarted) // Verify rollout to canary cluster first By("Verify that the new configmap is updated on member-cluster-2 during canary stage") @@ -1324,7 +1325,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should create a staged update run and verify cluster approval request is created", func() { validateLatestClusterResourceSnapshot(crpName, resourceSnapshotIndex1st) validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) // Verify that cluster approval request is created for canary stage. 
Eventually(func() error { @@ -1421,7 +1422,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Create updateRun and verify resources are rolled out", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) // Approval for AfterStageTasks of canary stage validateAndApproveClusterApprovalRequests(updateRunName, envCanary, placementv1beta1.AfterStageApprovalTaskNameFmt) @@ -1557,7 +1558,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should complete the cluster staged update run with all 3 clusters updated in parallel", func() { @@ -1647,7 +1648,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should complete the cluster staged update run with all 3 clusters", func() { @@ -1665,6 +1666,130 @@ var _ = Describe("test CRP rollout with staged update run", func() { Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) }) + + Context("Test resource rollout with staged update run by update run states - (NotStarted -> Started -> Stopped -> Abandoned)", Ordered, func() { + updateRunNames := []string{} 
+ var strategy *placementv1beta1.ClusterStagedUpdateStrategy + + BeforeAll(func() { + // Create a test namespace and a configMap inside it on the hub cluster. + createWorkResources() + + // Create the CRP with external rollout strategy. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: workResourceSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.ExternalRolloutStrategyType, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + // Create the clusterStagedUpdateStrategy. + strategy = createClusterStagedUpdateStrategySucceed(strategyName) + + for i := 0; i < 1; i++ { + updateRunNames = append(updateRunNames, fmt.Sprintf(clusterStagedUpdateRunNameWithSubIndexTemplate, GinkgoParallelProcess(), i)) + } + }) + + AfterAll(func() { + // Remove the custom deletion blocker finalizer from the CRP. + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + + // Remove all the clusterStagedUpdateRuns. + for _, name := range updateRunNames { + ensureClusterStagedUpdateRunDeletion(name) + } + + // Delete the clusterStagedUpdateStrategy. 
+ ensureClusterUpdateRunStrategyDeletion(strategyName) + }) + + It("Should not rollout any resources to member clusters as there's no update run yet", checkIfRemovedWorkResourcesFromAllMemberClustersConsistently) + + It("Should have the latest resource snapshot", func() { + validateLatestClusterResourceSnapshot(crpName, resourceSnapshotIndex1st) + }) + + It("Should successfully schedule the crp", func() { + validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) + }) + + It("Should update crp status as pending rollout", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", "", ""}, []bool{false, false, false}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + It("Should create a cluster staged update run successfully", func() { + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateNotStarted) + }) + + It("Should not start rollout as the update run is in NotStarted state", func() { + By("Member clusters should not have work resources placed") + checkIfRemovedWorkResourcesFromAllMemberClustersConsistently() + + By("Validating the csur status remains in NotStarted state") + csurNotStartedActual := clusterStagedUpdateRunStatusNotStartedActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + Consistently(csurNotStartedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to keep updateRun %s in NotStarted state", updateRunNames[0]) + }) + + It("Should rollout resources to member-cluster-2 only after update run is in Started state", func() { + // Update the update run state 
to Started. + By("Updating the update run state to Started") + updateClusterStagedUpdateRunState(updateRunNames[0], placementv1beta1.StateStarted) + + checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) + checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) + + By("Validating crp status as member-cluster-2 updated") + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + It("Should stop update run when updated to Stopped state", func() { + // Update the update run state to Stopped. + By("Updating the update run state to Stopped") + updateClusterStagedUpdateRunState(updateRunNames[0], placementv1beta1.StateStopped) + + By("Validating no further rollouts happen after stopping") + checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) + checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) + + By("Validating crp status as member-cluster-2 updated only") + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + + csurSucceededActual := clusterStagedUpdateRunStatusStoppedActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], 
allMemberClusterNames[2]}}, nil, nil, nil) + Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s stopped", updateRunNames[0]) + }) + + It("Should abandon update run when updated to Abandoned state", func() { + // Update the update run state to Abandoned. + By("Updating the update run state to Abandoned") + updateClusterStagedUpdateRunState(updateRunNames[0], placementv1beta1.StateAbandoned) + + By("Validating no further rollouts happen after abandonment") + checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) + checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) + + By("Validating crp status as member-cluster-2 updated only") + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + + csurAbandonedActual := clusterStagedUpdateRunStatusAbandonedActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + Eventually(csurAbandonedActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s abandoned", updateRunNames[0]) + }) + }) + + // TODO(britaniar): Add more e2e tests for updateRun Start/Stop Implementation }) // Note that this container cannot run in parallel with other containers.
@@ -1732,7 +1857,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) By("Creating the first staged update run") - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) By("Validating staged update run has succeeded") csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil) @@ -1783,7 +1908,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should create another staged update run for the same CRP", func() { validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 2) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should complete the second staged update run and complete the CRP", func() { @@ -1831,7 +1956,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should reschedule to member cluster 1 and create a new cluster staged update run successfully", func() { validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should complete the staged update run, complete CRP, and rollout 
resources to all member clusters", func() { @@ -1874,7 +1999,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should reschedule to member cluster 1 and create a new cluster staged update run successfully", func() { validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateStarted) }) It("Should complete the staged update run, complete CRP, and rollout updated resources to all member clusters", func() { @@ -1913,7 +2038,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should reschedule to member cluster 1 and create a new cluster staged update run successfully", func() { validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should complete the staged update run, complete CRP, and re-place resources to all member clusters", func() { @@ -2059,12 +2184,13 @@ func validateLatestClusterResourceSnapshot(crpName, wantResourceSnapshotIndex st }, eventuallyDuration, eventuallyInterval).Should(Equal(wantResourceSnapshotIndex), "Resource snapshot index does not match") } -func createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex, strategyName string) { +func createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex, strategyName string, state placementv1beta1.State) { updateRun := &placementv1beta1.ClusterStagedUpdateRun{ ObjectMeta: metav1.ObjectMeta{ Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ + 
State: state, PlacementName: crpName, ResourceSnapshotIndex: resourceSnapshotIndex, StagedUpdateStrategyName: strategyName, @@ -2079,6 +2205,7 @@ func createClusterStagedUpdateRunSucceedWithNoResourceSnapshotIndex(updateRunNam Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ + State: placementv1beta1.StateStarted, PlacementName: crpName, StagedUpdateStrategyName: strategyName, }, @@ -2086,6 +2213,14 @@ func createClusterStagedUpdateRunSucceedWithNoResourceSnapshotIndex(updateRunNam Expect(hubClient.Create(ctx, updateRun)).To(Succeed(), "Failed to create ClusterStagedUpdateRun %s", updateRunName) } +func updateClusterStagedUpdateRunState(updateRunName string, state placementv1beta1.State) { + updateRun := &placementv1beta1.ClusterStagedUpdateRun{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: updateRunName}, updateRun)).To(Succeed(), "Failed to get ClusterStagedUpdateRun %s", updateRunName) + + updateRun.Spec.State = state + Expect(hubClient.Update(ctx, updateRun)).To(Succeed(), "Failed to update ClusterStagedUpdateRun %s", updateRunName) +} + func validateAndApproveClusterApprovalRequests(updateRunName, stageName, approvalRequestNameFmt string) { Eventually(func() error { appReqList := &placementv1beta1.ClusterApprovalRequestList{} diff --git a/test/e2e/staged_updaterun_test.go b/test/e2e/staged_updaterun_test.go index 60dcfe885..e90bed1fc 100644 --- a/test/e2e/staged_updaterun_test.go +++ b/test/e2e/staged_updaterun_test.go @@ -300,7 +300,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -364,7 
+364,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a new staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex2nd, strategyName) + createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateStarted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -412,7 +412,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a new staged update run with old resourceSnapshotIndex successfully to rollback", func() { - createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should rollback resources to member-cluster-2 only and completes stage canary", func() { @@ -523,7 +523,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -577,7 +577,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, 
placementv1beta1.StateStarted) }) It("Should still have resources on member-cluster-1 and member-cluster-2 only and completes stage canary", func() { @@ -632,7 +632,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should still have resources on all member clusters and complete stage canary", func() { @@ -725,7 +725,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a namespaced staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should not rollout any resources to member clusters and complete stage canary", func() { @@ -778,7 +778,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a namespaced staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should still have resources on member-cluster-2 and member-cluster-3 only and completes stage canary", func() { @@ -832,7 +832,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a namespaced staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, 
rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should still have resources on all member clusters and complete stage canary", func() { @@ -980,7 +980,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -1084,7 +1084,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should report diff for member-cluster-2 only and completes stage canary", func() { @@ -1198,7 +1198,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Create a staged update run with new resourceSnapshotIndex and verify rollout happens", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex2nd, strategyName) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateStarted) // Verify rollout to canary cluster first. 
By("Verify that the new configmap is updated on member-cluster-2 during canary stage") @@ -1284,7 +1284,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Create updateRun and verify resources are rolled out", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) // Approval for AfterStageTask of canary stage validateAndApproveNamespacedApprovalRequests(updateRunName, testNamespace, envCanary, placementv1beta1.AfterStageApprovalTaskNameFmt) @@ -1419,7 +1419,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should complete the staged update run with all 3 clusters updated in parallel", func() { @@ -1508,7 +1508,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) }) It("Should complete the staged update run with all 3 clusters", func() { @@ -1526,6 +1526,128 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s/%s status as expected", testNamespace, rpName) }) }) + + Context("Test resource 
rollout with staged update run by update run states - (NotStarted -> Started -> Stopped -> Abandoned)", Ordered, func() { + updateRunNames := []string{} + var strategy *placementv1beta1.StagedUpdateStrategy + + BeforeAll(func() { + // Create the RP with external rollout strategy. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: testNamespace, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.ExternalRolloutStrategyType, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + + // Create the stagedUpdateStrategy. + strategy = createStagedUpdateStrategySucceed(strategyName, testNamespace) + + for i := 0; i < 3; i++ { + updateRunNames = append(updateRunNames, fmt.Sprintf(stagedUpdateRunNameWithSubIndexTemplate, GinkgoParallelProcess(), i)) + } + }) + + AfterAll(func() { + // Remove the custom deletion blocker finalizer from the RP. + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: testNamespace}, allMemberClusters) + + // Remove all the stagedUpdateRuns. + for _, name := range updateRunNames { + ensureStagedUpdateRunDeletion(name, testNamespace) + } + + // Delete the stagedUpdateStrategy. 
+ ensureStagedUpdateRunStrategyDeletion(strategyName, testNamespace) + }) + + It("Should not rollout any resources to member clusters as there's no update run yet", checkIfRemovedConfigMapFromAllMemberClustersConsistently) + + It("Should have the latest resource snapshot", func() { + validateLatestResourceSnapshot(rpName, testNamespace, resourceSnapshotIndex1st) + }) + + It("Should successfully schedule the rp", func() { + validateLatestSchedulingPolicySnapshot(rpName, testNamespace, policySnapshotIndex1st, 3) + }) + + It("Should update rp status as pending rollout", func() { + rpStatusUpdatedActual := rpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", "", ""}, []bool{false, false, false}, nil, nil) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s/%s status as expected", testNamespace, rpName) + }) + + It("Should create a staged update run successfully", func() { + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateNotStarted) + }) + + It("Should not start rollout as the update run is in NotStarted state", func() { + By("Member clusters should not have work resources placed") + checkIfRemovedConfigMapFromAllMemberClustersConsistently() + + By("Validating the sur status remains in NotStarted state") + surNotStartedActual := stagedUpdateRunStatusNotStartedActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + Consistently(surNotStartedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to keep updateRun %s in NotStarted state", updateRunNames[0]) + }) + + It("Should rollout resources to member-cluster-2 only after update run is in Started state", func() 
{ + // Update the update run state to Started. + By("Updating the update run state to Started") + updateStagedUpdateRunState(updateRunNames[0], testNamespace, placementv1beta1.StateStarted) + + checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) + checkIfRemovedConfigMapFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) + + By("Validating crp status as member-cluster-2 updated") + rpStatusUpdatedActual := rpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("Should stop update run when updated to Stopped state", func() { + // Update the update run state to Stopped. + By("Updating the update run state to Stopped") + updateStagedUpdateRunState(updateRunNames[0], testNamespace, placementv1beta1.StateStopped) + + By("Validating no further rollouts happen after stopping") + checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) + checkIfRemovedConfigMapFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) + + By("Validating crp status as member-cluster-2 updated only") + rpStatusUpdatedActual := rpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + + surSucceededActual := stagedUpdateRunStatusStoppedActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, 
{allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) + }) + + It("Should abandon update run when updated to Abandoned state", func() { + // Update the update run state to Abandoned. + By("Updating the update run state to Abandoned") + updateStagedUpdateRunState(updateRunNames[0], testNamespace, placementv1beta1.StateAbandoned) + + By("Validating no further rollouts happen after abandonment") + checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) + checkIfRemovedConfigMapFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) + + By("Validating crp status as member-cluster-2 updated only") + rpStatusUpdatedActual := rpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + + surSucceededActual := stagedUpdateRunStatusAbandonedActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) + }) + }) + + //TODO(britaniar): Add more e2e tests for updateRun Start/Stop Implementation }) func createStagedUpdateStrategySucceed(strategyName, namespace string) *placementv1beta1.StagedUpdateStrategy { @@ -1621,7 +1743,7 @@ func validateLatestResourceSnapshot(rpName, namespace, wantResourceSnapshotIndex }, 
eventuallyDuration, eventuallyInterval).Should(Equal(wantResourceSnapshotIndex), "Resource snapshot index does not match") } -func createStagedUpdateRunSucceed(updateRunName, namespace, rpName, resourceSnapshotIndex, strategyName string) { +func createStagedUpdateRunSucceed(updateRunName, namespace, rpName, resourceSnapshotIndex, strategyName string, state placementv1beta1.State) { updateRun := &placementv1beta1.StagedUpdateRun{ ObjectMeta: metav1.ObjectMeta{ Name: updateRunName, @@ -1629,6 +1751,7 @@ func createStagedUpdateRunSucceed(updateRunName, namespace, rpName, resourceSnap }, Spec: placementv1beta1.UpdateRunSpec{ PlacementName: rpName, + State: state, ResourceSnapshotIndex: resourceSnapshotIndex, StagedUpdateStrategyName: strategyName, }, @@ -1643,6 +1766,7 @@ func createStagedUpdateRunSucceedWithNoResourceSnapshotIndex(updateRunName, name Namespace: namespace, }, Spec: placementv1beta1.UpdateRunSpec{ + State: placementv1beta1.StateStarted, PlacementName: rpName, StagedUpdateStrategyName: strategyName, }, @@ -1650,6 +1774,14 @@ func createStagedUpdateRunSucceedWithNoResourceSnapshotIndex(updateRunName, name Expect(hubClient.Create(ctx, updateRun)).To(Succeed(), "Failed to create StagedUpdateRun %s", updateRunName) } +func updateStagedUpdateRunState(updateRunName, namespace string, state placementv1beta1.State) { + updateRun := &placementv1beta1.StagedUpdateRun{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: updateRunName, Namespace: namespace}, updateRun)).To(Succeed(), "Failed to get StagedUpdateRun %s", updateRunName) + + updateRun.Spec.State = state + Expect(hubClient.Update(ctx, updateRun)).To(Succeed(), "Failed to update StagedUpdateRun %s", updateRunName) +} + func validateAndApproveNamespacedApprovalRequests(updateRunName, namespace, stageName, approvalRequestNameFmt string) { Eventually(func() error { appReqList := &placementv1beta1.ApprovalRequestList{} From 5d9c4a014cc3a4c2131f9f7baa077aa8c0e7b935 Mon Sep 17 00:00:00 2001 From: Britania 
Rodriguez Reyes Date: Wed, 26 Nov 2025 13:26:25 -0600 Subject: [PATCH 2/9] Initialize and Execute implementation Signed-off-by: Britania Rodriguez Reyes --- pkg/controllers/updaterun/controller.go | 77 +--- .../updaterun/controller_integration_test.go | 51 ++- pkg/controllers/updaterun/execution.go | 9 +- .../updaterun/execution_integration_test.go | 360 +++--------------- pkg/controllers/updaterun/initialization.go | 21 +- .../initialization_integration_test.go | 50 --- pkg/controllers/updaterun/validation.go | 11 +- .../updaterun/validation_integration_test.go | 3 +- pkg/utils/condition/reason.go | 9 +- test/e2e/actuals_test.go | 246 +----------- test/e2e/cluster_staged_updaterun_test.go | 55 +-- test/e2e/staged_updaterun_test.go | 61 +-- 12 files changed, 165 insertions(+), 788 deletions(-) diff --git a/pkg/controllers/updaterun/controller.go b/pkg/controllers/updaterun/controller.go index d4040538b..227887242 100644 --- a/pkg/controllers/updaterun/controller.go +++ b/pkg/controllers/updaterun/controller.go @@ -105,14 +105,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim defer emitUpdateRunStatusMetric(updateRun) state := updateRun.GetUpdateRunSpec().State - switch state { // Early check for abandoned state - this is a terminal state, no initialization needed. - case placementv1beta1.StateAbandoned: - klog.V(2).InfoS("The updateRun is abandoned, terminating", "state", state, "updateRun", runObjRef) - return runtime.Result{}, r.recordUpdateRunAbandoned(ctx, updateRun) - case placementv1beta1.StateStopped: // Early check for stopped state - pause the update run if needed. 
- klog.V(2).InfoS("The updateRun is paused, waiting to resume", "state", state, "updateRun", runObjRef) - return runtime.Result{}, r.recordUpdateRunPaused(ctx, updateRun) - } var updatingStageIndex int var toBeUpdatedBindings, toBeDeletedBindings []placementv1beta1.BindingObj @@ -121,13 +113,20 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim // Check if initialized regardless of generation. // The updateRun spec fields are immutable except for the state field. When the state changes, // the update run generation increments, but we don't need to reinitialize since initialization is a one-time setup. - isInitialized := initCond != nil && initCond.Status == metav1.ConditionTrue - if !isInitialized { + if !(initCond != nil && initCond.Status == metav1.ConditionTrue) { // Check if initialization failed for the current generation. if condition.IsConditionStatusFalse(initCond, updateRun.GetGeneration()) { klog.V(2).InfoS("The updateRun has failed to initialize", "errorMsg", initCond.Message, "updateRun", runObjRef) return runtime.Result{}, nil } + + if initCond == nil { + // Update the status to indicate that the updateRun is initializing. + // Requeue immediately to continue with initialization. + klog.V(2).InfoS("The updateRun is initializing", "state", state, "updateRun", runObjRef) + return runtime.Result{RequeueAfter: 1}, r.recordUpdateRunInitializing(ctx, updateRun) + } + var initErr error if toBeUpdatedBindings, toBeDeletedBindings, initErr = r.initialize(ctx, updateRun); initErr != nil { klog.ErrorS(initErr, "Failed to initialize the updateRun", "updateRun", runObjRef) @@ -137,10 +136,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim } return runtime.Result{}, initErr } - updatingStageIndex = 0 // start from the first stage (typically for NotStarted or Started states). 
- klog.V(2).InfoS("Initialized the updateRun", "updateRun", runObjRef) + updatingStageIndex = 0 // start from the first stage (typically for Initialize or Execute states). + klog.V(2).InfoS("Initialized the updateRun", "state", state, "updateRun", runObjRef) } else { - klog.V(2).InfoS("The updateRun is initialized", "updateRun", runObjRef) + klog.V(2).InfoS("The updateRun is initialized", "state", state, "updateRun", runObjRef) // Check if the updateRun is finished. finishedCond := meta.FindStatusCondition(updateRunStatus.Conditions, string(placementv1beta1.StagedUpdateRunConditionSucceeded)) if condition.IsConditionStatusTrue(finishedCond, updateRun.GetGeneration()) || condition.IsConditionStatusFalse(finishedCond, updateRun.GetGeneration()) { @@ -149,7 +148,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim } var validateErr error // Validate the updateRun status to ensure the update can be continued and get the updating stage index and cluster indices. - // For Stopped → Started transition, this will resume from where it left off. if updatingStageIndex, toBeUpdatedBindings, toBeDeletedBindings, validateErr = r.validate(ctx, updateRun); validateErr != nil { // errStagedUpdatedAborted cannot be retried. if errors.Is(validateErr, errStagedUpdatedAborted) { @@ -168,7 +166,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim // Execute the updateRun. if state == placementv1beta1.StateStarted { - klog.V(2).InfoS("Continue to execute the updateRun", "updatingStageIndex", updatingStageIndex, "updateRun", runObjRef) + klog.V(2).InfoS("Continue to execute the updateRun", "state", state, "updatingStageIndex", updatingStageIndex, "updateRun", runObjRef) finished, waitTime, execErr := r.execute(ctx, updateRun, updatingStageIndex, toBeUpdatedBindings, toBeDeletedBindings) if errors.Is(execErr, errStagedUpdatedAborted) { // errStagedUpdatedAborted cannot be retried. 
@@ -189,6 +187,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim if execErr != nil { return runtime.Result{}, execErr } + if waitTime == 0 { + // If update run is not finished and waitTime is zero, the waitTime needs to be update to a non-zero value + // as we are using RequeueAfter only since Requeue is deprecated. + return runtime.Result{RequeueAfter: 1}, nil + } return runtime.Result{RequeueAfter: waitTime}, nil } klog.V(2).InfoS("The updateRun is not started, waiting to be started", "state", state, "updateRun", runObjRef) @@ -285,50 +288,6 @@ func (r *Reconciler) recordUpdateRunFailed(ctx context.Context, updateRun placem return nil } -// recordUpdateRunPaused records the progressing condition as paused in the updateRun status. -func (r *Reconciler) recordUpdateRunPaused(ctx context.Context, updateRun placementv1beta1.UpdateRunObj) error { - updateRunStatus := updateRun.GetUpdateRunStatus() - meta.SetStatusCondition(&updateRunStatus.Conditions, metav1.Condition{ - Type: string(placementv1beta1.StagedUpdateRunConditionProgressing), - Status: metav1.ConditionFalse, - ObservedGeneration: updateRun.GetGeneration(), - Reason: condition.UpdateRunPausedReason, - Message: "The update run is paused", - }) - if updateErr := r.Client.Status().Update(ctx, updateRun); updateErr != nil { - klog.ErrorS(updateErr, "Failed to update the updateRun status as paused", "updateRun", klog.KObj(updateRun)) - // updateErr can be retried. - return controller.NewUpdateIgnoreConflictError(updateErr) - } - return nil -} - -// recordUpdateRunAbandoned records the succeeded and progressing condition as abandoned in the updateRun status. 
-func (r *Reconciler) recordUpdateRunAbandoned(ctx context.Context, updateRun placementv1beta1.UpdateRunObj) error { - updateRunStatus := updateRun.GetUpdateRunStatus() - meta.SetStatusCondition(&updateRunStatus.Conditions, metav1.Condition{ - Type: string(placementv1beta1.StagedUpdateRunConditionProgressing), - Status: metav1.ConditionFalse, - ObservedGeneration: updateRun.GetGeneration(), - Reason: condition.UpdateRunAbandonedReason, - Message: "The stages are aborted due to abandonment", - }) - meta.SetStatusCondition(&updateRunStatus.Conditions, metav1.Condition{ - Type: string(placementv1beta1.StagedUpdateRunConditionSucceeded), - Status: metav1.ConditionFalse, - ObservedGeneration: updateRun.GetGeneration(), - Reason: condition.UpdateRunAbandonedReason, - Message: "The update run has been abandoned", - }) - - if updateErr := r.Client.Status().Update(ctx, updateRun); updateErr != nil { - klog.ErrorS(updateErr, "Failed to update the updateRun status as failed", "updateRun", klog.KObj(updateRun)) - // updateErr can be retried. - return controller.NewUpdateIgnoreConflictError(updateErr) - } - return nil -} - // recordUpdateRunStatus records the updateRun status. 
func (r *Reconciler) recordUpdateRunStatus(ctx context.Context, updateRun placementv1beta1.UpdateRunObj) error { if updateErr := r.Client.Status().Update(ctx, updateRun); updateErr != nil { diff --git a/pkg/controllers/updaterun/controller_integration_test.go b/pkg/controllers/updaterun/controller_integration_test.go index c424c2ff9..2867d556f 100644 --- a/pkg/controllers/updaterun/controller_integration_test.go +++ b/pkg/controllers/updaterun/controller_integration_test.go @@ -292,50 +292,40 @@ func generateInitializationFailedMetric(updateRun *placementv1beta1.ClusterStage } } -func generateProgressingMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { +func generateInitializationUnknownMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { return &prometheusclientmodel.Metric{ - Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionProgressing), - string(metav1.ConditionTrue), condition.UpdateRunProgressingReason), + Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionInitialized), + string(metav1.ConditionUnknown), condition.UpdateRunInitializingReason), Gauge: &prometheusclientmodel.Gauge{ Value: ptr.To(float64(time.Now().UnixNano()) / 1e9), }, } } -func generateWaitingMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { +func generateProgressingMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { return &prometheusclientmodel.Metric{ Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionProgressing), - string(metav1.ConditionFalse), condition.UpdateRunWaitingReason), + string(metav1.ConditionTrue), condition.UpdateRunProgressingReason), Gauge: &prometheusclientmodel.Gauge{ Value: ptr.To(float64(time.Now().UnixNano()) / 1e9), }, } } -func generateStuckMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) 
*prometheusclientmodel.Metric { +func generateWaitingMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { return &prometheusclientmodel.Metric{ Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionProgressing), - string(metav1.ConditionFalse), condition.UpdateRunStuckReason), + string(metav1.ConditionFalse), condition.UpdateRunWaitingReason), Gauge: &prometheusclientmodel.Gauge{ Value: ptr.To(float64(time.Now().UnixNano()) / 1e9), }, } } -func generatePausedMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { +func generateStuckMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { return &prometheusclientmodel.Metric{ Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionProgressing), - string(metav1.ConditionFalse), condition.UpdateRunPausedReason), - Gauge: &prometheusclientmodel.Gauge{ - Value: ptr.To(float64(time.Now().UnixNano()) / 1e9), - }, - } -} - -func generateAbandonedMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { - return &prometheusclientmodel.Metric{ - Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionSucceeded), - string(metav1.ConditionFalse), condition.UpdateRunAbandonedReason), + string(metav1.ConditionFalse), condition.UpdateRunStuckReason), Gauge: &prometheusclientmodel.Gauge{ Value: ptr.To(float64(time.Now().UnixNano()) / 1e9), }, @@ -838,14 +828,23 @@ func generateFalseCondition(obj client.Object, condType any) metav1.Condition { } } -func generateFalseProgressingCondition(obj client.Object, condType any, reason string) metav1.Condition { - falseCond := generateFalseCondition(obj, condType) - falseCond.Reason = reason - return falseCond -} - -func generateFalseSucceededCondition(obj client.Object, condType any, reason string) metav1.Condition { +func 
generateFalseProgressingCondition(obj client.Object, condType any, succeeded bool) metav1.Condition { falseCond := generateFalseCondition(obj, condType) + reason := "" + switch condType { + case placementv1beta1.StagedUpdateRunConditionProgressing: + if succeeded { + reason = condition.UpdateRunSucceededReason + } else { + reason = condition.UpdateRunFailedReason + } + case placementv1beta1.StageUpdatingConditionProgressing: + if succeeded { + reason = condition.StageUpdatingSucceededReason + } else { + reason = condition.StageUpdatingFailedReason + } + } falseCond.Reason = reason return falseCond } diff --git a/pkg/controllers/updaterun/execution.go b/pkg/controllers/updaterun/execution.go index b998abd95..1180ae34a 100644 --- a/pkg/controllers/updaterun/execution.go +++ b/pkg/controllers/updaterun/execution.go @@ -166,13 +166,13 @@ func (r *Reconciler) executeUpdatingStage( for i := 0; i < len(updatingStageStatus.Clusters) && clusterUpdatingCount < maxConcurrency; i++ { clusterStatus := &updatingStageStatus.Clusters[i] clusterUpdateSucceededCond := meta.FindStatusCondition(clusterStatus.Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)) - if clusterUpdateSucceededCond != nil && clusterUpdateSucceededCond.Status == metav1.ConditionTrue { + if condition.IsConditionStatusTrue(clusterUpdateSucceededCond, updateRun.GetGeneration()) { // The cluster has been updated successfully. finishedClusterCount++ continue } clusterUpdatingCount++ - if clusterUpdateSucceededCond != nil && clusterUpdateSucceededCond.Status == metav1.ConditionFalse { + if condition.IsConditionStatusFalse(clusterUpdateSucceededCond, updateRun.GetGeneration()) { // The cluster is marked as failed to update, this cluster is counted as updating cluster since it's not finished to avoid processing more clusters than maxConcurrency in this round. 
failedErr := fmt.Errorf("the cluster `%s` in the stage %s has failed", clusterStatus.ClusterName, updatingStageStatus.StageName) klog.ErrorS(failedErr, "The cluster has failed to be updated", "updateRun", updateRunRef) @@ -182,7 +182,7 @@ func (r *Reconciler) executeUpdatingStage( // The cluster needs to be processed. clusterStartedCond := meta.FindStatusCondition(clusterStatus.Conditions, string(placementv1beta1.ClusterUpdatingConditionStarted)) binding := toBeUpdatedBindingsMap[clusterStatus.ClusterName] - if clusterStartedCond == nil || clusterStartedCond.Status != metav1.ConditionTrue { + if !condition.IsConditionStatusTrue(clusterStartedCond, updateRun.GetGeneration()) { // The cluster has not started updating yet. if !isBindingSyncedWithClusterStatus(resourceSnapshotName, updateRun, binding, clusterStatus) { klog.V(2).InfoS("Found the first cluster that needs to be updated", "cluster", clusterStatus.ClusterName, "stage", updatingStageStatus.StageName, "updateRun", updateRunRef) @@ -351,8 +351,7 @@ func (r *Reconciler) executeDeleteStage( // In validation, we already check the binding must exist in the status. delete(existingDeleteStageClusterMap, bindingSpec.TargetCluster) // Make sure the cluster is not marked as deleted as the binding is still there. 
- clusterDeleteSucceededCond := meta.FindStatusCondition(curCluster.Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)) - if clusterDeleteSucceededCond != nil && clusterDeleteSucceededCond.Status == metav1.ConditionTrue { + if condition.IsConditionStatusTrue(meta.FindStatusCondition(curCluster.Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)), updateRun.GetGeneration()) { unexpectedErr := controller.NewUnexpectedBehaviorError(fmt.Errorf("the deleted cluster `%s` in the deleting stage still has a binding", bindingSpec.TargetCluster)) klog.ErrorS(unexpectedErr, "The cluster in the deleting stage is not removed yet but marked as deleted", "cluster", curCluster.ClusterName, "updateRun", updateRunRef) return false, fmt.Errorf("%w: %s", errStagedUpdatedAborted, unexpectedErr.Error()) diff --git a/pkg/controllers/updaterun/execution_integration_test.go b/pkg/controllers/updaterun/execution_integration_test.go index 62b731939..d1d9f1a78 100644 --- a/pkg/controllers/updaterun/execution_integration_test.go +++ b/pkg/controllers/updaterun/execution_integration_test.go @@ -345,7 +345,7 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions = append(wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionApprovalRequestApproved)) // 1st stage completed, mark progressing condition reason as succeeded and add succeeded condition. 
- wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // 2nd stage waiting for before stage tasks. wantStatus.StagesStatus[1].Conditions = append(wantStatus.StagesStatus[1].Conditions, generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)) @@ -549,7 +549,7 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionApprovalRequestApproved)) wantStatus.StagesStatus[1].AfterStageTaskStatus[1].Conditions = append(wantStatus.StagesStatus[1].AfterStageTaskStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionWaitTimeElapsed)) - wantStatus.StagesStatus[1].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) + wantStatus.StagesStatus[1].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) wantStatus.StagesStatus[1].Conditions = append(wantStatus.StagesStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) meta.SetStatusCondition(&wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing)) @@ -606,10 +606,10 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { wantStatus.DeletionStageStatus.Clusters[i].Conditions = append(wantStatus.DeletionStageStatus.Clusters[i].Conditions, 
generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) } // Mark the stage progressing condition as false with succeeded reason and add succeeded condition. - wantStatus.DeletionStageStatus.Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) + wantStatus.DeletionStageStatus.Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -692,9 +692,9 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { By("Validating the updateRun has failed") wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateFalseCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingFailedReason) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, 
placementv1beta1.StageUpdatingConditionProgressing, false) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunFailedReason)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, false)) wantStatus.Conditions = append(wantStatus.Conditions, generateFalseCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -865,13 +865,13 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { By("Validating the 3rd cluster has succeeded and stage waiting for AfterStageTasks") wantStatus.StagesStatus[0].Clusters[2].Conditions = append(wantStatus.StagesStatus[0].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) // 1st stage completed. - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. 
- wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -971,13 +971,13 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { wantStatus.StagesStatus[0].AfterStageTaskStatus[0].Conditions = append(wantStatus.StagesStatus[0].AfterStageTaskStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionWaitTimeElapsed)) // 1st stage completed. 
- wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. - wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. 
- meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -1109,7 +1109,7 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { - wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. 
- meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -1205,7 +1205,7 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { - wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. 
- meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -1484,13 +1484,13 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { By("Validating the 1st stage has completed") wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions = append(wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionWaitTimeElapsed)) - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. 
- wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) // Need to have a longer wait time for the test to pass, because of the long wait time specified in the update strategy. 
timeout = time.Second * 90 @@ -1518,245 +1518,7 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { }) }) - Context("Cluster staged update run should NOT update clusters one by one - different states (NotStarted -> Abandoned)", Ordered, func() { - var wantMetrics []*io_prometheus_client.Metric - BeforeAll(func() { - By("Creating a new clusterStagedUpdateRun") - updateRun.Spec.State = placementv1beta1.StateNotStarted - Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) - - By("Validating the initialization succeeded and but not execution started") - wantStatus = generateSucceededInitializationStatusForSmallClusters(crp, updateRun, testResourceSnapshotIndex, policySnapshot, updateStrategy) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Checking update run status metrics are emitted") - wantMetrics = append(wantMetrics, generateInitializationSucceededMetric(updateRun)) - validateUpdateRunMetricsEmitted(wantMetrics...) - }) - - It("Should not start execution when the state is NotStarted", func() { - By("Validating no execution has started") - Consistently(func() bool { - var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun - if err := k8sClient.Get(ctx, types.NamespacedName{Name: updateRun.Name}, ¤tUpdateRun); err != nil { - return false - } - return meta.FindStatusCondition(currentUpdateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) == nil && - meta.FindStatusCondition(currentUpdateRun.Status.StagesStatus[0].Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)) == nil - }, timeout, interval).Should(BeTrue(), "execution has started unexpectedly") - - By("Validating the 1st clusterResourceBinding is updated to NOT Bound") - binding := resourceBindings[0] // cluster-0 - validateNotBindingState(ctx, binding) - }) - - It("Should not continue further after changing the state to Abandoned", func() { - By("Updating the updateRun state to Abandoned") - 
updateRun.Spec.State = placementv1beta1.StateAbandoned - Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") - - By("Validating the execution has not started") - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Checking update run status metrics are emitted") - validateUpdateRunMetricsEmitted(wantMetrics...) - }) - }) - - Context("Cluster staged update run should update clusters one by one - different states (NotStarted -> Started-> Abandoned)", Ordered, func() { - var wantMetrics []*io_prometheus_client.Metric - BeforeAll(func() { - By("Creating a new clusterStagedUpdateRun") - updateRun.Spec.State = placementv1beta1.StateNotStarted - Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) - - By("Validating the initialization succeeded and but not execution started") - wantStatus = generateSucceededInitializationStatusForSmallClusters(crp, updateRun, testResourceSnapshotIndex, policySnapshot, updateStrategy) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Checking update run status metrics are emitted") - wantMetrics = append(wantMetrics, generateInitializationSucceededMetric(updateRun)) - validateUpdateRunMetricsEmitted(wantMetrics...) 
- }) - - It("Should not start execution when the state is NotStarted", func() { - By("Validating no execution has started") - Consistently(func() bool { - var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun - if err := k8sClient.Get(ctx, types.NamespacedName{Name: updateRun.Name}, ¤tUpdateRun); err != nil { - return false - } - return meta.FindStatusCondition(currentUpdateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) == nil && - meta.FindStatusCondition(currentUpdateRun.Status.StagesStatus[0].Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)) == nil - }, timeout, interval).Should(BeTrue(), "execution has started unexpectedly") - - By("Validating the 1st clusterResourceBinding is updated to NOT Bound") - binding := resourceBindings[0] // cluster-0 - validateNotBindingState(ctx, binding) - }) - - It("Should start execution after changing the state to Started", func() { - By("Updating the updateRun state to Started") - updateRun.Spec.State = placementv1beta1.StateStarted - Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") - - By("Validating the execution has started") - wantStatus = generateExecutionStartedStatus(updateRun, wantStatus) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Checking update run status metrics are emitted") - wantMetrics = append(wantMetrics, generateProgressingMetric(updateRun)) - validateUpdateRunMetricsEmitted(wantMetrics...) 
- }) - - It("Should mark the 1st cluster in the 1st stage as succeeded after marking the binding available", func() { - By("Validating the 1st clusterResourceBinding is updated to Bound") - binding := resourceBindings[0] // cluster-0 - validateBindingState(ctx, binding, resourceSnapshot.Name, updateRun, 0) - - By("Updating the 1st clusterResourceBinding to Available") - meta.SetStatusCondition(&binding.Status.Conditions, generateTrueCondition(binding, placementv1beta1.ResourceBindingAvailable)) - Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") - - By("Validating the 1st cluster has succeeded and 2nd cluster has started") - wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[0].Clusters[1].Conditions = append(wantStatus.StagesStatus[0].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Validating the 1st stage has startTime set") - Expect(updateRun.Status.StagesStatus[0].StartTime).ShouldNot(BeNil()) - - By("Checking update run status metrics are emitted") - validateUpdateRunMetricsEmitted(wantMetrics...) 
- }) - - It("Should abandon execution after changing the state to Abandoned", func() { - By("Updating the updateRun state to Abandoned") - updateRun.Spec.State = placementv1beta1.StateAbandoned - Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") - - By("Validating the execution has been abandoned") - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunAbandonedReason)) - wantStatus.Conditions = append(wantStatus.Conditions, generateFalseSucceededCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded, condition.UpdateRunAbandonedReason)) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Checking update run status metrics are emitted") - wantMetrics = append(wantMetrics, generateAbandonedMetric(updateRun)) - validateUpdateRunMetricsEmitted(wantMetrics...) - }) - }) - - Context("Cluster staged update run should update clusters one by one - different states (NotStarted -> Started -> Stopped -> Abandoned)", Ordered, func() { - var wantMetrics []*io_prometheus_client.Metric - BeforeAll(func() { - By("Creating a new clusterStagedUpdateRun") - updateRun.Spec.State = placementv1beta1.StateNotStarted - Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) - - By("Validating the initialization succeeded and but not execution started") - wantStatus = generateSucceededInitializationStatusForSmallClusters(crp, updateRun, testResourceSnapshotIndex, policySnapshot, updateStrategy) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Checking update run status metrics are emitted") - wantMetrics = append(wantMetrics, generateInitializationSucceededMetric(updateRun)) - validateUpdateRunMetricsEmitted(wantMetrics...) 
- }) - - It("Should not start execution when the state is NotStarted", func() { - By("Validating no execution has started") - Consistently(func() bool { - var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun - if err := k8sClient.Get(ctx, types.NamespacedName{Name: updateRun.Name}, ¤tUpdateRun); err != nil { - return false - } - return meta.FindStatusCondition(currentUpdateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) == nil && - meta.FindStatusCondition(currentUpdateRun.Status.StagesStatus[0].Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)) == nil - }, timeout, interval).Should(BeTrue(), "execution has started unexpectedly") - - By("Validating the 1st clusterResourceBinding is updated to NOT Bound") - binding := resourceBindings[0] // cluster-0 - validateNotBindingState(ctx, binding) - }) - - It("Should start execution after changing the state to Started", func() { - By("Updating the updateRun state to Started") - updateRun.Spec.State = placementv1beta1.StateStarted - Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") - - By("Validating the execution has started") - wantStatus = generateExecutionStartedStatus(updateRun, wantStatus) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Checking update run status metrics are emitted") - wantMetrics = append(wantMetrics, generateProgressingMetric(updateRun)) - validateUpdateRunMetricsEmitted(wantMetrics...) 
- }) - - It("Should mark the 1st cluster in the 1st stage as succeeded after marking the binding available", func() { - By("Validating the 1st clusterResourceBinding is updated to Bound") - binding := resourceBindings[0] // cluster-0 - validateBindingState(ctx, binding, resourceSnapshot.Name, updateRun, 0) - - By("Updating the 1st clusterResourceBinding to Available") - meta.SetStatusCondition(&binding.Status.Conditions, generateTrueCondition(binding, placementv1beta1.ResourceBindingAvailable)) - Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") - - By("Validating the 1st cluster has succeeded and 2nd cluster has started") - wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[0].Clusters[1].Conditions = append(wantStatus.StagesStatus[0].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Validating the 1st stage has startTime set") - Expect(updateRun.Status.StagesStatus[0].StartTime).ShouldNot(BeNil()) - - By("Checking update run status metrics are emitted") - validateUpdateRunMetricsEmitted(wantMetrics...) 
- }) - - It("Should stop execution after changing the state to Stopped", func() { - By("Updating the updateRun state to Stopped") - updateRun.Spec.State = placementv1beta1.StateStopped - Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") - - By("Validating the execution has stopped at the 2nd cluster of the 1st stage") - generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionInitialized) - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunPausedReason)) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Checking update run status metrics are emitted") - wantMetrics = append(wantMetrics, generatePausedMetric(updateRun)) - validateUpdateRunMetricsEmitted(wantMetrics...) - }) - - It("Should not continue execution when the state is Stopped", func() { - By("Validating no execution has started") - Consistently(func() bool { - var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun - if err := k8sClient.Get(ctx, types.NamespacedName{Name: updateRun.Name}, ¤tUpdateRun); err != nil { - return false - } - updateRunStatusCond := meta.FindStatusCondition(currentUpdateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) - return condition.IsConditionStatusFalse(updateRunStatusCond, currentUpdateRun.Generation) - }, timeout, interval).Should(BeTrue(), "execution has started unexpectedly") - }) - - It("Should abandon execution after changing the state to Abandoned", func() { - By("Updating the updateRun state to Abandoned") - updateRun.Spec.State = placementv1beta1.StateAbandoned - Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") - - By("Validating the execution has been abandoned") - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, 
placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunAbandonedReason)) - wantStatus.Conditions = append(wantStatus.Conditions, generateFalseSucceededCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded, condition.UpdateRunAbandonedReason)) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Checking update run status metrics are emitted") - wantMetrics = append(wantMetrics, generateAbandonedMetric(updateRun)) - validateUpdateRunMetricsEmitted(wantMetrics...) - }) - }) - - Context("Cluster staged update run should update clusters one by one - different states (NotStarted -> Started -> Stopped -> Started -> Abandoned)", Ordered, func() { + Context("Cluster staged update run should update clusters one by one - different states (Initialized -> Execute)", Ordered, func() { var wantMetrics []*io_prometheus_client.Metric BeforeAll(func() { By("Creating a new clusterStagedUpdateRun") @@ -1772,7 +1534,7 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { validateUpdateRunMetricsEmitted(wantMetrics...) 
}) - It("Should not start execution when the state is NotStarted", func() { + It("Should not start execution when the state is Initialize", func() { By("Validating no execution has started") Consistently(func() bool { var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun @@ -1788,8 +1550,8 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { validateNotBindingState(ctx, binding) }) - It("Should start execution after changing the state to Started", func() { - By("Updating the updateRun state to Started") + It("Should start execution after changing the state to Execute", func() { + By("Updating the updateRun state to Execute") updateRun.Spec.State = placementv1beta1.StateStarted Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") @@ -1823,55 +1585,6 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { validateUpdateRunMetricsEmitted(wantMetrics...) }) - It("Should stop execution after changing the state to Stopped", func() { - By("Updating the updateRun state to Stopped") - updateRun.Spec.State = placementv1beta1.StateStopped - Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") - - By("Validating the execution has stopped at the 2nd cluster of the 1st stage") - generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionInitialized) - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunPausedReason)) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Checking update run status metrics are emitted") - wantMetrics = append(wantMetrics, generatePausedMetric(updateRun)) - validateUpdateRunMetricsEmitted(wantMetrics...) 
- }) - - It("Should not continue execution when the state is Stopped", func() { - By("Validating no execution has started") - Consistently(func() error { - var currentUpdateRun placementv1beta1.ClusterStagedUpdateRun - if err := k8sClient.Get(ctx, types.NamespacedName{Name: updateRun.Name}, ¤tUpdateRun); err != nil { - return err - } - updateRunStatusCond := meta.FindStatusCondition(currentUpdateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) - if condition.IsConditionStatusTrue(updateRunStatusCond, currentUpdateRun.Generation) { - return fmt.Errorf("update run progressing condition is true unexpectedly") - } - updateRunClusterStatusCond := meta.FindStatusCondition(currentUpdateRun.Status.StagesStatus[0].Clusters[1].Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)) - if updateRunClusterStatusCond != nil { - return fmt.Errorf("2nd cluster in 1st stage succeeded condition is set unexpectedly") - } - return nil - }, timeout, interval).Should(BeNil(), "execution has started unexpectedly") - }) - - It("Should continue execution after changing the state to Started", func() { - By("Updating the updateRun state to Started") - updateRun.Spec.State = placementv1beta1.StateStarted - Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") - - By("Validating the execution has started") - // UpdateRun is already initialized, so only need to set the progressing condition to true. - meta.SetStatusCondition(&wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing)) - validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") - - By("Checking update run status metrics are emitted") - wantMetrics = append(wantMetrics, generateProgressingMetric(updateRun)) - validateUpdateRunMetricsEmitted(wantMetrics...) 
- }) - It("Should mark the 2nd cluster in the 1st stage as succeeded after marking the binding available", func() { By("Validating the 2nd clusterResourceBinding is updated to Bound") binding := resourceBindings[1] // cluster-1 @@ -1890,18 +1603,33 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { validateUpdateRunMetricsEmitted(wantMetrics...) }) - It("Should abandon execution after changing the state to Abandoned", func() { - By("Updating the updateRun state to Abandoned") - updateRun.Spec.State = placementv1beta1.StateAbandoned - Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") + It("Should mark the 3rd cluster in the 1st stage as succeeded after marking the binding available and complete the updateRun", func() { + By("Validating the 3rd clusterResourceBinding is updated to Bound") + binding := resourceBindings[2] // cluster-2 + validateBindingState(ctx, binding, resourceSnapshot.Name, updateRun, 0) + + By("Updating the 3rd clusterResourceBinding to Available") + meta.SetStatusCondition(&binding.Status.Conditions, generateTrueCondition(binding, placementv1beta1.ResourceBindingAvailable)) + Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") - By("Validating the execution has been abandoned") - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunAbandonedReason)) - wantStatus.Conditions = append(wantStatus.Conditions, generateFalseSucceededCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded, condition.UpdateRunAbandonedReason)) + By("Validating the 3rd cluster has succeeded and stage waiting for AfterStageTasks") + wantStatus.StagesStatus[0].Clusters[2].Conditions = append(wantStatus.StagesStatus[0].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) 
+ // 1st stage completed. + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) + // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) + // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. + wantStatus.Conditions[1] = generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true) + wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") + By("Validating the 1st stage has endTime set") + Expect(updateRun.Status.StagesStatus[0].EndTime).ShouldNot(BeNil()) + By("Checking update run status metrics are emitted") - wantMetrics = append(wantMetrics, generateAbandonedMetric(updateRun)) + wantMetrics = append(wantMetrics, generateSucceededMetric(updateRun)) validateUpdateRunMetricsEmitted(wantMetrics...) 
}) }) diff --git a/pkg/controllers/updaterun/initialization.go b/pkg/controllers/updaterun/initialization.go index baa07be2b..0fcd32e77 100644 --- a/pkg/controllers/updaterun/initialization.go +++ b/pkg/controllers/updaterun/initialization.go @@ -619,7 +619,7 @@ func (r *Reconciler) recordInitializationSucceeded(ctx context.Context, updateRu Status: metav1.ConditionTrue, ObservedGeneration: updateRun.GetGeneration(), Reason: condition.UpdateRunInitializeSucceededReason, - Message: "ClusterStagedUpdateRun initialized successfully", + Message: "The UpdateRun initialized successfully", }) if updateErr := r.Client.Status().Update(ctx, updateRun); updateErr != nil { klog.ErrorS(updateErr, "Failed to update the UpdateRun status as initialized", "updateRun", klog.KObj(updateRun)) @@ -629,6 +629,25 @@ func (r *Reconciler) recordInitializationSucceeded(ctx context.Context, updateRu return nil } +// recordUpdateRunInitializing records the unknown initialization condition in the UpdateRun status. +// The UpdateRun is currently initializing. +func (r *Reconciler) recordUpdateRunInitializing(ctx context.Context, updateRun placementv1beta1.UpdateRunObj) error { + updateRunStatus := updateRun.GetUpdateRunStatus() + meta.SetStatusCondition(&updateRunStatus.Conditions, metav1.Condition{ + Type: string(placementv1beta1.StagedUpdateRunConditionInitialized), + Status: metav1.ConditionUnknown, + ObservedGeneration: updateRun.GetGeneration(), + Reason: condition.UpdateRunInitializingReason, + Message: "the UpdateRun is in the process of initializing", + }) + if updateErr := r.Client.Status().Update(ctx, updateRun); updateErr != nil { + klog.ErrorS(updateErr, "Failed to update the UpdateRun status as initializing", "updateRun", klog.KObj(updateRun)) + // updateErr can be retried. + return controller.NewUpdateIgnoreConflictError(updateErr) + } + return nil +} + // recordInitializationFailed records the failed initialization condition in the updateRun status. 
func (r *Reconciler) recordInitializationFailed(ctx context.Context, updateRun placementv1beta1.UpdateRunObj, message string) error { updateRunStatus := updateRun.GetUpdateRunStatus() diff --git a/pkg/controllers/updaterun/initialization_integration_test.go b/pkg/controllers/updaterun/initialization_integration_test.go index 35c10eff5..0351b8a77 100644 --- a/pkg/controllers/updaterun/initialization_integration_test.go +++ b/pkg/controllers/updaterun/initialization_integration_test.go @@ -941,56 +941,6 @@ var _ = Describe("Updaterun initialization tests", func() { validateUpdateRunMetricsEmitted(generateWaitingMetric(updateRun)) }) }) - - It("Should not initialize if updateRun is created with state Abandoned", func() { - By("Creating a new clusterStagedUpdateRun in Abandoned state") - updateRun.Spec.State = placementv1beta1.StateAbandoned - Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) - - By("Validating the updateRun is not initialized") - // Populate the cache first. - Eventually(func() error { - if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil { - return err - } - return nil - }, timeout, interval).Should(Succeed(), "failed to get the updateRun") - Consistently(func() error { - if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil { - return err - } - initCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionInitialized)) - if initCond != nil { - return fmt.Errorf("got initialization condition: %v, want nil", initCond) - } - return nil - }, duration, interval).Should(Succeed(), "the abandoned updateRun should not be initialized") - }) - - It("Should not initialize if updateRun is created with state Stopped ", func() { - By("Creating a new clusterStagedUpdateRun in Stopped state") - updateRun.Spec.State = placementv1beta1.StateStopped - Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) - - By("Validating the updateRun is not initialized") - // 
Populate the cache first. - Eventually(func() error { - if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil { - return err - } - return nil - }, timeout, interval).Should(Succeed(), "failed to get the updateRun") - Consistently(func() error { - if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil { - return err - } - initCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionInitialized)) - if initCond != nil { - return fmt.Errorf("got initialization condition: %v, want nil", initCond) - } - return nil - }, duration, interval).Should(Succeed(), "the stopped updateRun should not be initialized") - }) }) func validateFailedInitCondition(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun, message string) { diff --git a/pkg/controllers/updaterun/validation.go b/pkg/controllers/updaterun/validation.go index c92a946a9..ffa5ea3c2 100644 --- a/pkg/controllers/updaterun/validation.go +++ b/pkg/controllers/updaterun/validation.go @@ -22,7 +22,6 @@ import ( "reflect" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" @@ -186,7 +185,7 @@ func validateClusterUpdatingStatus( ) (int, int, error) { stageSucceedCond := meta.FindStatusCondition(stageStatus.Conditions, string(placementv1beta1.StageUpdatingConditionSucceeded)) stageStartedCond := meta.FindStatusCondition(stageStatus.Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)) - if stageSucceedCond != nil && stageSucceedCond.Status == metav1.ConditionTrue { + if condition.IsConditionStatusTrue(stageSucceedCond, updateRun.GetGeneration()) { // The stage has finished. if updatingStageIndex != -1 && curStage > updatingStageIndex { // The finished stage is after the updating stage. 
@@ -197,8 +196,10 @@ func validateClusterUpdatingStatus( // Make sure that all the clusters are updated. for curCluster := range stageStatus.Clusters { // Check if the cluster is still updating. - clusterSucceededCond := meta.FindStatusCondition(stageStatus.Clusters[curCluster].Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)) - if clusterSucceededCond == nil || clusterSucceededCond.Status == metav1.ConditionFalse { + if !condition.IsConditionStatusTrue(meta.FindStatusCondition( + stageStatus.Clusters[curCluster].Conditions, + string(placementv1beta1.ClusterUpdatingConditionSucceeded)), + updateRun.GetGeneration()) { // The clusters in the finished stage should all have finished too. unexpectedErr := controller.NewUnexpectedBehaviorError(fmt.Errorf("cluster `%s` in the finished stage `%s` has not succeeded", stageStatus.Clusters[curCluster].ClusterName, stageStatus.StageName)) klog.ErrorS(unexpectedErr, "The cluster in a finished stage is still updating", "updateRun", klog.KObj(updateRun)) @@ -213,7 +214,7 @@ func validateClusterUpdatingStatus( } // Record the last finished stage so we can continue from the next stage if no stage is updating. lastFinishedStageIndex = curStage - } else if stageSucceedCond != nil && stageSucceedCond.Status == metav1.ConditionFalse { + } else if condition.IsConditionStatusFalse(stageSucceedCond, updateRun.GetGeneration()) { // The stage has failed. 
failedErr := fmt.Errorf("the stage `%s` has failed, err: %s", stageStatus.StageName, stageSucceedCond.Message) klog.ErrorS(failedErr, "The stage has failed", "stageCond", stageSucceedCond, "updateRun", klog.KObj(updateRun)) diff --git a/pkg/controllers/updaterun/validation_integration_test.go b/pkg/controllers/updaterun/validation_integration_test.go index b0d55f5f2..b1190601e 100644 --- a/pkg/controllers/updaterun/validation_integration_test.go +++ b/pkg/controllers/updaterun/validation_integration_test.go @@ -34,7 +34,6 @@ import ( clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/utils" - "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" ) var _ = Describe("UpdateRun validation tests", func() { @@ -565,7 +564,7 @@ func generateFailedValidationStatus( updateRun *placementv1beta1.ClusterStagedUpdateRun, started *placementv1beta1.UpdateRunStatus, ) *placementv1beta1.UpdateRunStatus { - started.Conditions[1] = generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunFailedReason) + started.Conditions[1] = generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, false) started.Conditions = append(started.Conditions, generateFalseCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) return started } diff --git a/pkg/utils/condition/reason.go b/pkg/utils/condition/reason.go index 14b778dea..0a6ed34b2 100644 --- a/pkg/utils/condition/reason.go +++ b/pkg/utils/condition/reason.go @@ -152,6 +152,9 @@ const ( // A group of condition reason string which is used to populate the ClusterStagedUpdateRun condition. const ( + // UpdateRunInitializingReason is the reason string of condition if the update run is currently initializing. 
+ UpdateRunInitializingReason = "UpdateRunInitializing" + // UpdateRunInitializeSucceededReason is the reason string of condition if the update run is initialized successfully. UpdateRunInitializeSucceededReason = "UpdateRunInitializedSuccessfully" @@ -161,15 +164,9 @@ const ( // UpdateRunProgressingReason is the reason string of condition if the staged update run is progressing. UpdateRunProgressingReason = "UpdateRunProgressing" - // UpdateRunPausedReason is the reason string of condition if the staged update run is paused. - UpdateRunPausedReason = "UpdateRunPaused" - // UpdateRunFailedReason is the reason string of condition if the staged update run failed. UpdateRunFailedReason = "UpdateRunFailed" - // UpdateRunAbandonedReason is the reason string of condition if the staged update run is abandoned. - UpdateRunAbandonedReason = "UpdateRunAbandoned" - // UpdateRunStuckReason is the reason string of condition if the staged update run is stuck waiting for a cluster to be updated. UpdateRunStuckReason = "UpdateRunStuck" diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index 1bebd40c2..d3999f76b 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -2079,12 +2079,16 @@ func updateRunStageTaskSucceedConditions(generation int64, taskType placementv1b } func updateRunSucceedConditions(generation int64) []metav1.Condition { + initializeCondGeneration := generation + if generation > 1 { + initializeCondGeneration = 1 + } return []metav1.Condition{ { Type: string(placementv1beta1.StagedUpdateRunConditionInitialized), Status: metav1.ConditionTrue, Reason: condition.UpdateRunInitializeSucceededReason, - ObservedGeneration: generation, + ObservedGeneration: initializeCondGeneration, }, { Type: string(placementv1beta1.StagedUpdateRunConditionProgressing), @@ -2101,46 +2105,6 @@ func updateRunSucceedConditions(generation int64) []metav1.Condition { } } -func updateRunStoppedConditions(generation int64) []metav1.Condition { - return 
[]metav1.Condition{ - { - Type: string(placementv1beta1.StagedUpdateRunConditionInitialized), - Status: metav1.ConditionTrue, - Reason: condition.UpdateRunInitializeSucceededReason, - ObservedGeneration: 1, - }, - { - Type: string(placementv1beta1.StagedUpdateRunConditionProgressing), - Status: metav1.ConditionFalse, - Reason: condition.UpdateRunPausedReason, - ObservedGeneration: generation, - }, - } -} - -func updateRunAbandonedConditions(generation int64) []metav1.Condition { - return []metav1.Condition{ - { - Type: string(placementv1beta1.StagedUpdateRunConditionInitialized), - Status: metav1.ConditionTrue, - Reason: condition.UpdateRunInitializeSucceededReason, - ObservedGeneration: 1, - }, - { - Type: string(placementv1beta1.StagedUpdateRunConditionProgressing), - Status: metav1.ConditionFalse, - Reason: condition.UpdateRunAbandonedReason, - ObservedGeneration: generation, - }, - { - Type: string(placementv1beta1.StagedUpdateRunConditionSucceeded), - Status: metav1.ConditionFalse, - Reason: condition.UpdateRunAbandonedReason, - ObservedGeneration: generation, - }, - } -} - func updateRunInitializedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { @@ -2188,79 +2152,7 @@ func clusterStagedUpdateRunStatusSucceededActual( } } -func clusterStagedUpdateRunStatusAbandonedActual( - updateRunName string, - wantResourceIndex string, - wantPolicyIndex string, - wantClusterCount int, - wantApplyStrategy *placementv1beta1.ApplyStrategy, - wantStrategySpec *placementv1beta1.UpdateStrategySpec, - wantSelectedClusters [][]string, - wantUnscheduledClusters []string, - wantCROs map[string][]string, - wantROs map[string][]placementv1beta1.NamespacedName, -) func() error { - return func() error { - updateRun := &placementv1beta1.ClusterStagedUpdateRun{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName}, updateRun); err != nil { - return err - } - - wantStatus := placementv1beta1.UpdateRunStatus{ - PolicySnapshotIndexUsed: 
wantPolicyIndex, - ResourceSnapshotIndexUsed: wantResourceIndex, - PolicyObservedClusterCount: wantClusterCount, - ApplyStrategy: wantApplyStrategy.DeepCopy(), - UpdateStrategySnapshot: wantStrategySpec, - } - - wantStatus.StagesStatus = buildStageUpdatingStatusesWithGeneration(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun, updateRun.GetGeneration()-2) - wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) - wantStatus.Conditions = updateRunAbandonedConditions(updateRun.Generation) - if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { - return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) - } - return nil - } -} - -func clusterStagedUpdateRunStatusStoppedActual( - updateRunName string, - wantResourceIndex string, - wantPolicyIndex string, - wantClusterCount int, - wantApplyStrategy *placementv1beta1.ApplyStrategy, - wantStrategySpec *placementv1beta1.UpdateStrategySpec, - wantSelectedClusters [][]string, - wantUnscheduledClusters []string, - wantCROs map[string][]string, - wantROs map[string][]placementv1beta1.NamespacedName, -) func() error { - return func() error { - updateRun := &placementv1beta1.ClusterStagedUpdateRun{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName}, updateRun); err != nil { - return err - } - - wantStatus := placementv1beta1.UpdateRunStatus{ - PolicySnapshotIndexUsed: wantPolicyIndex, - ResourceSnapshotIndexUsed: wantResourceIndex, - PolicyObservedClusterCount: wantClusterCount, - ApplyStrategy: wantApplyStrategy.DeepCopy(), - UpdateStrategySnapshot: wantStrategySpec, - } - - wantStatus.StagesStatus = buildStageUpdatingStatusesWithGeneration(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun, updateRun.GetGeneration()-1) - wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) - wantStatus.Conditions = 
updateRunStoppedConditions(updateRun.Generation) - if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { - return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) - } - return nil - } -} - -func clusterStagedUpdateRunStatusNotStartedActual( +func clusterStagedUpdateRunStatusInitializedActual( updateRunName string, wantResourceIndex string, wantPolicyIndex string, @@ -2313,43 +2205,7 @@ func clusterStagedUpdateRunStatusNotStartedActual( } } -func stagedUpdateRunStatusAbandonedActual( - updateRunName, namespace string, - wantResourceIndex string, - wantPolicyIndex string, - wantClusterCount int, - wantApplyStrategy *placementv1beta1.ApplyStrategy, - wantStrategySpec *placementv1beta1.UpdateStrategySpec, - wantSelectedClusters [][]string, - wantUnscheduledClusters []string, - wantCROs map[string][]string, - wantROs map[string][]placementv1beta1.NamespacedName, -) func() error { - return func() error { - updateRun := &placementv1beta1.StagedUpdateRun{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName, Namespace: namespace}, updateRun); err != nil { - return err - } - - wantStatus := placementv1beta1.UpdateRunStatus{ - PolicySnapshotIndexUsed: wantPolicyIndex, - ResourceSnapshotIndexUsed: wantResourceIndex, - PolicyObservedClusterCount: wantClusterCount, - ApplyStrategy: wantApplyStrategy.DeepCopy(), - UpdateStrategySnapshot: wantStrategySpec, - } - - wantStatus.StagesStatus = buildStageUpdatingStatusesWithGeneration(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun, updateRun.GetGeneration()-2) - wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) - wantStatus.Conditions = updateRunAbandonedConditions(updateRun.Generation) - if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { - return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) - } - return nil - } -} - -func 
stagedUpdateRunStatusNotStartedActual( +func stagedUpdateRunStatusInitializedActual( updateRunName, namespace string, wantResourceIndex string, wantPolicyIndex string, @@ -2402,42 +2258,6 @@ func stagedUpdateRunStatusNotStartedActual( } } -func stagedUpdateRunStatusStoppedActual( - updateRunName, namespace string, - wantResourceIndex string, - wantPolicyIndex string, - wantClusterCount int, - wantApplyStrategy *placementv1beta1.ApplyStrategy, - wantStrategySpec *placementv1beta1.UpdateStrategySpec, - wantSelectedClusters [][]string, - wantUnscheduledClusters []string, - wantCROs map[string][]string, - wantROs map[string][]placementv1beta1.NamespacedName, -) func() error { - return func() error { - updateRun := &placementv1beta1.StagedUpdateRun{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName, Namespace: namespace}, updateRun); err != nil { - return err - } - - wantStatus := placementv1beta1.UpdateRunStatus{ - PolicySnapshotIndexUsed: wantPolicyIndex, - ResourceSnapshotIndexUsed: wantResourceIndex, - PolicyObservedClusterCount: wantClusterCount, - ApplyStrategy: wantApplyStrategy.DeepCopy(), - UpdateStrategySnapshot: wantStrategySpec, - } - - wantStatus.StagesStatus = buildStageUpdatingStatusesWithGeneration(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun, updateRun.GetGeneration()-1) - wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) - wantStatus.Conditions = updateRunStoppedConditions(updateRun.Generation) - if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { - return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) - } - return nil - } -} - func stagedUpdateRunStatusSucceededActual( updateRunName, namespace string, wantResourceIndex, wantPolicyIndex string, @@ -2473,58 +2293,6 @@ func stagedUpdateRunStatusSucceededActual( } } -func buildStageUpdatingStatusesWithGeneration( - wantStrategySpec 
*placementv1beta1.UpdateStrategySpec, - wantSelectedClusters [][]string, - wantCROs map[string][]string, - wantROs map[string][]placementv1beta1.NamespacedName, - updateRun placementv1beta1.UpdateRunObj, - generation int64, -) []placementv1beta1.StageUpdatingStatus { - stagesStatus := make([]placementv1beta1.StageUpdatingStatus, len(wantStrategySpec.Stages)) - for i, stage := range wantStrategySpec.Stages { - stagesStatus[i].StageName = stage.Name - stagesStatus[i].Clusters = make([]placementv1beta1.ClusterUpdatingStatus, len(wantSelectedClusters[i])) - for j := range stagesStatus[i].Clusters { - stagesStatus[i].Clusters[j].ClusterName = wantSelectedClusters[i][j] - stagesStatus[i].Clusters[j].ClusterResourceOverrideSnapshots = wantCROs[wantSelectedClusters[i][j]] - stagesStatus[i].Clusters[j].ResourceOverrideSnapshots = wantROs[wantSelectedClusters[i][j]] - if i == 0 { - stagesStatus[i].Clusters[j].Conditions = updateRunClusterRolloutSucceedConditions(generation) - } - } - stagesStatus[i].AfterStageTaskStatus = make([]placementv1beta1.StageTaskStatus, len(stage.AfterStageTasks)) - for j, task := range stage.AfterStageTasks { - stagesStatus[i].AfterStageTaskStatus[j].Type = task.Type - if task.Type == placementv1beta1.StageTaskTypeApproval { - stagesStatus[i].AfterStageTaskStatus[j].ApprovalRequestName = fmt.Sprintf(placementv1beta1.ApprovalTaskNameFmt, updateRun.GetName(), stage.Name) - } - if i == 0 { - if task.Type == placementv1beta1.StageTaskTypeApproval { - stagesStatus[i].AfterStageTaskStatus[j].Conditions = append(stagesStatus[i].AfterStageTaskStatus[j].Conditions, metav1.Condition{ - Type: string(placementv1beta1.StageTaskConditionApprovalRequestCreated), - Status: metav1.ConditionTrue, - Reason: condition.AfterStageTaskApprovalRequestCreatedReason, - ObservedGeneration: generation, - }) - } - if task.Type == placementv1beta1.StageTaskTypeTimedWait { - stagesStatus[i].AfterStageTaskStatus[j].Conditions = 
append(stagesStatus[i].AfterStageTaskStatus[j].Conditions, metav1.Condition{ - Type: string(placementv1beta1.StageTaskConditionWaitTimeElapsed), - Status: metav1.ConditionTrue, - Reason: condition.AfterStageTaskWaitTimeElapsedReason, - ObservedGeneration: generation, - }) - } - } - } - if i == 0 { - stagesStatus[i].Conditions = updateRunStageRolloutWaitingConditions(generation) - } - } - return stagesStatus -} - func buildStageUpdatingStatuses( wantStrategySpec *placementv1beta1.UpdateStrategySpec, wantSelectedClusters [][]string, diff --git a/test/e2e/cluster_staged_updaterun_test.go b/test/e2e/cluster_staged_updaterun_test.go index 432871b34..40ffeb209 100644 --- a/test/e2e/cluster_staged_updaterun_test.go +++ b/test/e2e/cluster_staged_updaterun_test.go @@ -1667,7 +1667,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) }) - Context("Test resource rollout with staged update run by update run states - (NotStarted -> Started -> Stopped -> Abandoned)", Ordered, func() { + Context("Test resource rollout with staged update run by update run states - (Initialize -> Execute)", Ordered, func() { updateRunNames := []string{} var strategy *placementv1beta1.ClusterStagedUpdateStrategy @@ -1729,21 +1729,22 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { + By("Creating Cluster Staged Update Run in state Initialize") createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateNotStarted) }) - It("Should not start rollout as the update run is in NotStarted state", func() { + It("Should not start rollout as the update run is in Initialize state", func() { By("Member clusters should not have work resources placed") checkIfRemovedWorkResourcesFromAllMemberClustersConsistently() - By("Validating the csur status remains in NotStarted state") - csurNotStartedActual := 
clusterStagedUpdateRunStatusNotStartedActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) - Consistently(csurNotStartedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to keep updateRun %s in NotStarted state", updateRunNames[0]) + By("Validating the csur status remains in Initialize state") + csurNotStartedActual := clusterStagedUpdateRunStatusInitializedActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + Consistently(csurNotStartedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to Initialize updateRun %s", updateRunNames[0]) }) - It("Should rollout resources to member-cluster-2 only after update run is in Started state", func() { - // Update the update run state to Started. 
- By("Updating the update run state to Started") + It("Should rollout resources to member-cluster-2 only after update run is in Execute state", func() { + // Update the update run state to Execute + By("Updating the update run state to Execute") updateClusterStagedUpdateRunState(updateRunNames[0], placementv1beta1.StateStarted) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) @@ -1752,44 +1753,22 @@ var _ = Describe("test CRP rollout with staged update run", func() { By("Validating crp status as member-cluster-2 updated") crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) - }) - - It("Should stop update run when updated to Stopped state", func() { - // Update the update run state to Stopped. 
- By("Updating the update run state to Stopped") - updateClusterStagedUpdateRunState(updateRunNames[0], placementv1beta1.StateStopped) - - By("Validating no further rollouts happen after stopping") - checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) - checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) - By("Validating crp status as member-cluster-2 updated only") - crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) - Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + validateAndApproveClusterApprovalRequests(updateRunNames[0], envCanary) + }) - csurSucceededActual := clusterStagedUpdateRunStatusStoppedActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + It("Should rollout resources to all the members and complete the cluster staged update run successfully", func() { + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) + checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) - It("Should abandon update run when updated to Abandoned state", func() { - // Update the update run state to Abandoned. 
- By("Updating the update run state to Abandoned") - updateClusterStagedUpdateRunState(updateRunNames[0], placementv1beta1.StateAbandoned) - - By("Validating no further rollouts happen after abandonment") - checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) - checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) - - By("Validating crp status as member-cluster-2 updated only") - crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) + It("Should update crp status as completed", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames, + []string{resourceSnapshotIndex1st, resourceSnapshotIndex1st, resourceSnapshotIndex1st}, []bool{true, true, true}, nil, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) - - csurSucceededActual := clusterStagedUpdateRunStatusAbandonedActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) - Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) }) }) - - //TODO(britaniar): Add more e2e tests for updateRun Start/Stop Implementation }) // Note that this container cannot run in parallel with other containers. 
diff --git a/test/e2e/staged_updaterun_test.go b/test/e2e/staged_updaterun_test.go index e90bed1fc..a012dfa0b 100644 --- a/test/e2e/staged_updaterun_test.go +++ b/test/e2e/staged_updaterun_test.go @@ -1527,7 +1527,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) }) - Context("Test resource rollout with staged update run by update run states - (NotStarted -> Started -> Stopped -> Abandoned)", Ordered, func() { + Context("Test resource rollout with staged update run by update run states - (Initialize -> Execute)", Ordered, func() { updateRunNames := []string{} var strategy *placementv1beta1.StagedUpdateStrategy @@ -1587,21 +1587,22 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { + By("Creating staged update run in Initialize state") createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateNotStarted) }) - It("Should not start rollout as the update run is in NotStarted state", func() { + It("Should not start rollout as the update run is in Initialize state", func() { By("Member clusters should not have work resources placed") checkIfRemovedConfigMapFromAllMemberClustersConsistently() - By("Validating the sur status remains in NotStarted state") - surNotStartedActual := stagedUpdateRunStatusNotStartedActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) - Consistently(surNotStartedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to keep updateRun %s in NotStarted state", updateRunNames[0]) + By("Validating the sur status remains in Initialize state") + surNotStartedActual := 
stagedUpdateRunStatusInitializedActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + Consistently(surNotStartedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to Initialize updateRun %s/%s ", testNamespace, updateRunNames[0]) }) - It("Should rollout resources to member-cluster-2 only after update run is in Started state", func() { - // Update the update run state to Started. - By("Updating the update run state to Started") + It("Should rollout resources to member-cluster-2 only after update run is in Execute state", func() { + // Update the update run state to Execute. + By("Updating the update run state to Execute") updateStagedUpdateRunState(updateRunNames[0], testNamespace, placementv1beta1.StateStarted) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) @@ -1609,45 +1610,23 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem By("Validating crp status as member-cluster-2 updated") rpStatusUpdatedActual := rpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) - Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) - }) - - It("Should stop update run when updated to Stopped state", func() { - // Update the update run state to Stopped. 
- By("Updating the update run state to Stopped") - updateStagedUpdateRunState(updateRunNames[0], testNamespace, placementv1beta1.StateStopped) - - By("Validating no further rollouts happen after stopping") - checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) - checkIfRemovedConfigMapFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) - - By("Validating crp status as member-cluster-2 updated only") - rpStatusUpdatedActual := rpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) - Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s/%s status as expected", testNamespace, rpName) - surSucceededActual := stagedUpdateRunStatusStoppedActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) - Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) + validateAndApproveNamespacedApprovalRequests(updateRunNames[0], testNamespace, envCanary) }) - It("Should abandon update run when updated to Abandoned state", func() { - // Update the update run state to Abandoned. 
- By("Updating the update run state to Abandoned") - updateStagedUpdateRunState(updateRunNames[0], testNamespace, placementv1beta1.StateAbandoned) - - By("Validating no further rollouts happen after abandonment") - checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) - checkIfRemovedConfigMapFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) - - By("Validating crp status as member-cluster-2 updated only") - rpStatusUpdatedActual := rpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) - Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + It("Should rollout resources to all the members and complete the staged update run successfully", func() { + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunNames[0]) + checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) + }) - surSucceededActual := stagedUpdateRunStatusAbandonedActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) - Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) + 
It("Should update rp status as completed", func() { + rpStatusUpdatedActual := rpStatusWithExternalStrategyActual(appConfigMapIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames, + []string{resourceSnapshotIndex1st, resourceSnapshotIndex1st, resourceSnapshotIndex1st}, []bool{true, true, true}, nil, nil) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s/%s status as expected", testNamespace, rpName) }) }) - - //TODO(britaniar): Add more e2e tests for updateRun Start/Stop Implementation }) func createStagedUpdateStrategySucceed(strategyName, namespace string) *placementv1beta1.StagedUpdateStrategy { From e961c92149e531a292061aa912d7ad27f7bb0852 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes Date: Mon, 1 Dec 2025 13:28:19 -0600 Subject: [PATCH 3/9] update to use a default value for RequeueAfter Signed-off-by: Britania Rodriguez Reyes --- pkg/controllers/updaterun/controller.go | 6 +++--- pkg/controllers/updaterun/execution.go | 3 ++- pkg/utils/common.go | 9 +++++++++ 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/pkg/controllers/updaterun/controller.go b/pkg/controllers/updaterun/controller.go index 227887242..17caa3ac6 100644 --- a/pkg/controllers/updaterun/controller.go +++ b/pkg/controllers/updaterun/controller.go @@ -124,7 +124,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim // Update the status to indicate that the updateRun is initializing. // Requeue immediately to continue with initialization. 
klog.V(2).InfoS("The updateRun is initializing", "state", state, "updateRun", runObjRef) - return runtime.Result{RequeueAfter: 1}, r.recordUpdateRunInitializing(ctx, updateRun) + return runtime.Result{RequeueAfter: utils.DefaultRequeueAfterDuration}, r.recordUpdateRunInitializing(ctx, updateRun) } var initErr error @@ -188,9 +188,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim return runtime.Result{}, execErr } if waitTime == 0 { - // If update run is not finished and waitTime is zero, the waitTime needs to be update to a non-zero value + // If update run is not finished and the waitTime needs to be update to a non-zero value or default requeue duration, // as we are using RequeueAfter only since Requeue is deprecated. - return runtime.Result{RequeueAfter: 1}, nil + return runtime.Result{RequeueAfter: utils.DefaultRequeueAfterDuration}, nil } return runtime.Result{RequeueAfter: waitTime}, nil } diff --git a/pkg/controllers/updaterun/execution.go b/pkg/controllers/updaterun/execution.go index 1180ae34a..1d5a99da4 100644 --- a/pkg/controllers/updaterun/execution.go +++ b/pkg/controllers/updaterun/execution.go @@ -35,6 +35,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" bindingutils "github.com/kubefleet-dev/kubefleet/pkg/utils/binding" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" @@ -428,7 +429,7 @@ func (r *Reconciler) checkAfterStageTasksStatus(ctx context.Context, updatingSta } } if passed { - afterStageWaitTime = 0 + afterStageWaitTime = utils.DefaultRequeueAfterDuration } return passed, afterStageWaitTime, nil } diff --git a/pkg/utils/common.go b/pkg/utils/common.go index 63225001b..4f7e73623 100644 --- a/pkg/utils/common.go +++ b/pkg/utils/common.go @@ -54,6 +54,15 @@ import ( 
"github.com/kubefleet-dev/kubefleet/pkg/utils/informer" ) +const ( + // DefaultRequeueAfterDuration is the default duration after which to requeue a reconcile request. + // This is used when a controller wants to requeue immediately. A small duration is used to mimic immediate requeue + // that Controller-Runtimes deprecated Requeue=true previously provided. + // For more details, see: https://github.com/kubernetes-sigs/controller-runtime/pull/3107 + // The value needs to be small enough to avoid noticeable delay, but greater than 0 as RequeueAfter=0 is treated as no requeue. + DefaultRequeueAfterDuration = time.Microsecond * 1 +) + const ( kubePrefix = "kube-" fleetPrefix = "fleet-" From 0bc6292513c311ce4fa170e242d6835989c59935 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes Date: Mon, 1 Dec 2025 18:42:16 -0600 Subject: [PATCH 4/9] address comments Signed-off-by: Britania Rodriguez Reyes --- apis/placement/v1beta1/stageupdate_types.go | 10 +-- pkg/controllers/updaterun/controller.go | 12 +--- .../updaterun/controller_integration_test.go | 12 +--- pkg/controllers/updaterun/execution.go | 3 +- .../updaterun/execution_integration_test.go | 8 +-- pkg/controllers/updaterun/initialization.go | 19 ------ .../api_validation_integration_test.go | 32 ++++----- test/e2e/actuals_test.go | 65 +++++++++---------- test/e2e/cluster_staged_updaterun_test.go | 48 +++++++------- test/e2e/staged_updaterun_test.go | 36 +++++----- 10 files changed, 101 insertions(+), 144 deletions(-) diff --git a/apis/placement/v1beta1/stageupdate_types.go b/apis/placement/v1beta1/stageupdate_types.go index 588e38136..a155d8125 100644 --- a/apis/placement/v1beta1/stageupdate_types.go +++ b/apis/placement/v1beta1/stageupdate_types.go @@ -152,13 +152,14 @@ func (c *ClusterStagedUpdateRun) SetUpdateRunStatus(status UpdateRunStatus) { type State string const ( - // StateNotStarted describes user intent to initialize but not execute the update run. 
+ // StateInitialize describes user intent to initialize but not execute the update run. // This is the default state when an update run is created. - StateNotStarted State = "Initialize" + // Users can subsequently set the state to Execute or Abandon. + StateInitialize State = "Initialize" - // StateStarted describes user intent to execute (or resume execution if paused). + // StateExecute describes user intent to execute (or resume execution if paused). // Users can subsequently set the state to Pause or Abandon. - StateStarted State = "Execute" + StateExecute State = "Execute" // StateStopped describes user intent to pause the update run. // Users can subsequently set the state to Execute or Abandon. @@ -426,7 +427,6 @@ const ( // Its condition status can be one of the following: // - "True": The staged update run is initialized successfully. // - "False": The staged update run encountered an error during initialization and aborted. - // - "Unknown": The staged update run initialization has started. StagedUpdateRunConditionInitialized StagedUpdateRunConditionType = "Initialized" // StagedUpdateRunConditionProgressing indicates whether the staged update run is making progress. diff --git a/pkg/controllers/updaterun/controller.go b/pkg/controllers/updaterun/controller.go index 17caa3ac6..19b8cfaf0 100644 --- a/pkg/controllers/updaterun/controller.go +++ b/pkg/controllers/updaterun/controller.go @@ -120,13 +120,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim return runtime.Result{}, nil } - if initCond == nil { - // Update the status to indicate that the updateRun is initializing. - // Requeue immediately to continue with initialization. - klog.V(2).InfoS("The updateRun is initializing", "state", state, "updateRun", runObjRef) - return runtime.Result{RequeueAfter: utils.DefaultRequeueAfterDuration}, r.recordUpdateRunInitializing(ctx, updateRun) - } - + // Initialize the updateRun. 
var initErr error if toBeUpdatedBindings, toBeDeletedBindings, initErr = r.initialize(ctx, updateRun); initErr != nil { klog.ErrorS(initErr, "Failed to initialize the updateRun", "updateRun", runObjRef) @@ -165,7 +159,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim } // Execute the updateRun. - if state == placementv1beta1.StateStarted { + if state == placementv1beta1.StateExecute { klog.V(2).InfoS("Continue to execute the updateRun", "state", state, "updatingStageIndex", updatingStageIndex, "updateRun", runObjRef) finished, waitTime, execErr := r.execute(ctx, updateRun, updatingStageIndex, toBeUpdatedBindings, toBeDeletedBindings) if errors.Is(execErr, errStagedUpdatedAborted) { @@ -188,7 +182,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim return runtime.Result{}, execErr } if waitTime == 0 { - // If update run is not finished and the waitTime needs to be update to a non-zero value or default requeue duration, + // If update run is not finished and the waitTime needs to be updated to a non-zero value or default requeue duration, // as we are using RequeueAfter only since Requeue is deprecated. 
return runtime.Result{RequeueAfter: utils.DefaultRequeueAfterDuration}, nil } diff --git a/pkg/controllers/updaterun/controller_integration_test.go b/pkg/controllers/updaterun/controller_integration_test.go index 2867d556f..5f4a6a5e2 100644 --- a/pkg/controllers/updaterun/controller_integration_test.go +++ b/pkg/controllers/updaterun/controller_integration_test.go @@ -292,16 +292,6 @@ func generateInitializationFailedMetric(updateRun *placementv1beta1.ClusterStage } } -func generateInitializationUnknownMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { - return &prometheusclientmodel.Metric{ - Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionInitialized), - string(metav1.ConditionUnknown), condition.UpdateRunInitializingReason), - Gauge: &prometheusclientmodel.Gauge{ - Value: ptr.To(float64(time.Now().UnixNano()) / 1e9), - }, - } -} - func generateProgressingMetric(updateRun *placementv1beta1.ClusterStagedUpdateRun) *prometheusclientmodel.Metric { return &prometheusclientmodel.Metric{ Label: generateMetricsLabels(updateRun, string(placementv1beta1.StagedUpdateRunConditionProgressing), @@ -361,7 +351,7 @@ func generateTestClusterStagedUpdateRun() *placementv1beta1.ClusterStagedUpdateR PlacementName: testCRPName, ResourceSnapshotIndex: testResourceSnapshotIndex, StagedUpdateStrategyName: testUpdateStrategyName, - State: placementv1beta1.StateStarted, + State: placementv1beta1.StateExecute, }, } } diff --git a/pkg/controllers/updaterun/execution.go b/pkg/controllers/updaterun/execution.go index 1d5a99da4..1180ae34a 100644 --- a/pkg/controllers/updaterun/execution.go +++ b/pkg/controllers/updaterun/execution.go @@ -35,7 +35,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" - "github.com/kubefleet-dev/kubefleet/pkg/utils" bindingutils "github.com/kubefleet-dev/kubefleet/pkg/utils/binding" 
"github.com/kubefleet-dev/kubefleet/pkg/utils/condition" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" @@ -429,7 +428,7 @@ func (r *Reconciler) checkAfterStageTasksStatus(ctx context.Context, updatingSta } } if passed { - afterStageWaitTime = utils.DefaultRequeueAfterDuration + afterStageWaitTime = 0 } return passed, afterStageWaitTime, nil } diff --git a/pkg/controllers/updaterun/execution_integration_test.go b/pkg/controllers/updaterun/execution_integration_test.go index d1d9f1a78..edffac84a 100644 --- a/pkg/controllers/updaterun/execution_integration_test.go +++ b/pkg/controllers/updaterun/execution_integration_test.go @@ -24,7 +24,7 @@ import ( "github.com/google/go-cmp/cmp" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - io_prometheus_client "github.com/prometheus/client_model/go" + promclient "github.com/prometheus/client_model/go" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -1519,10 +1519,10 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { }) Context("Cluster staged update run should update clusters one by one - different states (Initialized -> Execute)", Ordered, func() { - var wantMetrics []*io_prometheus_client.Metric + var wantMetrics []*promclient.Metric BeforeAll(func() { By("Creating a new clusterStagedUpdateRun") - updateRun.Spec.State = placementv1beta1.StateNotStarted + updateRun.Spec.State = placementv1beta1.StateInitialize Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) By("Validating the initialization succeeded and but not execution started") @@ -1552,7 +1552,7 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { It("Should start execution after changing the state to Execute", func() { By("Updating the updateRun state to Execute") - updateRun.Spec.State = placementv1beta1.StateStarted + updateRun.Spec.State = placementv1beta1.StateExecute Expect(k8sClient.Update(ctx, 
updateRun)).Should(Succeed(), "failed to update the updateRun state") By("Validating the execution has started") diff --git a/pkg/controllers/updaterun/initialization.go b/pkg/controllers/updaterun/initialization.go index 0fcd32e77..60edac847 100644 --- a/pkg/controllers/updaterun/initialization.go +++ b/pkg/controllers/updaterun/initialization.go @@ -629,25 +629,6 @@ func (r *Reconciler) recordInitializationSucceeded(ctx context.Context, updateRu return nil } -// recordUpdateRunInitializing records the unknown initialization condition in the UpdateRun status. -// The UpdateRun is currently initializing. -func (r *Reconciler) recordUpdateRunInitializing(ctx context.Context, updateRun placementv1beta1.UpdateRunObj) error { - updateRunStatus := updateRun.GetUpdateRunStatus() - meta.SetStatusCondition(&updateRunStatus.Conditions, metav1.Condition{ - Type: string(placementv1beta1.StagedUpdateRunConditionInitialized), - Status: metav1.ConditionUnknown, - ObservedGeneration: updateRun.GetGeneration(), - Reason: condition.UpdateRunInitializingReason, - Message: "the UpdateRun is in the process of initializing", - }) - if updateErr := r.Client.Status().Update(ctx, updateRun); updateErr != nil { - klog.ErrorS(updateErr, "Failed to update the UpdateRun status as initializing", "updateRun", klog.KObj(updateRun)) - // updateErr can be retried. - return controller.NewUpdateIgnoreConflictError(updateErr) - } - return nil -} - // recordInitializationFailed records the failed initialization condition in the updateRun status. 
func (r *Reconciler) recordInitializationFailed(ctx context.Context, updateRun placementv1beta1.UpdateRunObj, message string) error { updateRunStatus := updateRun.GetUpdateRunStatus() diff --git a/test/apis/placement/v1beta1/api_validation_integration_test.go b/test/apis/placement/v1beta1/api_validation_integration_test.go index 03aa7895e..aa0b75282 100644 --- a/test/apis/placement/v1beta1/api_validation_integration_test.go +++ b/test/apis/placement/v1beta1/api_validation_integration_test.go @@ -1213,12 +1213,12 @@ var _ = Describe("Test placement v1beta1 API validation", func() { PlacementName: "test-placement", ResourceSnapshotIndex: "1", StagedUpdateStrategyName: "test-strategy", - State: placementv1beta1.StateNotStarted, + State: placementv1beta1.StateInitialize, }, } Expect(hubClient.Create(ctx, &updateRun)).Should(Succeed()) - updateRun.Spec.State = placementv1beta1.StateStarted + updateRun.Spec.State = placementv1beta1.StateExecute Expect(hubClient.Update(ctx, &updateRun)).Should(Succeed()) Expect(hubClient.Delete(ctx, &updateRun)).Should(Succeed()) }) @@ -1823,7 +1823,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateNotStarted, + State: placementv1beta1.StateInitialize, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) @@ -1843,7 +1843,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }, } Expect(hubClient.Create(ctx, updateRunWithDefaultState)).Should(Succeed()) - Expect(updateRunWithDefaultState.Spec.State).To(Equal(placementv1beta1.StateNotStarted)) + Expect(updateRunWithDefaultState.Spec.State).To(Equal(placementv1beta1.StateInitialize)) Expect(hubClient.Delete(ctx, updateRunWithDefaultState)).Should(Succeed()) }) @@ -1857,12 +1857,12 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) - 
Expect(updateRun.Spec.State).To(Equal(placementv1beta1.StateNotStarted)) + Expect(updateRun.Spec.State).To(Equal(placementv1beta1.StateInitialize)) Expect(hubClient.Delete(ctx, updateRun)).Should(Succeed()) }) It("should allow transition from Initialize to Execute", func() { - updateRun.Spec.State = placementv1beta1.StateStarted + updateRun.Spec.State = placementv1beta1.StateExecute Expect(hubClient.Update(ctx, updateRun)).Should(Succeed()) }) @@ -1882,7 +1882,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateStarted, + State: placementv1beta1.StateExecute, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) @@ -1913,7 +1913,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateStarted, + State: placementv1beta1.StateExecute, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) @@ -1927,7 +1927,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should allow transition from Pause to Execute", func() { - updateRun.Spec.State = placementv1beta1.StateStarted + updateRun.Spec.State = placementv1beta1.StateExecute Expect(hubClient.Update(ctx, updateRun)).Should(Succeed()) }) @@ -1953,7 +1953,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateNotStarted, + State: placementv1beta1.StateInitialize, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) @@ -1971,12 +1971,12 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateStarted, + State: placementv1beta1.StateExecute, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) - updateRun.Spec.State = 
placementv1beta1.StateNotStarted + updateRun.Spec.State = placementv1beta1.StateInitialize err := hubClient.Update(ctx, updateRun) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterStagedUpdateRun call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) @@ -1989,7 +1989,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateStarted, + State: placementv1beta1.StateExecute, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) @@ -1999,7 +1999,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Expect(hubClient.Update(ctx, updateRun)).Should(Succeed()) // Try to transition back to Initialize - updateRun.Spec.State = placementv1beta1.StateNotStarted + updateRun.Spec.State = placementv1beta1.StateInitialize err := hubClient.Update(ctx, updateRun) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterStagedUpdateRun call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) @@ -2017,7 +2017,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) - updateRun.Spec.State = placementv1beta1.StateNotStarted + updateRun.Spec.State = placementv1beta1.StateInitialize err := hubClient.Update(ctx, updateRun) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterStagedUpdateRun call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) @@ -2035,7 +2035,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) - updateRun.Spec.State = placementv1beta1.StateStarted + updateRun.Spec.State = placementv1beta1.StateExecute err := hubClient.Update(ctx, updateRun) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterStagedUpdateRun call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index d3999f76b..2d123814b 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -2178,24 +2178,7 @@ func clusterStagedUpdateRunStatusInitializedActual( UpdateStrategySnapshot: wantStrategySpec, } - stagesStatus := make([]placementv1beta1.StageUpdatingStatus, len(wantStrategySpec.Stages)) - for i, stage := range wantStrategySpec.Stages { - stagesStatus[i].StageName = stage.Name - stagesStatus[i].Clusters = make([]placementv1beta1.ClusterUpdatingStatus, len(wantSelectedClusters[i])) - for j := range stagesStatus[i].Clusters { - stagesStatus[i].Clusters[j].ClusterName = wantSelectedClusters[i][j] - stagesStatus[i].Clusters[j].ClusterResourceOverrideSnapshots = wantCROs[wantSelectedClusters[i][j]] - stagesStatus[i].Clusters[j].ResourceOverrideSnapshots = wantROs[wantSelectedClusters[i][j]] - } - stagesStatus[i].AfterStageTaskStatus = make([]placementv1beta1.StageTaskStatus, len(stage.AfterStageTasks)) - for j, task := range stage.AfterStageTasks { - stagesStatus[i].AfterStageTaskStatus[j].Type = task.Type - if task.Type == placementv1beta1.StageTaskTypeApproval { - stagesStatus[i].AfterStageTaskStatus[j].ApprovalRequestName = fmt.Sprintf(placementv1beta1.ApprovalTaskNameFmt, updateRun.GetName(), stage.Name) - } - } - } - wantStatus.StagesStatus = stagesStatus + 
wantStatus.StagesStatus = buildStageUpdatingStatusesForInitialized(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun) wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) wantStatus.Conditions = updateRunInitializedConditions(updateRun.Generation) if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { @@ -2231,24 +2214,7 @@ func stagedUpdateRunStatusInitializedActual( UpdateStrategySnapshot: wantStrategySpec, } - stagesStatus := make([]placementv1beta1.StageUpdatingStatus, len(wantStrategySpec.Stages)) - for i, stage := range wantStrategySpec.Stages { - stagesStatus[i].StageName = stage.Name - stagesStatus[i].Clusters = make([]placementv1beta1.ClusterUpdatingStatus, len(wantSelectedClusters[i])) - for j := range stagesStatus[i].Clusters { - stagesStatus[i].Clusters[j].ClusterName = wantSelectedClusters[i][j] - stagesStatus[i].Clusters[j].ClusterResourceOverrideSnapshots = wantCROs[wantSelectedClusters[i][j]] - stagesStatus[i].Clusters[j].ResourceOverrideSnapshots = wantROs[wantSelectedClusters[i][j]] - } - stagesStatus[i].AfterStageTaskStatus = make([]placementv1beta1.StageTaskStatus, len(stage.AfterStageTasks)) - for j, task := range stage.AfterStageTasks { - stagesStatus[i].AfterStageTaskStatus[j].Type = task.Type - if task.Type == placementv1beta1.StageTaskTypeApproval { - stagesStatus[i].AfterStageTaskStatus[j].ApprovalRequestName = fmt.Sprintf(placementv1beta1.ApprovalTaskNameFmt, updateRun.GetName(), stage.Name) - } - } - } - wantStatus.StagesStatus = stagesStatus + wantStatus.StagesStatus = buildStageUpdatingStatusesForInitialized(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun) wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) wantStatus.Conditions = updateRunInitializedConditions(updateRun.Generation) if diff := cmp.Diff(updateRun.Status, wantStatus, 
updateRunStatusCmpOption...); diff != "" { @@ -2293,6 +2259,33 @@ func stagedUpdateRunStatusSucceededActual( } } +func buildStageUpdatingStatusesForInitialized( + wantStrategySpec *placementv1beta1.UpdateStrategySpec, + wantSelectedClusters [][]string, + wantCROs map[string][]string, + wantROs map[string][]placementv1beta1.NamespacedName, + updateRun placementv1beta1.UpdateRunObj, +) []placementv1beta1.StageUpdatingStatus { + stagesStatus := make([]placementv1beta1.StageUpdatingStatus, len(wantStrategySpec.Stages)) + for i, stage := range wantStrategySpec.Stages { + stagesStatus[i].StageName = stage.Name + stagesStatus[i].Clusters = make([]placementv1beta1.ClusterUpdatingStatus, len(wantSelectedClusters[i])) + for j := range stagesStatus[i].Clusters { + stagesStatus[i].Clusters[j].ClusterName = wantSelectedClusters[i][j] + stagesStatus[i].Clusters[j].ClusterResourceOverrideSnapshots = wantCROs[wantSelectedClusters[i][j]] + stagesStatus[i].Clusters[j].ResourceOverrideSnapshots = wantROs[wantSelectedClusters[i][j]] + } + stagesStatus[i].AfterStageTaskStatus = make([]placementv1beta1.StageTaskStatus, len(stage.AfterStageTasks)) + for j, task := range stage.AfterStageTasks { + stagesStatus[i].AfterStageTaskStatus[j].Type = task.Type + if task.Type == placementv1beta1.StageTaskTypeApproval { + stagesStatus[i].AfterStageTaskStatus[j].ApprovalRequestName = fmt.Sprintf(placementv1beta1.ApprovalTaskNameFmt, updateRun.GetName(), stage.Name) + } + } + } + return stagesStatus +} + func buildStageUpdatingStatuses( wantStrategySpec *placementv1beta1.UpdateStrategySpec, wantSelectedClusters [][]string, diff --git a/test/e2e/cluster_staged_updaterun_test.go b/test/e2e/cluster_staged_updaterun_test.go index 40ffeb209..c39e5f389 100644 --- a/test/e2e/cluster_staged_updaterun_test.go +++ b/test/e2e/cluster_staged_updaterun_test.go @@ -314,7 +314,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", 
func() { - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -378,7 +378,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a new cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecute) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -426,7 +426,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a new staged update run with old resourceSnapshotIndex successfully to rollback", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should rollback resources to member-cluster-2 only and completes stage canary", func() { @@ -539,7 +539,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should rollout resources to member-cluster-2 only and complete stage 
canary", func() { @@ -593,7 +593,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should still have resources on member-cluster-1 and member-cluster-2 only and completes stage canary", func() { @@ -648,7 +648,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should still have resources on all member clusters and complete stage canary", func() { @@ -743,7 +743,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should not rollout any resources to member clusters and complete stage canary", func() { @@ -796,7 +796,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[1], 
crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should still have resources on member-cluster-2 and member-cluster-3 only and completes stage canary", func() { @@ -850,7 +850,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should still have resources on all member clusters and complete stage canary", func() { @@ -1026,7 +1026,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -1135,7 +1135,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should report diff for member-cluster-2 only and completes stage canary", func() { @@ -1251,7 +1251,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Create a staged update run with new resourceSnapshotIndex and verify rollout happens", func() { - 
createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecute) // Verify rollout to canary cluster first By("Verify that the new configmap is updated on member-cluster-2 during canary stage") @@ -1325,7 +1325,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should create a staged update run and verify cluster approval request is created", func() { validateLatestClusterResourceSnapshot(crpName, resourceSnapshotIndex1st) validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) // Verify that cluster approval request is created for canary stage. 
Eventually(func() error { @@ -1422,7 +1422,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Create updateRun and verify resources are rolled out", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) // Approval for AfterStageTasks of canary stage validateAndApproveClusterApprovalRequests(updateRunName, envCanary, placementv1beta1.AfterStageApprovalTaskNameFmt) @@ -1558,7 +1558,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should complete the cluster staged update run with all 3 clusters updated in parallel", func() { @@ -1648,7 +1648,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should complete the cluster staged update run with all 3 clusters", func() { @@ -1730,7 +1730,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should create a cluster staged update run successfully", func() { By("Creating Cluster Staged Update Run in state Initialize") - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, 
placementv1beta1.StateNotStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateInitialize) }) It("Should not start rollout as the update run is in Initialize state", func() { @@ -1745,7 +1745,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to member-cluster-2 only after update run is in Execute state", func() { // Update the update run state to Execute By("Updating the update run state to Execute") - updateClusterStagedUpdateRunState(updateRunNames[0], placementv1beta1.StateStarted) + updateClusterStagedUpdateRunState(updateRunNames[0], placementv1beta1.StateExecute) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) @@ -1836,7 +1836,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) By("Creating the first staged update run") - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) By("Validating staged update run has succeeded") csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil) @@ -1887,7 +1887,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should create another staged update run for the same CRP", func() { validateLatestClusterSchedulingPolicySnapshot(crpName, 
policySnapshotIndex1st, 2) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should complete the second staged update run and complete the CRP", func() { @@ -1935,7 +1935,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should reschedule to member cluster 1 and create a new cluster staged update run successfully", func() { validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should complete the staged update run, complete CRP, and rollout resources to all member clusters", func() { @@ -1978,7 +1978,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should reschedule to member cluster 1 and create a new cluster staged update run successfully", func() { validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecute) }) It("Should complete the staged update run, complete CRP, and rollout updated resources to all member clusters", func() { @@ -2017,7 +2017,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should reschedule to member cluster 1 and create a new cluster staged update run successfully", func() { 
validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should complete the staged update run, complete CRP, and re-place resources to all member clusters", func() { @@ -2184,7 +2184,7 @@ func createClusterStagedUpdateRunSucceedWithNoResourceSnapshotIndex(updateRunNam Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateStarted, + State: placementv1beta1.StateExecute, PlacementName: crpName, StagedUpdateStrategyName: strategyName, }, diff --git a/test/e2e/staged_updaterun_test.go b/test/e2e/staged_updaterun_test.go index a012dfa0b..3bbcc6b5a 100644 --- a/test/e2e/staged_updaterun_test.go +++ b/test/e2e/staged_updaterun_test.go @@ -300,7 +300,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -364,7 +364,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a new staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecute) }) 
It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -412,7 +412,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a new staged update run with old resourceSnapshotIndex successfully to rollback", func() { - createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should rollback resources to member-cluster-2 only and completes stage canary", func() { @@ -523,7 +523,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -577,7 +577,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should still have resources on member-cluster-1 and member-cluster-2 only and completes stage canary", func() { @@ -632,7 +632,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - 
createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should still have resources on all member clusters and complete stage canary", func() { @@ -725,7 +725,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a namespaced staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should not rollout any resources to member clusters and complete stage canary", func() { @@ -778,7 +778,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a namespaced staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should still have resources on member-cluster-2 and member-cluster-3 only and completes stage canary", func() { @@ -832,7 +832,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a namespaced staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) 
It("Should still have resources on all member clusters and complete stage canary", func() { @@ -980,7 +980,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -1084,7 +1084,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should report diff for member-cluster-2 only and completes stage canary", func() { @@ -1198,7 +1198,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Create a staged update run with new resourceSnapshotIndex and verify rollout happens", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecute) // Verify rollout to canary cluster first. 
By("Verify that the new configmap is updated on member-cluster-2 during canary stage") @@ -1284,7 +1284,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Create updateRun and verify resources are rolled out", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) // Approval for AfterStageTask of canary stage validateAndApproveNamespacedApprovalRequests(updateRunName, testNamespace, envCanary, placementv1beta1.AfterStageApprovalTaskNameFmt) @@ -1419,7 +1419,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should complete the staged update run with all 3 clusters updated in parallel", func() { @@ -1508,7 +1508,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateStarted) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should complete the staged update run with all 3 clusters", func() { @@ -1588,7 +1588,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should create a staged update run successfully", func() { By("Creating staged update run in Initialize 
state") - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateNotStarted) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) }) It("Should not start rollout as the update run is in Initialize state", func() { @@ -1603,7 +1603,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollout resources to member-cluster-2 only after update run is in Execute state", func() { // Update the update run state to Execute. By("Updating the update run state to Execute") - updateStagedUpdateRunState(updateRunNames[0], testNamespace, placementv1beta1.StateStarted) + updateStagedUpdateRunState(updateRunNames[0], testNamespace, placementv1beta1.StateExecute) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) checkIfRemovedConfigMapFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) @@ -1745,7 +1745,7 @@ func createStagedUpdateRunSucceedWithNoResourceSnapshotIndex(updateRunName, name Namespace: namespace, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateStarted, + State: placementv1beta1.StateExecute, PlacementName: rpName, StagedUpdateStrategyName: strategyName, }, From 5f1aa53b6c5b174ae91ff65f2d3e8db67b90e8f5 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes Date: Mon, 1 Dec 2025 20:54:32 -0600 Subject: [PATCH 5/9] minor fix Signed-off-by: Britania Rodriguez Reyes --- test/e2e/staged_updaterun_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/staged_updaterun_test.go b/test/e2e/staged_updaterun_test.go index 3bbcc6b5a..049cb1c00 100644 --- a/test/e2e/staged_updaterun_test.go +++ b/test/e2e/staged_updaterun_test.go @@ -1588,7 +1588,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem 
It("Should create a staged update run successfully", func() { By("Creating staged update run in Initialize state") - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateInitialize) }) It("Should not start rollout as the update run is in Initialize state", func() { From e8659b7b637ef767dac626e366030158377d84c9 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes Date: Tue, 2 Dec 2025 13:19:51 -0600 Subject: [PATCH 6/9] rebase and fix Signed-off-by: Britania Rodriguez Reyes --- .../updaterun/controller_integration_test.go | 17 +----- .../updaterun/execution_integration_test.go | 52 +++++++++---------- .../updaterun/validation_integration_test.go | 3 +- test/e2e/actuals_test.go | 20 +++---- test/e2e/cluster_staged_updaterun_test.go | 4 +- test/e2e/staged_updaterun_test.go | 4 +- 6 files changed, 43 insertions(+), 57 deletions(-) diff --git a/pkg/controllers/updaterun/controller_integration_test.go b/pkg/controllers/updaterun/controller_integration_test.go index 5f4a6a5e2..bb4b837a0 100644 --- a/pkg/controllers/updaterun/controller_integration_test.go +++ b/pkg/controllers/updaterun/controller_integration_test.go @@ -818,23 +818,8 @@ func generateFalseCondition(obj client.Object, condType any) metav1.Condition { } } -func generateFalseProgressingCondition(obj client.Object, condType any, succeeded bool) metav1.Condition { +func generateFalseProgressingCondition(obj client.Object, condType any, reason string) metav1.Condition { falseCond := generateFalseCondition(obj, condType) - reason := "" - switch condType { - case placementv1beta1.StagedUpdateRunConditionProgressing: - if succeeded { - reason = condition.UpdateRunSucceededReason - } else { - reason = condition.UpdateRunFailedReason - } - case 
placementv1beta1.StageUpdatingConditionProgressing: - if succeeded { - reason = condition.StageUpdatingSucceededReason - } else { - reason = condition.StageUpdatingFailedReason - } - } falseCond.Reason = reason return falseCond } diff --git a/pkg/controllers/updaterun/execution_integration_test.go b/pkg/controllers/updaterun/execution_integration_test.go index edffac84a..3720ed9ed 100644 --- a/pkg/controllers/updaterun/execution_integration_test.go +++ b/pkg/controllers/updaterun/execution_integration_test.go @@ -190,7 +190,7 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { It("Should not start rolling out 1st stage", func() { By("Validating the 1st clusterResourceBinding is not updated to Bound") binding := resourceBindings[numTargetClusters-1] // cluster-9 - validateNotBoundBindingState(ctx, binding, resourceSnapshot.Name, updateRun, 0) + validateNotBoundBindingState(ctx, binding) By("Validating the 1st stage does not have startTime set") Expect(updateRun.Status.StagesStatus[0].StartTime).Should(BeNil()) @@ -345,7 +345,7 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions = append(wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionApprovalRequestApproved)) // 1st stage completed, mark progressing condition reason as succeeded and add succeeded condition. 
- wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // 2nd stage waiting for before stage tasks. wantStatus.StagesStatus[1].Conditions = append(wantStatus.StagesStatus[1].Conditions, generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)) @@ -396,7 +396,7 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { It("Should not start rolling out 2nd stage", func() { By("Validating the 1st clusterResourceBinding is not updated to Bound") binding := resourceBindings[0] // cluster-0 - validateNotBoundBindingState(ctx, binding, resourceSnapshot.Name, updateRun, 1) + validateNotBoundBindingState(ctx, binding) By("Validating the 1st stage does not have startTime set") Expect(updateRun.Status.StagesStatus[1].StartTime).Should(BeNil()) @@ -549,7 +549,7 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionApprovalRequestApproved)) wantStatus.StagesStatus[1].AfterStageTaskStatus[1].Conditions = append(wantStatus.StagesStatus[1].AfterStageTaskStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionWaitTimeElapsed)) - wantStatus.StagesStatus[1].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[1].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) 
wantStatus.StagesStatus[1].Conditions = append(wantStatus.StagesStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) meta.SetStatusCondition(&wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing)) @@ -595,7 +595,7 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { return fmt.Errorf("binding %s is not deleted", binding.Name) } if !apierrors.IsNotFound(err) { - return fmt.Errorf("Get binding %s does not return a not-found error: %w", binding.Name, err) + return fmt.Errorf("get binding %s does not return a not-found error: %w", binding.Name, err) } } return nil @@ -606,10 +606,10 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { wantStatus.DeletionStageStatus.Clusters[i].Conditions = append(wantStatus.DeletionStageStatus.Clusters[i].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) } // Mark the stage progressing condition as false with succeeded reason and add succeeded condition. - wantStatus.DeletionStageStatus.Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.DeletionStageStatus.Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. 
- meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -692,9 +692,9 @@ var _ = Describe("UpdateRun execution tests - double stages", func() { By("Validating the updateRun has failed") wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateFalseCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, false) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingFailedReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, false) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunFailedReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateFalseCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -865,13 +865,13 @@ var _ = Describe("UpdateRun execution tests - 
single stage", func() { By("Validating the 3rd cluster has succeeded and stage waiting for AfterStageTasks") wantStatus.StagesStatus[0].Clusters[2].Conditions = append(wantStatus.StagesStatus[0].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) // 1st stage completed. - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. - wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. 
- meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -971,13 +971,13 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { wantStatus.StagesStatus[0].AfterStageTaskStatus[0].Conditions = append(wantStatus.StagesStatus[0].AfterStageTaskStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionWaitTimeElapsed)) // 1st stage completed. - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. 
- wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -1109,7 +1109,7 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. 
- meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -1205,7 +1205,7 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. 
- meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -1321,7 +1321,7 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { It("Should not start rolling out", func() { By("Validating the 1st clusterResourceBinding is not updated to Bound") binding := resourceBindings[0] // cluster-0 - validateNotBoundBindingState(ctx, binding, resourceSnapshot.Name, updateRun, 0) + validateNotBoundBindingState(ctx, binding) By("Validating the 1st stage does not have startTime set") Expect(updateRun.Status.StagesStatus[0].StartTime).Should(BeNil()) @@ -1484,13 +1484,13 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { By("Validating the 1st stage has completed") wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions = append(wantStatus.StagesStatus[0].AfterStageTaskStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageTaskConditionWaitTimeElapsed)) - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and 
add succeeded condition. - wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. - meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true) + meta.SetStatusCondition(&wantStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason)) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) // Need to have a longer wait time for the test to pass, because of the long wait time specified in the update strategy. 
timeout = time.Second * 90 @@ -1547,7 +1547,7 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { By("Validating the 1st clusterResourceBinding is updated to NOT Bound") binding := resourceBindings[0] // cluster-0 - validateNotBindingState(ctx, binding) + validateNotBoundBindingState(ctx, binding) }) It("Should start execution after changing the state to Execute", func() { @@ -1615,13 +1615,13 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { By("Validating the 3rd cluster has succeeded and stage waiting for AfterStageTasks") wantStatus.StagesStatus[0].Clusters[2].Conditions = append(wantStatus.StagesStatus[0].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) // 1st stage completed. - wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true) + wantStatus.StagesStatus[0].Conditions[0] = generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason) wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark the deletion stage progressing condition as false with succeeded reason and add succeeded condition. 
- wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, true)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateFalseProgressingCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing, condition.StageUpdatingSucceededReason)) wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // Mark updateRun progressing condition as false with succeeded reason and add succeeded condition. - wantStatus.Conditions[1] = generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, true) + wantStatus.Conditions[1] = generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunSucceededReason) wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") @@ -1665,7 +1665,7 @@ func validateBindingState(ctx context.Context, binding *placementv1beta1.Cluster }, timeout, interval).Should(Succeed(), "failed to validate the binding state") } -func validateNotBoundBindingState(ctx context.Context, binding *placementv1beta1.ClusterResourceBinding, resourceSnapshotName string, updateRun *placementv1beta1.ClusterStagedUpdateRun, stage int) { +func validateNotBoundBindingState(ctx context.Context, binding *placementv1beta1.ClusterResourceBinding) { Consistently(func() error { if err := k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding); err != nil { return err diff --git a/pkg/controllers/updaterun/validation_integration_test.go b/pkg/controllers/updaterun/validation_integration_test.go index 
b1190601e..b0d55f5f2 100644 --- a/pkg/controllers/updaterun/validation_integration_test.go +++ b/pkg/controllers/updaterun/validation_integration_test.go @@ -34,6 +34,7 @@ import ( clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/utils" + "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" ) var _ = Describe("UpdateRun validation tests", func() { @@ -564,7 +565,7 @@ func generateFailedValidationStatus( updateRun *placementv1beta1.ClusterStagedUpdateRun, started *placementv1beta1.UpdateRunStatus, ) *placementv1beta1.UpdateRunStatus { - started.Conditions[1] = generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, false) + started.Conditions[1] = generateFalseProgressingCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing, condition.UpdateRunFailedReason) started.Conditions = append(started.Conditions, generateFalseCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) return started } diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index 2d123814b..b78b72717 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -2040,17 +2040,6 @@ func updateRunStageRolloutSucceedConditions(generation int64) []metav1.Condition } } -func updateRunStageRolloutWaitingConditions(generation int64) []metav1.Condition { - return []metav1.Condition{ - { - Type: string(placementv1beta1.StageUpdatingConditionProgressing), - Status: metav1.ConditionFalse, - Reason: condition.StageUpdatingWaitingReason, - ObservedGeneration: generation, - }, - } -} - func updateRunStageTaskSucceedConditions(generation int64, taskType placementv1beta1.StageTaskType) []metav1.Condition { if taskType == placementv1beta1.StageTaskTypeApproval { return []metav1.Condition{ @@ -2275,11 +2264,18 @@ func buildStageUpdatingStatusesForInitialized( 
stagesStatus[i].Clusters[j].ClusterResourceOverrideSnapshots = wantCROs[wantSelectedClusters[i][j]] stagesStatus[i].Clusters[j].ResourceOverrideSnapshots = wantROs[wantSelectedClusters[i][j]] } + stagesStatus[i].BeforeStageTaskStatus = make([]placementv1beta1.StageTaskStatus, len(stage.BeforeStageTasks)) + for j, task := range stage.BeforeStageTasks { + stagesStatus[i].BeforeStageTaskStatus[j].Type = task.Type + if task.Type == placementv1beta1.StageTaskTypeApproval { + stagesStatus[i].BeforeStageTaskStatus[j].ApprovalRequestName = fmt.Sprintf(placementv1beta1.BeforeStageApprovalTaskNameFmt, updateRun.GetName(), stage.Name) + } + } stagesStatus[i].AfterStageTaskStatus = make([]placementv1beta1.StageTaskStatus, len(stage.AfterStageTasks)) for j, task := range stage.AfterStageTasks { stagesStatus[i].AfterStageTaskStatus[j].Type = task.Type if task.Type == placementv1beta1.StageTaskTypeApproval { - stagesStatus[i].AfterStageTaskStatus[j].ApprovalRequestName = fmt.Sprintf(placementv1beta1.ApprovalTaskNameFmt, updateRun.GetName(), stage.Name) + stagesStatus[i].AfterStageTaskStatus[j].ApprovalRequestName = fmt.Sprintf(placementv1beta1.AfterStageApprovalTaskNameFmt, updateRun.GetName(), stage.Name) } } } diff --git a/test/e2e/cluster_staged_updaterun_test.go b/test/e2e/cluster_staged_updaterun_test.go index c39e5f389..858c93bd4 100644 --- a/test/e2e/cluster_staged_updaterun_test.go +++ b/test/e2e/cluster_staged_updaterun_test.go @@ -1754,10 +1754,12 @@ var _ = Describe("test CRP rollout with staged update run", func() { crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) - validateAndApproveClusterApprovalRequests(updateRunNames[0], envCanary) + 
validateAndApproveClusterApprovalRequests(updateRunNames[0], envCanary, placementv1beta1.AfterStageApprovalTaskNameFmt) }) It("Should rollout resources to all the members and complete the cluster staged update run successfully", func() { + validateAndApproveClusterApprovalRequests(updateRunNames[0], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) diff --git a/test/e2e/staged_updaterun_test.go b/test/e2e/staged_updaterun_test.go index 049cb1c00..ca2813e4b 100644 --- a/test/e2e/staged_updaterun_test.go +++ b/test/e2e/staged_updaterun_test.go @@ -1612,10 +1612,12 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem rpStatusUpdatedActual := rpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s/%s status as expected", testNamespace, rpName) - validateAndApproveNamespacedApprovalRequests(updateRunNames[0], testNamespace, envCanary) + validateAndApproveNamespacedApprovalRequests(updateRunNames[0], testNamespace, envCanary, placementv1beta1.AfterStageApprovalTaskNameFmt) }) It("Should rollout resources to all the members and complete the staged update run successfully", func() { + validateAndApproveNamespacedApprovalRequests(updateRunNames[0], testNamespace, envProd, 
placementv1beta1.BeforeStageApprovalTaskNameFmt) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) From fc1266504ec0b0f095155287d8d8a7e235892488 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes Date: Tue, 2 Dec 2025 14:40:21 -0600 Subject: [PATCH 7/9] revert requeue change Signed-off-by: Britania Rodriguez Reyes --- pkg/controllers/updaterun/controller.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/pkg/controllers/updaterun/controller.go b/pkg/controllers/updaterun/controller.go index 19b8cfaf0..0c43e80b2 100644 --- a/pkg/controllers/updaterun/controller.go +++ b/pkg/controllers/updaterun/controller.go @@ -181,12 +181,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim if execErr != nil { return runtime.Result{}, execErr } - if waitTime == 0 { - // If update run is not finished and the waitTime needs to be updated to a non-zero value or default requeue duration, - // as we are using RequeueAfter only since Requeue is deprecated. 
- return runtime.Result{RequeueAfter: utils.DefaultRequeueAfterDuration}, nil - } - return runtime.Result{RequeueAfter: waitTime}, nil + return runtime.Result{Requeue: true, RequeueAfter: waitTime}, nil } klog.V(2).InfoS("The updateRun is not started, waiting to be started", "state", state, "updateRun", runObjRef) return runtime.Result{}, nil From bc5f075ed45d07bdcdb0c4b0b1235687dd7af019 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes Date: Tue, 2 Dec 2025 18:39:53 -0600 Subject: [PATCH 8/9] address comments Signed-off-by: Britania Rodriguez Reyes --- pkg/controllers/updaterun/controller.go | 4 +- pkg/utils/common.go | 9 -- pkg/utils/condition/reason.go | 3 - .../api_validation_integration_test.go | 11 +-- test/e2e/actuals_test.go | 96 ++++--------------- test/e2e/cluster_staged_updaterun_test.go | 65 +++++++------ test/e2e/staged_updaterun_test.go | 53 +++++----- 7 files changed, 89 insertions(+), 152 deletions(-) diff --git a/pkg/controllers/updaterun/controller.go b/pkg/controllers/updaterun/controller.go index 0c43e80b2..dcaa5ab49 100644 --- a/pkg/controllers/updaterun/controller.go +++ b/pkg/controllers/updaterun/controller.go @@ -115,7 +115,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim // the update run generation increments, but we don't need to reinitialize since initialization is a one-time setup. if !(initCond != nil && initCond.Status == metav1.ConditionTrue) { - // Check if initialization failed for the current generation. + // Check if initialization failed, regardless of generation. 
- if condition.IsConditionStatusFalse(initCond, updateRun.GetGeneration()) { + if initCond != nil && initCond.Status == metav1.ConditionFalse { klog.V(2).InfoS("The updateRun has failed to initialize", "errorMsg", initCond.Message, "updateRun", runObjRef) return runtime.Result{}, nil } @@ -183,7 +183,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim } return runtime.Result{Requeue: true, RequeueAfter: waitTime}, nil } - klog.V(2).InfoS("The updateRun is not started, waiting to be started", "state", state, "updateRun", runObjRef) + klog.V(2).InfoS("The updateRun is initialized but not executed, waiting to execute", "state", state, "updateRun", runObjRef) return runtime.Result{}, nil } diff --git a/pkg/utils/common.go b/pkg/utils/common.go index 4f7e73623..63225001b 100644 --- a/pkg/utils/common.go +++ b/pkg/utils/common.go @@ -54,15 +54,6 @@ import ( "github.com/kubefleet-dev/kubefleet/pkg/utils/informer" ) -const ( - // DefaultRequeueAfterDuration is the default duration after which to requeue a reconcile request. - // This is used when a controller wants to requeue immediately. A small duration is used to mimic immediate requeue - // that Controller-Runtimes deprecated Requeue=true previously provided. - // For more details, see: https://github.com/kubernetes-sigs/controller-runtime/pull/3107 - // The value needs to be small enough to avoid noticeable delay, but greater than 0 as RequeueAfter=0 is treated as no requeue. - DefaultRequeueAfterDuration = time.Microsecond * 1 -) - const ( kubePrefix = "kube-" fleetPrefix = "fleet-" diff --git a/pkg/utils/condition/reason.go b/pkg/utils/condition/reason.go index 0a6ed34b2..9566ee42e 100644 --- a/pkg/utils/condition/reason.go +++ b/pkg/utils/condition/reason.go @@ -152,9 +152,6 @@ const ( // A group of condition reason string which is used to populate the ClusterStagedUpdateRun condition. 
const ( - // UpdateRunInitializingReason is the reason string of condition if the update run is currently initializing. - UpdateRunInitializingReason = "UpdateRunInitializing" - // UpdateRunInitializeSucceededReason is the reason string of condition if the update run is initialized successfully. UpdateRunInitializeSucceededReason = "UpdateRunInitializedSuccessfully" diff --git a/test/apis/placement/v1beta1/api_validation_integration_test.go b/test/apis/placement/v1beta1/api_validation_integration_test.go index aa0b75282..d077656e9 100644 --- a/test/apis/placement/v1beta1/api_validation_integration_test.go +++ b/test/apis/placement/v1beta1/api_validation_integration_test.go @@ -1913,13 +1913,10 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateExecute, + State: placementv1beta1.StateStopped, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) - // Transition to Pause state first - updateRun.Spec.State = placementv1beta1.StateStopped - Expect(hubClient.Update(ctx, updateRun)).Should(Succeed()) }) AfterEach(func() { @@ -1989,15 +1986,11 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateExecute, + State: placementv1beta1.StateStopped, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) - // Transition to Pause first - updateRun.Spec.State = placementv1beta1.StateStopped - Expect(hubClient.Update(ctx, updateRun)).Should(Succeed()) - // Try to transition back to Initialize updateRun.Spec.State = placementv1beta1.StateInitialize err := hubClient.Update(ctx, updateRun) diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index b78b72717..b521e65be 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -2116,6 +2116,7 @@ func clusterStagedUpdateRunStatusSucceededActual( wantUnscheduledClusters 
[]string, wantCROs map[string][]string, wantROs map[string][]placementv1beta1.NamespacedName, + execute bool, ) func() error { return func() error { updateRun := &placementv1beta1.ClusterStagedUpdateRun{} @@ -2131,81 +2132,15 @@ func clusterStagedUpdateRunStatusSucceededActual( UpdateStrategySnapshot: wantStrategySpec, } - wantStatus.StagesStatus = buildStageUpdatingStatuses(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun) - wantStatus.DeletionStageStatus = buildDeletionStageStatus(wantUnscheduledClusters, updateRun) - wantStatus.Conditions = updateRunSucceedConditions(updateRun.Generation) - if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { - return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) - } - return nil - } -} - -func clusterStagedUpdateRunStatusInitializedActual( - updateRunName string, - wantResourceIndex string, - wantPolicyIndex string, - wantClusterCount int, - wantApplyStrategy *placementv1beta1.ApplyStrategy, - wantStrategySpec *placementv1beta1.UpdateStrategySpec, - wantSelectedClusters [][]string, - wantUnscheduledClusters []string, - wantCROs map[string][]string, - wantROs map[string][]placementv1beta1.NamespacedName, -) func() error { - return func() error { - updateRun := &placementv1beta1.ClusterStagedUpdateRun{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName}, updateRun); err != nil { - return err - } - - wantStatus := placementv1beta1.UpdateRunStatus{ - PolicySnapshotIndexUsed: wantPolicyIndex, - ResourceSnapshotIndexUsed: wantResourceIndex, - PolicyObservedClusterCount: wantClusterCount, - ApplyStrategy: wantApplyStrategy.DeepCopy(), - UpdateStrategySnapshot: wantStrategySpec, - } - - wantStatus.StagesStatus = buildStageUpdatingStatusesForInitialized(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun) - wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) - 
wantStatus.Conditions = updateRunInitializedConditions(updateRun.Generation) - if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { - return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) - } - return nil - } -} - -func stagedUpdateRunStatusInitializedActual( - updateRunName, namespace string, - wantResourceIndex string, - wantPolicyIndex string, - wantClusterCount int, - wantApplyStrategy *placementv1beta1.ApplyStrategy, - wantStrategySpec *placementv1beta1.UpdateStrategySpec, - wantSelectedClusters [][]string, - wantUnscheduledClusters []string, - wantCROs map[string][]string, - wantROs map[string][]placementv1beta1.NamespacedName, -) func() error { - return func() error { - updateRun := &placementv1beta1.StagedUpdateRun{} - if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName, Namespace: namespace}, updateRun); err != nil { - return err - } - - wantStatus := placementv1beta1.UpdateRunStatus{ - PolicySnapshotIndexUsed: wantPolicyIndex, - ResourceSnapshotIndexUsed: wantResourceIndex, - PolicyObservedClusterCount: wantClusterCount, - ApplyStrategy: wantApplyStrategy.DeepCopy(), - UpdateStrategySnapshot: wantStrategySpec, + if execute { + wantStatus.StagesStatus = buildStageUpdatingStatuses(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun) + wantStatus.DeletionStageStatus = buildDeletionStageStatus(wantUnscheduledClusters, updateRun) + wantStatus.Conditions = updateRunSucceedConditions(updateRun.Generation) + } else { + wantStatus.StagesStatus = buildStageUpdatingStatusesForInitialized(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun) + wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) + wantStatus.Conditions = updateRunInitializedConditions(updateRun.Generation) } - - wantStatus.StagesStatus = buildStageUpdatingStatusesForInitialized(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun) 
- wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) - wantStatus.Conditions = updateRunInitializedConditions(updateRun.Generation) if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) } @@ -2223,6 +2158,7 @@ func stagedUpdateRunStatusSucceededActual( wantUnscheduledClusters []string, wantCROs map[string][]string, wantROs map[string][]placementv1beta1.NamespacedName, + execute bool, ) func() error { return func() error { updateRun := &placementv1beta1.StagedUpdateRun{} @@ -2238,9 +2174,15 @@ func stagedUpdateRunStatusSucceededActual( UpdateStrategySnapshot: wantStrategySpec, } - wantStatus.StagesStatus = buildStageUpdatingStatuses(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun) - wantStatus.DeletionStageStatus = buildDeletionStageStatus(wantUnscheduledClusters, updateRun) - wantStatus.Conditions = updateRunSucceedConditions(updateRun.Generation) + if execute { + wantStatus.StagesStatus = buildStageUpdatingStatuses(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun) + wantStatus.DeletionStageStatus = buildDeletionStageStatus(wantUnscheduledClusters, updateRun) + wantStatus.Conditions = updateRunSucceedConditions(updateRun.Generation) + } else { + wantStatus.StagesStatus = buildStageUpdatingStatusesForInitialized(wantStrategySpec, wantSelectedClusters, wantCROs, wantROs, updateRun) + wantStatus.DeletionStageStatus = buildDeletionStatusWithoutConditions(wantUnscheduledClusters, updateRun) + wantStatus.Conditions = updateRunInitializedConditions(updateRun.Generation) + } if diff := cmp.Diff(updateRun.Status, wantStatus, updateRunStatusCmpOption...); diff != "" { return fmt.Errorf("UpdateRun status diff (-got, +want): %s", diff) } diff --git a/test/e2e/cluster_staged_updaterun_test.go b/test/e2e/cluster_staged_updaterun_test.go index 858c93bd4..3553aa972 100644 --- 
a/test/e2e/cluster_staged_updaterun_test.go +++ b/test/e2e/cluster_staged_updaterun_test.go @@ -155,7 +155,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to all the members after approval and complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunNames[0], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -231,7 +231,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to all the members after approval and complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunNames[1], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := 
clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[1]) By("Verify that new the configmap is updated on all member clusters") for idx := range allMemberClusters { @@ -335,7 +335,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to all the members after approval and complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunNames[0], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -410,7 +410,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to member-cluster-1 and member-cluster-3 after approval and complete the cluster staged update run successfully", func() 
{ validateAndApproveClusterApprovalRequests(updateRunNames[1], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[1]) By("Verify that new the configmap is updated on all member clusters") for idx := range allMemberClusters { @@ -458,7 +458,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollback resources to member-cluster-1 and member-cluster-3 after approval and complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunNames[2], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[2], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[2], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) 
Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[1]) for idx := range allMemberClusters { configMapActual := configMapPlacedOnClusterActual(allMemberClusters[idx], &oldConfigMap) @@ -560,7 +560,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to member-cluster-1 after approval but not member-cluster-3 and complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunNames[0], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, 2, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, 2, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[0], allMemberClusters[1]}) checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[2]}) @@ -616,7 +616,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to member-cluster-3 after approval and complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunNames[1], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], 
resourceSnapshotIndex1st, policySnapshotIndex2nd, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex1st, policySnapshotIndex2nd, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[1]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -665,7 +665,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { validateAndApproveClusterApprovalRequests(updateRunNames[2], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) // need to go through two stages - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[2], resourceSnapshotIndex1st, policySnapshotIndex3rd, 1, defaultApplyStrategy, &strategy.Spec, [][]string{{}, {allMemberClusterNames[2]}}, []string{allMemberClusterNames[0], allMemberClusterNames[1]}, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[2], resourceSnapshotIndex1st, policySnapshotIndex3rd, 1, defaultApplyStrategy, &strategy.Spec, [][]string{{}, {allMemberClusterNames[2]}}, []string{allMemberClusterNames[0], allMemberClusterNames[1]}, nil, nil, true) Eventually(csurSucceededActual, 2*updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[2]) checkIfRemovedWorkResourcesFromMemberClusters([]*framework.Cluster{allMemberClusters[0], allMemberClusters[1]}) checkIfPlacedWorkResourcesOnMemberClustersConsistently([]*framework.Cluster{allMemberClusters[2]}) @@ -763,7 +763,7 @@ var _ = Describe("test CRP 
rollout with staged update run", func() { It("Should rollout resources to member-cluster-3 after approval and complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunNames[0], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, 1, defaultApplyStrategy, &strategy.Spec, [][]string{{}, {allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, 1, defaultApplyStrategy, &strategy.Spec, [][]string{{}, {allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[2]}) checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[1]}) @@ -818,7 +818,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to member-cluster-1 after approval and complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunNames[1], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, 
[][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[1]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -870,7 +870,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should remove resources on member-cluster-1 after approval and complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunNames[2], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[2], resourceSnapshotIndex1st, policySnapshotIndex1st, 2, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[2]}}, []string{allMemberClusterNames[0]}, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[2], resourceSnapshotIndex1st, policySnapshotIndex1st, 2, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[2]}}, []string{allMemberClusterNames[0]}, nil, nil, true) Eventually(csurSucceededActual, 2*updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[2]) checkIfRemovedWorkResourcesFromMemberClusters([]*framework.Cluster{allMemberClusters[0]}) checkIfPlacedWorkResourcesOnMemberClustersConsistently([]*framework.Cluster{allMemberClusters[1], allMemberClusters[2]}) @@ -1048,7 +1048,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to member-cluster-1 and member-cluster-3 after approval and complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunName, envProd, 
placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, wantCROs, wantROs) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, wantCROs, wantROs, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunName) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -1150,7 +1150,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should report diff for member-cluster-1 and member-cluster-3 after approval and complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunName, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), applyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), applyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate 
updateRun %s succeeded", updateRunName) }) @@ -1265,7 +1265,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { validateAndApproveClusterApprovalRequests(updateRunName, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) // Verify complete rollout - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunName) // Verify new configmap is on all member clusters @@ -1353,7 +1353,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should complete the staged update run after approval", func() { - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate 
updateRun %s succeeded", updateRunName) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -1430,7 +1430,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { // Approval for BeforeStageTasks of prod stage validateAndApproveClusterApprovalRequests(updateRunName, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunName) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) @@ -1564,7 +1564,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should complete the cluster staged update run with all 3 clusters updated in parallel", func() { // With maxConcurrency=3, all 3 clusters should be updated in parallel. // Each round waits 15 seconds, so total time should be under 20s. 
- csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunParallelEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunName) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -1655,7 +1655,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { // Since maxConcurrency=70% each round we process 2 clusters in parallel, // so all 3 clusters should be updated in 2 rounds. // Each round waits 15 seconds, so total time should be under 40s. 
- csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunName, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunParallelEventuallyDuration*2, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunName) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -1738,7 +1738,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { checkIfRemovedWorkResourcesFromAllMemberClustersConsistently() By("Validating the csur status remains in Initialize state") - csurNotStartedActual := clusterStagedUpdateRunStatusInitializedActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurNotStartedActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, false) Consistently(csurNotStartedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to Initialize updateRun %s", updateRunNames[0]) }) @@ -1760,7 +1760,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to all the members and 
complete the cluster staged update run successfully", func() { validateAndApproveClusterApprovalRequests(updateRunNames[0], envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -1841,7 +1841,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) By("Validating staged update run has succeeded") - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, 
eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) By("Validating CRP status as completed") @@ -1893,7 +1893,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label }) It("Should complete the second staged update run and complete the CRP", func() { - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex1st, policySnapshotIndex1st, 2, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1], allMemberClusterNames[2]}}, []string{allMemberClusterNames[0]}, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex1st, policySnapshotIndex1st, 2, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1], allMemberClusterNames[2]}}, []string{allMemberClusterNames[0]}, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[1]) crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames[1:], @@ -1941,7 +1941,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label }) It("Should complete the staged update run, complete CRP, and rollout resources to all member clusters", func() { - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], 
allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames, @@ -1984,7 +1984,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label }) It("Should complete the staged update run, complete CRP, and rollout updated resources to all member clusters", func() { - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex2nd, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex2nd, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[1]) crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex2nd, true, allMemberClusterNames, @@ -2023,7 +2023,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label }) It("Should complete the staged update run, complete CRP, and re-place resources to all member clusters", func() { - csurSucceededActual := clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil) + csurSucceededActual := 
clusterStagedUpdateRunStatusSucceededActual(updateRunNames[1], resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(csurSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[1]) crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames, @@ -2195,11 +2195,18 @@ func createClusterStagedUpdateRunSucceedWithNoResourceSnapshotIndex(updateRunNam } func updateClusterStagedUpdateRunState(updateRunName string, state placementv1beta1.State) { - updateRun := &placementv1beta1.ClusterStagedUpdateRun{} - Expect(hubClient.Get(ctx, types.NamespacedName{Name: updateRunName}, updateRun)).To(Succeed(), "Failed to get ClusterStagedUpdateRun %s", updateRunName) + Eventually(func() error { + updateRun := &placementv1beta1.ClusterStagedUpdateRun{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName}, updateRun); err != nil { + return fmt.Errorf("failed to get ClusterStagedUpdateRun %s: %w", updateRunName, err) + }

- updateRun.Spec.State = state - Expect(hubClient.Update(ctx, updateRun)).To(Succeed(), "Failed to update ClusterStagedUpdateRun %s", updateRunName) + updateRun.Spec.State = state + if err := hubClient.Update(ctx, updateRun); err != nil { + return fmt.Errorf("failed to update ClusterStagedUpdateRun %s: %w", updateRunName, err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ClusterStagedUpdateRun %s state to %s", updateRunName, state) } func validateAndApproveClusterApprovalRequests(updateRunName, stageName, approvalRequestNameFmt string) { diff --git a/test/e2e/staged_updaterun_test.go b/test/e2e/staged_updaterun_test.go index ca2813e4b..e10960fb6 100644 --- a/test/e2e/staged_updaterun_test.go
+++ b/test/e2e/staged_updaterun_test.go @@ -145,7 +145,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollout resources to all the members after approval and complete the staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunNames[0], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -219,7 +219,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollout resources to all the members after approval and complete the staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunNames[1], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[1], testNamespace, resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := 
stagedUpdateRunStatusSucceededActual(updateRunNames[1], testNamespace, resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunNames[1]) By("Verify that new the configmap is updated on all member clusters") for idx := range allMemberClusters { @@ -321,7 +321,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollout resources to all the members after approval and complete the staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunNames[0], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -396,7 +396,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollout resources to member-cluster-1 and 
member-cluster-3 after approval and complete the staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunNames[1], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[1], testNamespace, resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[1], testNamespace, resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunNames[1]) By("Verify that new the configmap is updated on all member clusters") for idx := range allMemberClusters { @@ -444,7 +444,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollback resources to member-cluster-1 and member-cluster-3 after approval and complete the staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunNames[2], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[2], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[2], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, 
len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[1]) for idx := range allMemberClusters { configMapActual := configMapPlacedOnClusterActual(allMemberClusters[idx], &oldConfigMap) @@ -544,7 +544,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollout resources to member-cluster-1 after approval but not member-cluster-3 and complete the staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunNames[0], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, 2, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, 2, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[0], allMemberClusters[1]}) checkIfRemovedConfigMapFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[2]}) @@ -600,7 +600,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollout resources to member-cluster-3 after approval and complete the staged update run successfully", func() { 
validateAndApproveNamespacedApprovalRequests(updateRunNames[1], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[1], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex2nd, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[1], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex2nd, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunNames[1]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -649,7 +649,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem validateAndApproveNamespacedApprovalRequests(updateRunNames[2], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) // need to go through two stages - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[2], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex3rd, 1, defaultApplyStrategy, &strategy.Spec, [][]string{{}, {allMemberClusterNames[2]}}, []string{allMemberClusterNames[0], allMemberClusterNames[1]}, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[2], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex3rd, 1, defaultApplyStrategy, &strategy.Spec, [][]string{{}, {allMemberClusterNames[2]}}, []string{allMemberClusterNames[0], allMemberClusterNames[1]}, nil, nil, true) Eventually(surSucceededActual, 2*updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun 
%s/%s succeeded", testNamespace, updateRunNames[2]) checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{allMemberClusters[0], allMemberClusters[1]}) checkIfPlacedWorkResourcesOnMemberClustersConsistently([]*framework.Cluster{allMemberClusters[2]}) @@ -745,7 +745,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollout resources to member-cluster-3 after approval and complete the cluster staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunNames[0], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, 1, defaultApplyStrategy, &strategy.Spec, [][]string{{}, {allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, 1, defaultApplyStrategy, &strategy.Spec, [][]string{{}, {allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[2]}) checkIfRemovedConfigMapFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[1]}) @@ -800,7 +800,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollout resources to member-cluster-1 after approval and complete the staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunNames[1], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[1], testNamespace, 
resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[1], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunNames[1]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -852,7 +852,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should remove resources on member-cluster-1 after approval and complete the cluster staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunNames[2], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[2], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, 2, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[2]}}, []string{allMemberClusterNames[0]}, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[2], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, 2, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[2]}}, []string{allMemberClusterNames[0]}, nil, nil, true) Eventually(surSucceededActual, 2*updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunNames[2]) checkIfRemovedConfigMapFromMemberClusters([]*framework.Cluster{allMemberClusters[0]}) 
checkIfPlacedWorkResourcesOnMemberClustersConsistently([]*framework.Cluster{allMemberClusters[1], allMemberClusters[2]}) @@ -1003,7 +1003,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollout resources to member-cluster-1 and member-cluster-3 after approval and complete the cluster staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunName, testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, wantROs) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, wantROs, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunName) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -1099,7 +1099,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should report diff for member-cluster-1 and member-cluster-3 after approval and complete the cluster staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunName, testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), applyStrategy, &strategy.Spec, 
[][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), applyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunName) }) @@ -1212,7 +1212,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem validateAndApproveNamespacedApprovalRequests(updateRunName, testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) // Verify complete rollout. - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex2nd, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunName) // Verify new configmap is on all member clusters. 
@@ -1292,7 +1292,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem // Approval for BeforeStageTask of prod stage validateAndApproveNamespacedApprovalRequests(updateRunName, testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunName) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) @@ -1425,7 +1425,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should complete the staged update run with all 3 clusters updated in parallel", func() { // With maxConcurrency=3, all 3 clusters should be updated in parallel. // Each round waits 15 seconds, so total time should be under 20s. 
- surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunParallelEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunName) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -1515,7 +1515,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem // Since maxConcurrency=70% each round we process 2 clusters in parallel, // so all 3 clusters should be updated in 2 rounds. // Each round waits 15 seconds, so total time should be under 40s. 
- surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunName, testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunParallelEventuallyDuration*2, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunName) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -1596,7 +1596,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem checkIfRemovedConfigMapFromAllMemberClustersConsistently() By("Validating the sur status remains in Initialize state") - surNotStartedActual := stagedUpdateRunStatusInitializedActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surNotStartedActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, false) Consistently(surNotStartedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to Initialize updateRun %s/%s ", testNamespace, updateRunNames[0]) }) @@ -1618,7 +1618,7 @@ var _ = Describe("test RP rollout with staged 
update run", Label("resourceplacem It("Should rollout resources to all the members and complete the staged update run successfully", func() { validateAndApproveNamespacedApprovalRequests(updateRunNames[0], testNamespace, envProd, placementv1beta1.BeforeStageApprovalTaskNameFmt) - surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil) + surSucceededActual := stagedUpdateRunStatusSucceededActual(updateRunNames[0], testNamespace, resourceSnapshotIndex1st, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil, true) Eventually(surSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s/%s succeeded", testNamespace, updateRunNames[0]) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) @@ -1756,11 +1756,18 @@ func createStagedUpdateRunSucceedWithNoResourceSnapshotIndex(updateRunName, name } func updateStagedUpdateRunState(updateRunName, namespace string, state placementv1beta1.State) { - updateRun := &placementv1beta1.StagedUpdateRun{} - Expect(hubClient.Get(ctx, types.NamespacedName{Name: updateRunName, Namespace: namespace}, updateRun)).To(Succeed(), "Failed to get StagedUpdateRun %s", updateRunName) + Eventually(func() error { + updateRun := &placementv1beta1.StagedUpdateRun{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: updateRunName, Namespace: namespace}, updateRun); err != nil { + return fmt.Errorf("failed to get StagedUpdateRun %s: %w", updateRunName, err) + } - updateRun.Spec.State = state - Expect(hubClient.Update(ctx, updateRun)).To(Succeed(), "Failed to update StagedUpdateRun %s",
updateRunName) + updateRun.Spec.State = state + if err := hubClient.Update(ctx, updateRun); err != nil { + return fmt.Errorf("failed to update StagedUpdateRun %s: %w", updateRunName, err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update StagedUpdateRun %s to state %s", updateRunName, state) } func validateAndApproveNamespacedApprovalRequests(updateRunName, namespace, stageName, approvalRequestNameFmt string) { From ebfa890f352472b005206b3b2b63530cee56892b Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes Date: Wed, 3 Dec 2025 12:36:26 -0600 Subject: [PATCH 9/9] update variable name Signed-off-by: Britania Rodriguez Reyes --- apis/placement/v1beta1/stageupdate_types.go | 8 +-- pkg/controllers/updaterun/controller.go | 2 +- .../updaterun/controller_integration_test.go | 2 +- .../updaterun/execution_integration_test.go | 6 +-- .../api_validation_integration_test.go | 28 +++++------ test/e2e/cluster_staged_updaterun_test.go | 50 +++++++++---------- test/e2e/staged_updaterun_test.go | 36 ++++++------- 7 files changed, 66 insertions(+), 66 deletions(-) diff --git a/apis/placement/v1beta1/stageupdate_types.go b/apis/placement/v1beta1/stageupdate_types.go index a155d8125..bc7a8d212 100644 --- a/apis/placement/v1beta1/stageupdate_types.go +++ b/apis/placement/v1beta1/stageupdate_types.go @@ -152,14 +152,14 @@ func (c *ClusterStagedUpdateRun) SetUpdateRunStatus(status UpdateRunStatus) { type State string const ( - // StateInitialize describes user intent to initialize but not execute the update run. + // StateInitialized describes user intent to initialize but not execute the update run. // This is the default state when an update run is created. // Users can subsequently set the state to Execute or Abandon. - StateInitialize State = "Initialize" + StateInitialized State = "Initialize" - // StateExecute describes user intent to execute (or resume execution if paused).
+ // StateExecuted describes user intent to execute (or resume execution if paused). // Users can subsequently set the state to Pause or Abandon. - StateExecute State = "Execute" + StateExecuted State = "Execute" // StateStopped describes user intent to pause the update run. // Users can subsequently set the state to Execute or Abandon. diff --git a/pkg/controllers/updaterun/controller.go b/pkg/controllers/updaterun/controller.go index dcaa5ab49..efd1ced2c 100644 --- a/pkg/controllers/updaterun/controller.go +++ b/pkg/controllers/updaterun/controller.go @@ -159,7 +159,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim } // Execute the updateRun. - if state == placementv1beta1.StateExecute { + if state == placementv1beta1.StateExecuted { klog.V(2).InfoS("Continue to execute the updateRun", "state", state, "updatingStageIndex", updatingStageIndex, "updateRun", runObjRef) finished, waitTime, execErr := r.execute(ctx, updateRun, updatingStageIndex, toBeUpdatedBindings, toBeDeletedBindings) if errors.Is(execErr, errStagedUpdatedAborted) { diff --git a/pkg/controllers/updaterun/controller_integration_test.go b/pkg/controllers/updaterun/controller_integration_test.go index bb4b837a0..e017d170e 100644 --- a/pkg/controllers/updaterun/controller_integration_test.go +++ b/pkg/controllers/updaterun/controller_integration_test.go @@ -351,7 +351,7 @@ func generateTestClusterStagedUpdateRun() *placementv1beta1.ClusterStagedUpdateR PlacementName: testCRPName, ResourceSnapshotIndex: testResourceSnapshotIndex, StagedUpdateStrategyName: testUpdateStrategyName, - State: placementv1beta1.StateExecute, + State: placementv1beta1.StateExecuted, }, } } diff --git a/pkg/controllers/updaterun/execution_integration_test.go b/pkg/controllers/updaterun/execution_integration_test.go index 3720ed9ed..3d2af3627 100644 --- a/pkg/controllers/updaterun/execution_integration_test.go +++ b/pkg/controllers/updaterun/execution_integration_test.go @@ -1518,11 +1518,11 @@ 
var _ = Describe("UpdateRun execution tests - single stage", func() { }) }) - Context("Cluster staged update run should update clusters one by one - different states (Initialized -> Execute)", Ordered, func() { + Context("Cluster staged update run should update clusters one by one - different states (Initialized -> Executed)", Ordered, func() { var wantMetrics []*promclient.Metric BeforeAll(func() { By("Creating a new clusterStagedUpdateRun") - updateRun.Spec.State = placementv1beta1.StateInitialize + updateRun.Spec.State = placementv1beta1.StateInitialized Expect(k8sClient.Create(ctx, updateRun)).To(Succeed()) By("Validating the initialization succeeded and but not execution started") @@ -1552,7 +1552,7 @@ var _ = Describe("UpdateRun execution tests - single stage", func() { It("Should start execution after changing the state to Execute", func() { By("Updating the updateRun state to Execute") - updateRun.Spec.State = placementv1beta1.StateExecute + updateRun.Spec.State = placementv1beta1.StateExecuted Expect(k8sClient.Update(ctx, updateRun)).Should(Succeed(), "failed to update the updateRun state") By("Validating the execution has started") diff --git a/test/apis/placement/v1beta1/api_validation_integration_test.go b/test/apis/placement/v1beta1/api_validation_integration_test.go index d077656e9..fbf25b969 100644 --- a/test/apis/placement/v1beta1/api_validation_integration_test.go +++ b/test/apis/placement/v1beta1/api_validation_integration_test.go @@ -1213,12 +1213,12 @@ var _ = Describe("Test placement v1beta1 API validation", func() { PlacementName: "test-placement", ResourceSnapshotIndex: "1", StagedUpdateStrategyName: "test-strategy", - State: placementv1beta1.StateInitialize, + State: placementv1beta1.StateInitialized, }, } Expect(hubClient.Create(ctx, &updateRun)).Should(Succeed()) - updateRun.Spec.State = placementv1beta1.StateExecute + updateRun.Spec.State = placementv1beta1.StateExecuted Expect(hubClient.Update(ctx, &updateRun)).Should(Succeed()) 
Expect(hubClient.Delete(ctx, &updateRun)).Should(Succeed()) }) @@ -1823,7 +1823,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateInitialize, + State: placementv1beta1.StateInitialized, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) @@ -1843,7 +1843,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }, } Expect(hubClient.Create(ctx, updateRunWithDefaultState)).Should(Succeed()) - Expect(updateRunWithDefaultState.Spec.State).To(Equal(placementv1beta1.StateInitialize)) + Expect(updateRunWithDefaultState.Spec.State).To(Equal(placementv1beta1.StateInitialized)) Expect(hubClient.Delete(ctx, updateRunWithDefaultState)).Should(Succeed()) }) @@ -1857,12 +1857,12 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) - Expect(updateRun.Spec.State).To(Equal(placementv1beta1.StateInitialize)) + Expect(updateRun.Spec.State).To(Equal(placementv1beta1.StateInitialized)) Expect(hubClient.Delete(ctx, updateRun)).Should(Succeed()) }) It("should allow transition from Initialize to Execute", func() { - updateRun.Spec.State = placementv1beta1.StateExecute + updateRun.Spec.State = placementv1beta1.StateExecuted Expect(hubClient.Update(ctx, updateRun)).Should(Succeed()) }) @@ -1882,7 +1882,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateExecute, + State: placementv1beta1.StateExecuted, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) @@ -1924,7 +1924,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { }) It("should allow transition from Pause to Execute", func() { - updateRun.Spec.State = placementv1beta1.StateExecute + updateRun.Spec.State = placementv1beta1.StateExecuted Expect(hubClient.Update(ctx, 
updateRun)).Should(Succeed()) }) @@ -1950,7 +1950,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateInitialize, + State: placementv1beta1.StateInitialized, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) @@ -1968,12 +1968,12 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateExecute, + State: placementv1beta1.StateExecuted, }, } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) - updateRun.Spec.State = placementv1beta1.StateInitialize + updateRun.Spec.State = placementv1beta1.StateInitialized err := hubClient.Update(ctx, updateRun) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterStagedUpdateRun call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) @@ -1992,7 +1992,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) // Try to transition back to Initialize - updateRun.Spec.State = placementv1beta1.StateInitialize + updateRun.Spec.State = placementv1beta1.StateInitialized err := hubClient.Update(ctx, updateRun) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterStagedUpdateRun call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) @@ -2010,7 +2010,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) - updateRun.Spec.State = placementv1beta1.StateInitialize + updateRun.Spec.State = placementv1beta1.StateInitialized err := hubClient.Update(ctx, updateRun) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterStagedUpdateRun call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) @@ -2028,7 +2028,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { } Expect(hubClient.Create(ctx, updateRun)).Should(Succeed()) - updateRun.Spec.State = placementv1beta1.StateExecute + updateRun.Spec.State = placementv1beta1.StateExecuted err := hubClient.Update(ctx, updateRun) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update ClusterStagedUpdateRun call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) diff --git a/test/e2e/cluster_staged_updaterun_test.go b/test/e2e/cluster_staged_updaterun_test.go index 3553aa972..c56ccfc6c 100644 --- a/test/e2e/cluster_staged_updaterun_test.go +++ b/test/e2e/cluster_staged_updaterun_test.go @@ -314,7 +314,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -378,7 +378,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a new cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecuted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -426,7 +426,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a new staged update run with old resourceSnapshotIndex successfully to rollback", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should rollback resources to member-cluster-2 only and completes stage canary", func() { @@ -539,7 +539,7 @@ var _ = 
Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -593,7 +593,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should still have resources on member-cluster-1 and member-cluster-2 only and completes stage canary", func() { @@ -648,7 +648,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should still have resources on all member clusters and complete stage canary", func() { @@ -743,7 +743,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, 
placementv1beta1.StateExecuted) }) It("Should not rollout any resources to member clusters and complete stage canary", func() { @@ -796,7 +796,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should still have resources on member-cluster-2 and member-cluster-3 only and completes stage canary", func() { @@ -850,7 +850,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should still have resources on all member clusters and complete stage canary", func() { @@ -1026,7 +1026,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -1135,7 +1135,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, 
strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should report diff for member-cluster-2 only and completes stage canary", func() { @@ -1251,7 +1251,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Create a staged update run with new resourceSnapshotIndex and verify rollout happens", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecuted) // Verify rollout to canary cluster first By("Verify that the new configmap is updated on member-cluster-2 during canary stage") @@ -1325,7 +1325,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should create a staged update run and verify cluster approval request is created", func() { validateLatestClusterResourceSnapshot(crpName, resourceSnapshotIndex1st) validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) // Verify that cluster approval request is created for canary stage. 
Eventually(func() error { @@ -1422,7 +1422,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Create updateRun and verify resources are rolled out", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) // Approval for AfterStageTasks of canary stage validateAndApproveClusterApprovalRequests(updateRunName, envCanary, placementv1beta1.AfterStageApprovalTaskNameFmt) @@ -1558,7 +1558,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should complete the cluster staged update run with all 3 clusters updated in parallel", func() { @@ -1648,7 +1648,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) It("Should create a cluster staged update run successfully", func() { - createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should complete the cluster staged update run with all 3 clusters", func() { @@ -1667,7 +1667,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { }) }) - Context("Test resource rollout with staged update run by update run states - (Initialize -> Execute)", Ordered, func() { + Context("Test resource rollout with staged update run by update run states - (Initialized -> Executed)", 
Ordered, func() { updateRunNames := []string{} var strategy *placementv1beta1.ClusterStagedUpdateStrategy @@ -1730,7 +1730,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should create a cluster staged update run successfully", func() { By("Creating Cluster Staged Update Run in state Initialize") - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateInitialize) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateInitialized) }) It("Should not start rollout as the update run is in Initialize state", func() { @@ -1745,7 +1745,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to member-cluster-2 only after update run is in Execute state", func() { // Update the update run state to Execute By("Updating the update run state to Execute") - updateClusterStagedUpdateRunState(updateRunNames[0], placementv1beta1.StateExecute) + updateClusterStagedUpdateRunState(updateRunNames[0], placementv1beta1.StateExecuted) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) @@ -1838,7 +1838,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) By("Creating the first staged update run") - createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) By("Validating staged update run has succeeded") csurSucceededActual := 
clusterStagedUpdateRunStatusSucceededActual(updateRunNames[0], resourceSnapshotIndex1st, policySnapshotIndex1st, 3, defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[0], allMemberClusterNames[1], allMemberClusterNames[2]}}, nil, nil, nil, true) @@ -1889,7 +1889,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should create another staged update run for the same CRP", func() { validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 2) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should complete the second staged update run and complete the CRP", func() { @@ -1937,7 +1937,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should reschedule to member cluster 1 and create a new cluster staged update run successfully", func() { validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should complete the staged update run, complete CRP, and rollout resources to all member clusters", func() { @@ -1980,7 +1980,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should reschedule to member cluster 1 and create a new cluster staged update run successfully", func() { validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecute) 
+ createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecuted) }) It("Should complete the staged update run, complete CRP, and rollout updated resources to all member clusters", func() { @@ -2019,7 +2019,7 @@ var _ = Describe("Test member cluster join and leave flow with updateRun", Label It("Should reschedule to member cluster 1 and create a new cluster staged update run successfully", func() { validateLatestClusterSchedulingPolicySnapshot(crpName, policySnapshotIndex1st, 3) - createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createClusterStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should complete the staged update run, complete CRP, and re-place resources to all member clusters", func() { @@ -2186,7 +2186,7 @@ func createClusterStagedUpdateRunSucceedWithNoResourceSnapshotIndex(updateRunNam Name: updateRunName, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateExecute, + State: placementv1beta1.StateExecuted, PlacementName: crpName, StagedUpdateStrategyName: strategyName, }, diff --git a/test/e2e/staged_updaterun_test.go b/test/e2e/staged_updaterun_test.go index e10960fb6..95e9dffe2 100644 --- a/test/e2e/staged_updaterun_test.go +++ b/test/e2e/staged_updaterun_test.go @@ -300,7 +300,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", 
func() { @@ -364,7 +364,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a new staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecuted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -412,7 +412,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a new staged update run with old resourceSnapshotIndex successfully to rollback", func() { - createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should rollback resources to member-cluster-2 only and completes stage canary", func() { @@ -523,7 +523,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -577,7 +577,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, 
placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should still have resources on member-cluster-1 and member-cluster-2 only and completes stage canary", func() { @@ -632,7 +632,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should still have resources on all member clusters and complete stage canary", func() { @@ -725,7 +725,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a namespaced staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should not rollout any resources to member clusters and complete stage canary", func() { @@ -778,7 +778,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a namespaced staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunNames[1], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should still have resources on member-cluster-2 and member-cluster-3 only and completes stage canary", func() { @@ 
-832,7 +832,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a namespaced staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunNames[2], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should still have resources on all member clusters and complete stage canary", func() { @@ -980,7 +980,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should rollout resources to member-cluster-2 only and complete stage canary", func() { @@ -1084,7 +1084,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should report diff for member-cluster-2 only and completes stage canary", func() { @@ -1198,7 +1198,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Create a staged update run with new resourceSnapshotIndex and verify rollout happens", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecute) + 
createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex2nd, strategyName, placementv1beta1.StateExecuted) // Verify rollout to canary cluster first. By("Verify that the new configmap is updated on member-cluster-2 during canary stage") @@ -1284,7 +1284,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Create updateRun and verify resources are rolled out", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) // Approval for AfterStageTask of canary stage validateAndApproveNamespacedApprovalRequests(updateRunName, testNamespace, envCanary, placementv1beta1.AfterStageApprovalTaskNameFmt) @@ -1419,7 +1419,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should complete the staged update run with all 3 clusters updated in parallel", func() { @@ -1508,7 +1508,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem }) It("Should create a staged update run successfully", func() { - createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecute) + createStagedUpdateRunSucceed(updateRunName, testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateExecuted) }) It("Should complete the staged update run with all 3 clusters", func() { @@ -1588,7 +1588,7 @@ 
var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should create a staged update run successfully", func() { By("Creating staged update run in Initialize state") - createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateInitialize) + createStagedUpdateRunSucceed(updateRunNames[0], testNamespace, rpName, resourceSnapshotIndex1st, strategyName, placementv1beta1.StateInitialized) }) It("Should not start rollout as the update run is in Initialize state", func() { @@ -1603,7 +1603,7 @@ var _ = Describe("test RP rollout with staged update run", Label("resourceplacem It("Should rollout resources to member-cluster-2 only after update run is in Execute state", func() { // Update the update run state to Execute. By("Updating the update run state to Execute") - updateStagedUpdateRunState(updateRunNames[0], testNamespace, placementv1beta1.StateExecute) + updateStagedUpdateRunState(updateRunNames[0], testNamespace, placementv1beta1.StateExecuted) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) checkIfRemovedConfigMapFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) @@ -1747,7 +1747,7 @@ func createStagedUpdateRunSucceedWithNoResourceSnapshotIndex(updateRunName, name Namespace: namespace, }, Spec: placementv1beta1.UpdateRunSpec{ - State: placementv1beta1.StateExecute, + State: placementv1beta1.StateExecuted, PlacementName: rpName, StagedUpdateStrategyName: strategyName, },