diff --git a/pkg/controller/restore_finalizer_controller.go b/pkg/controller/restore_finalizer_controller.go
index d9aaaa30a6..f90d7cfb6a 100644
--- a/pkg/controller/restore_finalizer_controller.go
+++ b/pkg/controller/restore_finalizer_controller.go
@@ -19,6 +19,7 @@ package controller
 import (
 	"context"
 	"fmt"
+	storagev1api "k8s.io/api/storage/v1"
 	"sync"
 	"time"
 
@@ -304,6 +305,24 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result
 				return false, err
 			}
 
+			// check if the pvc is not nil, is in a pending state, and references a storage class
+			if pvc != nil && pvc.Status.Phase == v1.ClaimPending && pvc.Spec.StorageClassName != nil {
+				// check if the storage class used has VolumeBindingMode set to WaitForFirstConsumer
+				scName := *pvc.Spec.StorageClassName
+				sc := &storagev1api.StorageClass{}
+				err = ctx.crClient.Get(context.Background(), client.ObjectKey{Name: scName}, sc)
+				if err != nil {
+					errs.Add(restoredNamespace, err)
+					return false, err
+				}
+				// skip the PV patch step for this scenario, because the PVC will not become bound
+				// and the patch step would fail due to timeout, thus failing the restore
+				if *sc.VolumeBindingMode == storagev1api.VolumeBindingWaitForFirstConsumer {
+					log.Infof("skipping PV patch step: StorageClass %s used by PVC %s has VolumeBindingMode set to WaitForFirstConsumer, and the PVC is also in a pending state", scName, pvc.Name)
+					return true, nil
+				}
+			}
+
 			if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
 				log.Debugf("PVC: %s not ready", pvc.Name)
 				return false, nil
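
For context (not part of the patch), here is a minimal, self-contained sketch of the same skip decision, assuming a controller-runtime client as used in the diff. The helper name `shouldSkipPVPatch` is hypothetical and does not exist in Velero; the controller inlines this logic in `patchDynamicPVWithVolumeInfo`.

```go
// Standalone sketch of the skip decision added above; shouldSkipPVPatch is a
// hypothetical helper for illustration only.
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// shouldSkipPVPatch returns true when the PVC is still Pending and its
// StorageClass binds volumes on first consumer, in which case waiting for the
// PVC to become Bound would only time out and fail the restore.
func shouldSkipPVPatch(ctx context.Context, c client.Client, pvc *corev1.PersistentVolumeClaim) (bool, error) {
	if pvc == nil || pvc.Status.Phase != corev1.ClaimPending || pvc.Spec.StorageClassName == nil {
		return false, nil
	}
	sc := &storagev1.StorageClass{}
	if err := c.Get(ctx, client.ObjectKey{Name: *pvc.Spec.StorageClassName}, sc); err != nil {
		return false, err
	}
	return sc.VolumeBindingMode != nil &&
		*sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer, nil
}
```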