diff --git a/api/v1/cosmosfullnode_types.go b/api/v1/cosmosfullnode_types.go
index 6be6d3ce..eae4ce89 100644
--- a/api/v1/cosmosfullnode_types.go
+++ b/api/v1/cosmosfullnode_types.go
@@ -385,6 +385,11 @@ type AutoDataSource struct {
 	// If no VolumeSnapshots found, controller logs error and still creates PVC.
 	// +optional
 	VolumeSnapshotSelector map[string]string `json:"volumeSnapshotSelector"`
+
+	// If true, the volume snapshot selector will make sure the PVC
+	// is restored from a VolumeSnapshot on the same node.
+	// This is useful if the VolumeSnapshots are local to the node, e.g. for topolvm.
+	MatchInstance bool `json:"matchInstance"`
 }
 
 // RolloutStrategy is an update strategy that can be shared between several Cosmos CRDs.
diff --git a/api/v1/self_healing_types.go b/api/v1/self_healing_types.go
index 8302a573..90501a57 100644
--- a/api/v1/self_healing_types.go
+++ b/api/v1/self_healing_types.go
@@ -66,7 +66,7 @@ type HeightDriftMitigationSpec struct {
 type SelfHealingStatus struct {
 	// PVC auto-scaling status.
 	// +optional
-	PVCAutoScale *PVCAutoScaleStatus `json:"pvcAutoScale"`
+	PVCAutoScale map[string]*PVCAutoScaleStatus `json:"pvcAutoScaler"`
 }
 
 type PVCAutoScaleStatus struct {
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index b7e88e0a..ea3d8dc5 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -679,8 +679,18 @@ func (in *SelfHealingStatus) DeepCopyInto(out *SelfHealingStatus) {
 	*out = *in
 	if in.PVCAutoScale != nil {
 		in, out := &in.PVCAutoScale, &out.PVCAutoScale
-		*out = new(PVCAutoScaleStatus)
-		(*in).DeepCopyInto(*out)
+		*out = make(map[string]*PVCAutoScaleStatus, len(*in))
+		for key, val := range *in {
+			var outVal *PVCAutoScaleStatus
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = new(PVCAutoScaleStatus)
+				(*in).DeepCopyInto(*out)
+			}
+			(*out)[key] = outVal
+		}
 	}
 }
 
diff --git a/config/crd/bases/cosmos.strange.love_cosmosfullnodes.yaml b/config/crd/bases/cosmos.strange.love_cosmosfullnodes.yaml
index df036c09..581a229d 100644
--- a/config/crd/bases/cosmos.strange.love_cosmosfullnodes.yaml
+++ b/config/crd/bases/cosmos.strange.love_cosmosfullnodes.yaml
@@ -386,6 +386,12 @@ spec:
                        set; that field takes precedence. Configuring autoDataSource
                        may help boostrap new replicas more quickly.
                      properties:
+                        matchInstance:
+                          description: If true, the volume snapshot selector will
+                            make sure the PVC is restored from a VolumeSnapshot
+                            on the same node. This is useful if the VolumeSnapshots
+                            are local to the node, e.g. for topolvm.
+                          type: boolean
                        volumeSnapshotSelector:
                          additionalProperties:
                            type: string
@@ -397,6 +403,8 @@ spec:
                            no VolumeSnapshots found, controller logs error and still
                            creates PVC.
                          type: object
+                      required:
+                      - matchInstance
                      type: object
                    dataSource:
                      description: 'Can be used to specify either: * An existing
@@ -5814,6 +5822,12 @@ spec:
                    that field takes precedence. Configuring autoDataSource may
                    help boostrap new replicas more quickly.
                  properties:
+                    matchInstance:
+                      description: If true, the volume snapshot selector will make
+                        sure the PVC is restored from a VolumeSnapshot on the same
+                        node. This is useful if the VolumeSnapshots are local to
+                        the node, e.g. for topolvm.
+                      type: boolean
                    volumeSnapshotSelector:
                      additionalProperties:
                        type: string
@@ -5824,6 +5838,8 @@ spec:
                        namespace as the CosmosFullNode. If no VolumeSnapshots
                        found, controller logs error and still creates PVC.
                      type: object
+                  required:
+                  - matchInstance
                  type: object
                dataSource:
                  description: 'Can be used to specify either: * An existing VolumeSnapshot
@@ -5986,24 +6002,26 @@ spec:
            selfHealing:
              description: Status set by the SelfHealing controller.
              properties:
-                pvcAutoScale:
+                pvcAutoScaler:
+                  additionalProperties:
+                    properties:
+                      requestedAt:
+                        description: The timestamp the SelfHealing controller requested
+                          a PVC increase.
+                        format: date-time
+                        type: string
+                      requestedSize:
+                        anyOf:
+                        - type: integer
+                        - type: string
+                        description: The PVC size requested by the SelfHealing controller.
+                        pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                        x-kubernetes-int-or-string: true
+                    required:
+                    - requestedAt
+                    - requestedSize
+                    type: object
                  description: PVC auto-scaling status.
-                  properties:
-                    requestedAt:
-                      description: The timestamp the SelfHealing controller requested
-                        a PVC increase.
-                      format: date-time
-                      type: string
-                    requestedSize:
-                      anyOf:
-                      - type: integer
-                      - type: string
-                      description: The PVC size requested by the SelfHealing controller.
-                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
-                      x-kubernetes-int-or-string: true
-                  required:
-                  - requestedAt
-                  - requestedSize
                  type: object
              type: object
          status:
diff --git a/controllers/cosmosfullnode_controller.go b/controllers/cosmosfullnode_controller.go
index f60d370a..b2988091 100644
--- a/controllers/cosmosfullnode_controller.go
+++ b/controllers/cosmosfullnode_controller.go
@@ -121,7 +121,9 @@ func (r *CosmosFullNodeReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 
 	syncInfo := fullnode.SyncInfoStatus(ctx, crd, r.cacheController)
 
-	defer r.updateStatus(ctx, crd, syncInfo)
+	pvcStatusChanges := fullnode.PVCStatusChanges{}
+
+	defer r.updateStatus(ctx, crd, syncInfo, &pvcStatusChanges)
 
 	errs := &kube.ReconcileErrors{}
 
@@ -178,7 +180,7 @@ func (r *CosmosFullNodeReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	}
 
 	// Reconcile pvcs.
-	pvcRequeue, err := r.pvcControl.Reconcile(ctx, reporter, crd)
+	pvcRequeue, err := r.pvcControl.Reconcile(ctx, reporter, crd, &pvcStatusChanges)
 	if err != nil {
 		errs.Append(err)
 	}
@@ -221,7 +223,12 @@ func (r *CosmosFullNodeReconciler) resultWithErr(crd *cosmosv1.CosmosFullNode, e
 	return stopResult, err
 }
 
-func (r *CosmosFullNodeReconciler) updateStatus(ctx context.Context, crd *cosmosv1.CosmosFullNode, syncInfo map[string]*cosmosv1.SyncInfoPodStatus) {
+func (r *CosmosFullNodeReconciler) updateStatus(
+	ctx context.Context,
+	crd *cosmosv1.CosmosFullNode,
+	syncInfo map[string]*cosmosv1.SyncInfoPodStatus,
+	pvcStatusChanges *fullnode.PVCStatusChanges,
+) {
 	if err := r.statusClient.SyncUpdate(ctx, client.ObjectKeyFromObject(crd), func(status *cosmosv1.FullNodeStatus) {
 		status.ObservedGeneration = crd.Status.ObservedGeneration
 		status.Phase = crd.Status.Phase
@@ -236,6 +243,11 @@ func (r *CosmosFullNodeReconciler) updateStatus(ctx context.Context, crd *cosmos
 				status.Height[k] = *v.Height
 			}
 		}
+		if status.SelfHealing.PVCAutoScale != nil {
+			for _, k := range pvcStatusChanges.Deleted {
+				delete(status.SelfHealing.PVCAutoScale, k)
+			}
+		}
 	}); err != nil {
 		log.FromContext(ctx).Error(err, "Failed to patch status")
 	}
diff --git a/internal/fullnode/mock_test.go b/internal/fullnode/mock_test.go
index 278badcb..1c32b2ad 100644
--- a/internal/fullnode/mock_test.go
+++ b/internal/fullnode/mock_test.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"sync"
 
+	snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	cosmosv1 "github.com/strangelove-ventures/cosmos-operator/api/v1"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
@@ -58,6 +59,8 @@ func (m *mockClient[T]) Get(ctx context.Context, key client.ObjectKey, obj clien
 		*ref = m.Object.(corev1.PersistentVolumeClaim)
 	case *cosmosv1.CosmosFullNode:
 		*ref = m.Object.(cosmosv1.CosmosFullNode)
+	case *snapshotv1.VolumeSnapshot:
+		*ref = m.Object.(snapshotv1.VolumeSnapshot)
 	default:
 		panic(fmt.Errorf("unknown Object type: %T", m.ObjectList))
 	}
diff --git a/internal/fullnode/pod_builder_test.go b/internal/fullnode/pod_builder_test.go
index 27be4668..96fc7d20 100644
--- a/internal/fullnode/pod_builder_test.go
+++ b/internal/fullnode/pod_builder_test.go
@@ -37,6 +37,11 @@ func defaultCRD() cosmosv1.CosmosFullNode {
 					},
 				},
 			},
+			VolumeClaimTemplate: cosmosv1.PersistentVolumeClaimSpec{
+				Resources: corev1.ResourceRequirements{
+					Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("100Gi")},
+				},
+			},
 		},
 	}
 }
diff --git a/internal/fullnode/pvc_auto_scaler.go b/internal/fullnode/pvc_auto_scaler.go
index 20f8b659..c537f7c7 100644
--- a/internal/fullnode/pvc_auto_scaler.go
+++ b/internal/fullnode/pvc_auto_scaler.go
@@ -3,11 +3,9 @@ package fullnode
 import (
 	"context"
 	"errors"
-	"fmt"
 	"math"
 	"time"
 
-	"github.com/samber/lo"
 	cosmosv1 "github.com/strangelove-ventures/cosmos-operator/api/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -45,52 +43,68 @@ func NewPVCAutoScaler(client StatusSyncer) *PVCAutoScaler {
 // Returns an error if patching unsuccessful.
 func (scaler PVCAutoScaler) SignalPVCResize(ctx context.Context, crd *cosmosv1.CosmosFullNode, results []PVCDiskUsage) (bool, error) {
 	var (
-		spec         = crd.Spec.SelfHeal.PVCAutoScale
-		trigger      = int(spec.UsedSpacePercentage)
-		pvcCandidate = lo.MaxBy(results, func(a PVCDiskUsage, b PVCDiskUsage) bool { return a.PercentUsed > b.PercentUsed })
+		spec    = crd.Spec.SelfHeal.PVCAutoScale
+		trigger = int(spec.UsedSpacePercentage)
 	)
 
-	// Calc new size first to catch errors with the increase quantity
-	newSize, err := scaler.calcNextCapacity(pvcCandidate.Capacity, spec.IncreaseQuantity)
-	if err != nil {
-		return false, fmt.Errorf("increaseQuantity must be a percentage string (e.g. 10%%) or a storage quantity (e.g. 100Gi): %w", err)
-	}
+	var joinedErr error
 
-	// Prevent patching if PVC size not at threshold
-	if pvcCandidate.PercentUsed < trigger {
-		return false, nil
-	}
+	status := crd.Status.SelfHealing.PVCAutoScale
+
+	patches := make(map[string]*cosmosv1.PVCAutoScaleStatus)
+
+	now := metav1.NewTime(scaler.now())
 
-	// Prevent continuous reconcile loops
-	if status := crd.Status.SelfHealing.PVCAutoScale; status != nil {
-		if status.RequestedSize.Value() == newSize.Value() {
-			return false, nil
+	for _, pvc := range results {
+		if pvc.PercentUsed < trigger {
+			// no need to expand
+			continue
 		}
-	}
 
-	// Handle max size
-	if max := spec.MaxSize; !max.IsZero() {
-		// If already reached max size, don't patch
-		if pvcCandidate.Capacity.Cmp(max) >= 0 {
-			return false, nil
+		newSize, err := scaler.calcNextCapacity(pvc.Capacity, spec.IncreaseQuantity)
+		if err != nil {
+			joinedErr = errors.Join(joinedErr, err)
+			continue
 		}
-		// Cap new size to the max size
-		if newSize.Cmp(max) >= 0 {
-			newSize = max
+
+		if status != nil {
+			if pvcStatus, ok := status[pvc.Name]; ok && pvcStatus.RequestedSize.Value() == newSize.Value() {
+				// already requested
+				continue
+			}
 		}
-	}
 
-	// Patch object status which will signal the CosmosFullNode controller to increase PVC size.
-	var patch cosmosv1.CosmosFullNode
-	patch.TypeMeta = crd.TypeMeta
-	patch.Namespace = crd.Namespace
-	patch.Name = crd.Name
-	return true, scaler.client.SyncUpdate(ctx, client.ObjectKeyFromObject(&patch), func(status *cosmosv1.FullNodeStatus) {
-		status.SelfHealing.PVCAutoScale = &cosmosv1.PVCAutoScaleStatus{
+		if max := spec.MaxSize; !max.IsZero() {
+			if pvc.Capacity.Cmp(max) >= 0 {
+				// already at max size
+				continue
+			}
+
+			if newSize.Cmp(max) >= 0 {
+				// Cap new size to the max size
+				newSize = max
+			}
+		}
+
+		patches[pvc.Name] = &cosmosv1.PVCAutoScaleStatus{
 			RequestedSize: newSize,
-			RequestedAt:   metav1.NewTime(scaler.now()),
+			RequestedAt:   now,
+		}
+	}
+
+	if len(patches) == 0 {
+		return false, joinedErr
+	}
+
+	return true, errors.Join(joinedErr, scaler.client.SyncUpdate(ctx, client.ObjectKeyFromObject(crd), func(status *cosmosv1.FullNodeStatus) {
+		if status.SelfHealing.PVCAutoScale == nil {
+			status.SelfHealing.PVCAutoScale = patches
+			return
+		}
+		for k, v := range patches {
+			status.SelfHealing.PVCAutoScale[k] = v
 		}
-	})
+	}))
 }
 
 func (scaler PVCAutoScaler) calcNextCapacity(current resource.Quantity, increase string) (resource.Quantity, error) {
diff --git a/internal/fullnode/pvc_auto_scaler_test.go b/internal/fullnode/pvc_auto_scaler_test.go
index 955f3cd3..cb757801 100644
--- a/internal/fullnode/pvc_auto_scaler_test.go
+++ b/internal/fullnode/pvc_auto_scaler_test.go
@@ -77,8 +77,8 @@ func TestPVCAutoScaler_SignalPVCResize(t *testing.T) {
 			var got cosmosv1.FullNodeStatus
 			update(&got)
 			gotStatus := got.SelfHealing.PVCAutoScale
-			require.Equal(t, stubNow, gotStatus.RequestedAt.Time, tt)
-			require.Truef(t, tt.Want.Equal(gotStatus.RequestedSize), "%s:\nwant %+v\ngot %+v", tt, tt.Want, gotStatus.RequestedSize)
+			require.Equal(t, stubNow, gotStatus["pvc-"+name+"-0"].RequestedAt.Time, tt)
+			require.Truef(t, tt.Want.Equal(gotStatus["pvc-"+name+"-0"].RequestedSize), "%s:\nwant %+v\ngot %+v", tt, tt.Want, gotStatus["pvc-"+name+"-0"].RequestedSize)
 
 			patchCalled = true
 			return nil
@@ -91,9 +91,9 @@ func TestPVCAutoScaler_SignalPVCResize(t *testing.T) {
 
 		trigger := 80 + r.Intn(20)
 		usage := []PVCDiskUsage{
-			{PercentUsed: trigger, Capacity: capacity},
-			{PercentUsed: 10},
-			{PercentUsed: 79},
+			{Name: "pvc-" + name + "-0", PercentUsed: trigger, Capacity: capacity},
+			{Name: "pvc-" + name + "-1", PercentUsed: 10},
+			{Name: "pvc-" + name + "-2", PercentUsed: 79},
 		}
 
 		got, err := scaler.SignalPVCResize(ctx, &crd, lo.Shuffle(usage))
@@ -111,6 +111,8 @@ func TestPVCAutoScaler_SignalPVCResize(t *testing.T) {
 		const usedSpacePercentage = 80
 
 		var crd cosmosv1.CosmosFullNode
+		name := "name"
+		crd.Name = name
 		crd.Spec.SelfHeal = &cosmosv1.SelfHealSpec{
 			PVCAutoScale: &cosmosv1.PVCAutoScaleSpec{
 				UsedSpacePercentage: usedSpacePercentage,
@@ -124,8 +126,8 @@ func TestPVCAutoScaler_SignalPVCResize(t *testing.T) {
 			var got cosmosv1.FullNodeStatus
 			update(&got)
 			gotStatus := got.SelfHealing.PVCAutoScale
-			require.Equal(t, maxSize.Value(), gotStatus.RequestedSize.Value())
-			require.Equal(t, maxSize.Format, gotStatus.RequestedSize.Format)
+			require.Equal(t, maxSize.Value(), gotStatus["pvc-"+name+"-0"].RequestedSize.Value())
+			require.Equal(t, maxSize.Format, gotStatus["pvc-"+name+"-0"].RequestedSize.Format)
 
 			patchCalled = true
 			return nil
@@ -133,7 +135,7 @@ func TestPVCAutoScaler_SignalPVCResize(t *testing.T) {
 
 		scaler := NewPVCAutoScaler(syncer)
 		usage := []PVCDiskUsage{
-			{PercentUsed: 80, Capacity: capacity},
+			{Name: "pvc-" + name + "-0", PercentUsed: 80, Capacity: capacity},
 		}
 
 		got, err := scaler.SignalPVCResize(ctx, &crd, lo.Shuffle(usage))
@@ -152,6 +154,8 @@ func TestPVCAutoScaler_SignalPVCResize(t *testing.T) {
 		const usedSpacePercentage = 60
 
 		var crd cosmosv1.CosmosFullNode
+		name := "name"
+		crd.Name = name
 		crd.Spec.SelfHeal = &cosmosv1.SelfHealSpec{
 			PVCAutoScale: &cosmosv1.PVCAutoScaleSpec{
 				UsedSpacePercentage: usedSpacePercentage,
@@ -162,7 +166,7 @@ func TestPVCAutoScaler_SignalPVCResize(t *testing.T) {
 
 		scaler := NewPVCAutoScaler(panicSyncer)
 		usage := []PVCDiskUsage{
-			{PercentUsed: 80, Capacity: tt.Capacity},
+			{Name: "pvc-" + name + "-0", PercentUsed: 80, Capacity: tt.Capacity},
 		}
 
 		got, err := scaler.SignalPVCResize(ctx, &crd, usage)
@@ -202,19 +206,23 @@ func TestPVCAutoScaler_SignalPVCResize(t *testing.T) {
 		const usedSpacePercentage = 90
 
 		var crd cosmosv1.CosmosFullNode
+		name := "name"
+		crd.Name = name
 		crd.Spec.SelfHeal = &cosmosv1.SelfHealSpec{
 			PVCAutoScale: &cosmosv1.PVCAutoScaleSpec{
 				UsedSpacePercentage: usedSpacePercentage,
 				IncreaseQuantity:    "10Gi",
 			},
 		}
-		crd.Status.SelfHealing.PVCAutoScale = &cosmosv1.PVCAutoScaleStatus{
-			RequestedSize: resource.MustParse("100Gi"),
+		crd.Status.SelfHealing.PVCAutoScale = map[string]*cosmosv1.PVCAutoScaleStatus{
+			"pvc-" + name + "-0": {
+				RequestedSize: resource.MustParse("100Gi"),
+			},
 		}
 
 		scaler := NewPVCAutoScaler(panicSyncer)
 		usage := []PVCDiskUsage{
-			{PercentUsed: usedSpacePercentage, Capacity: resource.MustParse("90Gi")},
+			{Name: "pvc-" + name + "-0", PercentUsed: usedSpacePercentage, Capacity: resource.MustParse("90Gi")},
 		}
 
 		got, err := scaler.SignalPVCResize(ctx, &crd, usage)
@@ -232,6 +240,8 @@ func TestPVCAutoScaler_SignalPVCResize(t *testing.T) {
 			{"wut"},
 		} {
 			var crd cosmosv1.CosmosFullNode
+			name := "name"
+			crd.Name = name
 			crd.Spec.SelfHeal = &cosmosv1.SelfHealSpec{
 				PVCAutoScale: &cosmosv1.PVCAutoScaleSpec{
 					UsedSpacePercentage: usedSpacePercentage,
@@ -241,12 +251,12 @@ func TestPVCAutoScaler_SignalPVCResize(t *testing.T) {
 
 			scaler := NewPVCAutoScaler(panicSyncer)
 			usage := []PVCDiskUsage{
-				{PercentUsed: usedSpacePercentage},
+				{Name: "pvc-" + name + "-0", PercentUsed: usedSpacePercentage},
 			}
 
 			_, err := scaler.SignalPVCResize(ctx, &crd, lo.Shuffle(usage))
 			require.Error(t, err)
-			require.Contains(t, err.Error(), "increaseQuantity must be a percentage string (e.g. 10%) or a storage quantity (e.g. 100Gi):")
+			require.Contains(t, err.Error(), "invalid value for IntOrString: invalid type: string is not a percentage")
 		}
 	})
 
@@ -265,7 +275,7 @@ func TestPVCAutoScaler_SignalPVCResize(t *testing.T) {
 			return errors.New("boom")
 		}))
 		usage := []PVCDiskUsage{
-			{PercentUsed: usedSpacePercentage},
+			{Name: "pvc-0", PercentUsed: usedSpacePercentage},
 		}
 
 		_, err := scaler.SignalPVCResize(ctx, &crd, lo.Shuffle(usage))
diff --git a/internal/fullnode/pvc_builder.go b/internal/fullnode/pvc_builder.go
index 12f113c0..b895872a 100644
--- a/internal/fullnode/pvc_builder.go
+++ b/internal/fullnode/pvc_builder.go
@@ -21,7 +21,11 @@ var (
 )
 
 // BuildPVCs outputs desired PVCs given the crd.
-func BuildPVCs(crd *cosmosv1.CosmosFullNode) []diff.Resource[*corev1.PersistentVolumeClaim] {
+func BuildPVCs(
+	crd *cosmosv1.CosmosFullNode,
+	dataSources map[int32]*dataSource,
+	currentPVCs []*corev1.PersistentVolumeClaim,
+) []diff.Resource[*corev1.PersistentVolumeClaim] {
 	base := corev1.PersistentVolumeClaim{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: "v1",
@@ -45,6 +49,21 @@ func BuildPVCs(crd *cosmosv1.CosmosFullNode) []diff.Resource[*corev1.PersistentV
 		pvc.Name = name
 		pvc.Labels[kube.InstanceLabel] = instanceName(crd, i)
 
+		var dataSource *corev1.TypedLocalObjectReference
+		var existingSize resource.Quantity
+		if ds, ok := dataSources[i]; ok && ds != nil {
+			dataSource = ds.ref
+		} else {
+			for _, pvc := range currentPVCs {
+				if pvc.Name == name {
+					if pvc.DeletionTimestamp == nil && pvc.Status.Phase == corev1.ClaimBound {
+						existingSize = pvc.Status.Capacity[corev1.ResourceStorage]
+					}
+					break
+				}
+			}
+		}
+
 		tpl := crd.Spec.VolumeClaimTemplate
 		if override, ok := crd.Spec.InstanceOverrides[instanceName(crd, i)]; ok {
 			if overrideTpl := override.VolumeClaimTemplate; overrideTpl != nil {
@@ -54,10 +73,9 @@ func BuildPVCs(crd *cosmosv1.CosmosFullNode) []diff.Resource[*corev1.PersistentV
 
 		pvc.Spec = corev1.PersistentVolumeClaimSpec{
 			AccessModes:      sliceOrDefault(tpl.AccessModes, defaultAccessModes),
-			Resources:        pvcResources(crd),
+			Resources:        pvcResources(crd, name, dataSources[i], existingSize),
 			StorageClassName: ptr(tpl.StorageClassName),
 			VolumeMode:       valOrDefault(tpl.VolumeMode, ptr(corev1.PersistentVolumeFilesystem)),
-			DataSource:       tpl.DataSource,
 		}
 
 		preserveMergeInto(pvc.Labels, tpl.Metadata.Labels)
@@ -65,6 +83,7 @@ func BuildPVCs(crd *cosmosv1.CosmosFullNode) []diff.Resource[*corev1.PersistentV
 		kube.NormalizeMetadata(&pvc.ObjectMeta)
 
 		pvcs = append(pvcs, diff.Adapt(pvc, i))
+		pvc.Spec.DataSource = dataSource
 	}
 	return pvcs
 }
@@ -80,19 +99,33 @@ func pvcName(crd *cosmosv1.CosmosFullNode, ordinal int32) string {
 	return kube.ToName(name)
 }
 
-func pvcResources(crd *cosmosv1.CosmosFullNode) corev1.ResourceRequirements {
-	var (
-		reqs = crd.Spec.VolumeClaimTemplate.Resources
-		size = reqs.Requests[corev1.ResourceStorage]
-	)
+func pvcResources(
+	crd *cosmosv1.CosmosFullNode,
+	name string,
+	dataSource *dataSource,
+	existingSize resource.Quantity,
+) corev1.ResourceRequirements {
+	var reqs = crd.Spec.VolumeClaimTemplate.Resources.DeepCopy()
+
+	if dataSource != nil {
+		reqs.Requests[corev1.ResourceStorage] = dataSource.size
+		return *reqs
+	}
 
 	if autoScale := crd.Status.SelfHealing.PVCAutoScale; autoScale != nil {
-		requestedSize := autoScale.RequestedSize.DeepCopy()
-		newSize := requestedSize.AsDec()
-		sizeWithPadding := resource.NewDecimalQuantity(*newSize.Mul(newSize, inf.NewDec(snapshotGrowthFactor, 2)), resource.DecimalSI)
-		if sizeWithPadding.Cmp(size) > 0 {
-			reqs.Requests[corev1.ResourceStorage] = *sizeWithPadding
+		if status, ok := autoScale[name]; ok {
+			requestedSize := status.RequestedSize.DeepCopy()
+			newSize := requestedSize.AsDec()
+			sizeWithPadding := resource.NewDecimalQuantity(*newSize.Mul(newSize, inf.NewDec(snapshotGrowthFactor, 2)), resource.DecimalSI)
+			if sizeWithPadding.Cmp(reqs.Requests[corev1.ResourceStorage]) > 0 {
+				reqs.Requests[corev1.ResourceStorage] = *sizeWithPadding
+			}
 		}
 	}
-	return reqs
+
+	if existingSize.Cmp(reqs.Requests[corev1.ResourceStorage]) > 0 {
+		reqs.Requests[corev1.ResourceStorage] = existingSize
+	}
+
+	return *reqs
 }
diff --git a/internal/fullnode/pvc_builder_test.go b/internal/fullnode/pvc_builder_test.go
index 26cdb51e..dca1390b 100644
--- a/internal/fullnode/pvc_builder_test.go
+++ b/internal/fullnode/pvc_builder_test.go
@@ -22,23 +22,23 @@ func TestBuildPVCs(t *testing.T) {
 
 		crd := defaultCRD()
 		crd.Name = "juno"
 		crd.Spec.Replicas = 3
-		crd.Spec.VolumeClaimTemplate = cosmosv1.PersistentVolumeClaimSpec{
-			StorageClassName: "test-storage-class",
-			Resources: corev1.ResourceRequirements{
-				Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("100G")},
-			},
-		}
+		crd.Spec.VolumeClaimTemplate.StorageClassName = "test-storage-class"
 		crd.Spec.InstanceOverrides = map[string]cosmosv1.InstanceOverridesSpec{
 			"juno-0": {},
 		}
 
-		for i, r := range BuildPVCs(&crd) {
+		initial := BuildPVCs(&crd, map[int32]*dataSource{}, nil)
+		for i, r := range initial {
 			require.Equal(t, int64(i), r.Ordinal())
 			require.NotEmpty(t, r.Revision())
 		}
 
-		pvcs := lo.Map(BuildPVCs(&crd), func(r diff.Resource[*corev1.PersistentVolumeClaim], _ int) *corev1.PersistentVolumeClaim {
+		initialPVCs := lo.Map(initial, func(r diff.Resource[*corev1.PersistentVolumeClaim], _ int) *corev1.PersistentVolumeClaim {
+			return r.Object()
+		})
+
+		pvcs := lo.Map(BuildPVCs(&crd, map[int32]*dataSource{}, initialPVCs), func(r diff.Resource[*corev1.PersistentVolumeClaim], _ int) *corev1.PersistentVolumeClaim {
 			return r.Object()
 		})
 
@@ -75,20 +75,22 @@ func TestBuildPVCs(t *testing.T) {
 	t.Run("advanced configuration", func(t *testing.T) {
 		crd := defaultCRD()
 		crd.Spec.Replicas = 1
-		crd.Spec.VolumeClaimTemplate = cosmosv1.PersistentVolumeClaimSpec{
-			Metadata: cosmosv1.Metadata{
-				Labels:      map[string]string{"label": "value", "app.kubernetes.io/created-by": "should not see me"},
-				Annotations: map[string]string{"annot": "value"},
-			},
-			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
-			VolumeMode:  ptr(corev1.PersistentVolumeBlock),
-			DataSource: &corev1.TypedLocalObjectReference{
-				Kind: "TestKind",
-				Name: "source-name",
-			},
+		crd.Spec.VolumeClaimTemplate.Metadata = cosmosv1.Metadata{
+			Labels:      map[string]string{"label": "value", "app.kubernetes.io/created-by": "should not see me"},
+			Annotations: map[string]string{"annot": "value"},
+		}
+		crd.Spec.VolumeClaimTemplate.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}
+		crd.Spec.VolumeClaimTemplate.VolumeMode = ptr(corev1.PersistentVolumeBlock)
+		crd.Spec.VolumeClaimTemplate.DataSource = &corev1.TypedLocalObjectReference{
+			Kind: "TestKind",
+			Name: "source-name",
 		}
 
-		pvcs := BuildPVCs(&crd)
+		pvcs := BuildPVCs(&crd, map[int32]*dataSource{
+			0: {
+				ref: crd.Spec.VolumeClaimTemplate.DataSource,
+			},
+		}, nil)
 		require.NotEmpty(t, pvcs)
 
 		got := pvcs[0].Object()
@@ -126,7 +128,7 @@ func TestBuildPVCs(t *testing.T) {
 		},
 	}
 
-	pvcs := BuildPVCs(&crd)
+	pvcs := BuildPVCs(&crd, map[int32]*dataSource{}, nil)
 	require.Equal(t, 2, len(pvcs))
 
 	got1, got2 := pvcs[0].Object(), pvcs[1].Object()
@@ -141,7 +143,7 @@ func TestBuildPVCs(t *testing.T) {
 		crd.Spec.Replicas = 3
 		crd.Name = strings.Repeat("Y", 300)
 
-		pvcs := BuildPVCs(&crd)
+		pvcs := BuildPVCs(&crd, map[int32]*dataSource{}, nil)
 		require.NotEmpty(t, pvcs)
 
 		for _, got := range pvcs {
@@ -158,16 +160,15 @@ func TestBuildPVCs(t *testing.T) {
 		} {
 			crd := defaultCRD()
 			crd.Spec.Replicas = 1
-			crd.Spec.VolumeClaimTemplate = cosmosv1.PersistentVolumeClaimSpec{
-				Resources: corev1.ResourceRequirements{
-					Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(tt.SpecQuant)},
+			crd.Spec.VolumeClaimTemplate.Resources.Requests[corev1.ResourceStorage] = resource.MustParse(tt.SpecQuant)
+
+			crd.Status.SelfHealing.PVCAutoScale = map[string]*cosmosv1.PVCAutoScaleStatus{
+				"pvc-osmosis-0": {
+					RequestedSize: resource.MustParse(tt.AutoScaleQuant),
 				},
 			}
 
-			crd.Status.SelfHealing.PVCAutoScale = &cosmosv1.PVCAutoScaleStatus{
-				RequestedSize: resource.MustParse(tt.AutoScaleQuant),
-			}
-			pvcs := BuildPVCs(&crd)
+			pvcs := BuildPVCs(&crd, map[int32]*dataSource{}, nil)
 			require.Len(t, pvcs, 1, tt)
 
 			want := corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(tt.WantQuant)}
@@ -183,16 +184,15 @@ func TestBuildPVCs(t *testing.T) {
 		} {
 			crd := defaultCRD()
 			crd.Spec.Replicas = 1
-			crd.Spec.VolumeClaimTemplate = cosmosv1.PersistentVolumeClaimSpec{
-				Resources: corev1.ResourceRequirements{
-					Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(tt.SpecQuant)},
+			crd.Spec.VolumeClaimTemplate.Resources.Requests[corev1.ResourceStorage] = resource.MustParse(tt.SpecQuant)
+
+			crd.Status.SelfHealing.PVCAutoScale = map[string]*cosmosv1.PVCAutoScaleStatus{
+				"pvc-osmosis-0": {
+					RequestedSize: resource.MustParse(tt.AutoScaleQuant),
 				},
 			}
 
-			crd.Status.SelfHealing.PVCAutoScale = &cosmosv1.PVCAutoScaleStatus{
-				RequestedSize: resource.MustParse(tt.AutoScaleQuant),
-			}
-			pvcs := BuildPVCs(&crd)
+			pvcs := BuildPVCs(&crd, map[int32]*dataSource{}, nil)
 			require.Len(t, pvcs, 1, tt)
 
 			want := corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(tt.WantQuant)}
@@ -200,7 +200,7 @@ func TestBuildPVCs(t *testing.T) {
 		}
 	})
 
-	t.Run("given auto scale size greater then current size", func(t *testing.T) {
+	t.Run("given auto scale size greater than current size", func(t *testing.T) {
 		for _, tt := range []struct {
 			SpecQuant, AutoScaleQuant, WantQuant string
 		}{
@@ -208,16 +208,15 @@ func TestBuildPVCs(t *testing.T) {
 		} {
 			crd := defaultCRD()
 			crd.Spec.Replicas = 1
-			crd.Spec.VolumeClaimTemplate = cosmosv1.PersistentVolumeClaimSpec{
-				Resources: corev1.ResourceRequirements{
-					Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(tt.SpecQuant)},
+			crd.Spec.VolumeClaimTemplate.Resources.Requests[corev1.ResourceStorage] = resource.MustParse(tt.SpecQuant)
+
+			crd.Status.SelfHealing.PVCAutoScale = map[string]*cosmosv1.PVCAutoScaleStatus{
+				"pvc-osmosis-0": {
+					RequestedSize: resource.MustParse(tt.AutoScaleQuant),
 				},
 			}
 
-			crd.Status.SelfHealing.PVCAutoScale = &cosmosv1.PVCAutoScaleStatus{
-				RequestedSize: resource.MustParse(tt.AutoScaleQuant),
-			}
-			pvcs := BuildPVCs(&crd)
+			pvcs := BuildPVCs(&crd, map[int32]*dataSource{}, nil)
 			require.Len(t, pvcs, 1, tt)
 
 			want := corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(tt.WantQuant)}
@@ -227,7 +226,7 @@ func TestBuildPVCs(t *testing.T) {
 	})
 
 	test.HasTypeLabel(t, func(crd cosmosv1.CosmosFullNode) []map[string]string {
-		pvcs := BuildPVCs(&crd)
+		pvcs := BuildPVCs(&crd, map[int32]*dataSource{}, nil)
 		labels := make([]map[string]string, 0)
 		for _, pvc := range pvcs {
 			labels = append(labels, pvc.Object().Labels)
diff --git a/internal/fullnode/pvc_control.go b/internal/fullnode/pvc_control.go
index 5d5a9956..6786f377 100644
--- a/internal/fullnode/pvc_control.go
+++ b/internal/fullnode/pvc_control.go
@@ -10,6 +10,7 @@ import (
 	"github.com/strangelove-ventures/cosmos-operator/internal/diff"
 	"github.com/strangelove-ventures/cosmos-operator/internal/kube"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -30,9 +31,13 @@ func NewPVCControl(client Client) PVCControl {
 	}
 }
 
+type PVCStatusChanges struct {
+	Deleted []string
+}
+
 // Reconcile is the control loop for PVCs. The bool return value, if true, indicates the controller should requeue
 // the request.
-func (control PVCControl) Reconcile(ctx context.Context, reporter kube.Reporter, crd *cosmosv1.CosmosFullNode) (bool, kube.ReconcileError) {
+func (control PVCControl) Reconcile(ctx context.Context, reporter kube.Reporter, crd *cosmosv1.CosmosFullNode, pvcStatusChanges *PVCStatusChanges) (bool, kube.ReconcileError) {
 	// Find any existing pvcs for this CRD.
 	var vols corev1.PersistentVolumeClaimList
 	if err := control.client.List(ctx, &vols,
@@ -42,35 +47,61 @@ func (control PVCControl) Reconcile(ctx context.Context, reporter kube.Reporter,
 		return false, kube.TransientError(fmt.Errorf("list existing pvcs: %w", err))
 	}
 
+	var currentPVCs = ptrSlice(vols.Items)
+
+	dataSources := make(map[int32]*dataSource)
+	if len(currentPVCs) < int(crd.Spec.Replicas) {
+		for i := int32(0); i < crd.Spec.Replicas; i++ {
+			name := pvcName(crd, i)
+			found := false
+			for _, pvc := range currentPVCs {
+				if pvc.Name == name {
+					found = true
+					break
+				}
+			}
+			if !found {
+				ds := control.findDataSource(ctx, reporter, crd, i)
+				if ds == nil {
+					ds = &dataSource{
+						size: crd.Spec.VolumeClaimTemplate.Resources.Requests[corev1.ResourceStorage],
+					}
+				}
+				dataSources[i] = ds
+			}
+		}
+	}
+
 	var (
-		currentPVCs = ptrSlice(vols.Items)
-		wantPVCs    = BuildPVCs(crd)
-		diffed      = diff.New(currentPVCs, wantPVCs)
+		wantPVCs = BuildPVCs(crd, dataSources, currentPVCs)
+		diffed   = diff.New(currentPVCs, wantPVCs)
 	)
 
-	var dataSource *corev1.TypedLocalObjectReference
-	if len(diffed.Creates()) > 0 {
-		dataSource = control.findDataSource(ctx, reporter, crd)
-	}
-
 	for _, pvc := range diffed.Creates() {
-		pvc.Spec.DataSource = dataSource
-		reporter.Info("Creating pvc", "pvc", pvc.Name)
+		size := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
+
+		reporter.Info(
+			"Creating pvc",
+			"name", pvc.Name,
+			"size", size.String(),
+		)
 		if err := ctrl.SetControllerReference(crd, pvc, control.client.Scheme()); err != nil {
 			return true, kube.TransientError(fmt.Errorf("set controller reference on pvc %q: %w", pvc.Name, err))
 		}
 		if err := control.client.Create(ctx, pvc); kube.IgnoreAlreadyExists(err) != nil {
 			return true, kube.TransientError(fmt.Errorf("create pvc %q: %w", pvc.Name, err))
 		}
+		pvcStatusChanges.Deleted = append(pvcStatusChanges.Deleted, pvc.Name)
 	}
 
 	var deletes int
 	if !control.shouldRetain(crd) {
 		for _, pvc := range diffed.Deletes() {
-			reporter.Info("Deleting pvc", "pvc", pvc.Name)
+			reporter.Info("Deleting pvc", "name", pvc.Name)
 			if err := control.client.Delete(ctx, pvc, client.PropagationPolicy(metav1.DeletePropagationForeground)); client.IgnoreNotFound(err) != nil {
 				return true, kube.TransientError(fmt.Errorf("delete pvc %q: %w", pvc.Name, err))
 			}
+			pvcStatusChanges.Deleted = append(pvcStatusChanges.Deleted, pvc.Name)
 		}
 		deletes = len(diffed.Deletes())
 	}
@@ -92,7 +123,12 @@ func (control PVCControl) Reconcile(ctx context.Context, reporter kube.Reporter,
 
 	// PVCs have many immutable fields, so only update the storage size.
 	for _, pvc := range diffed.Updates() {
-		reporter.Info("Patching pvc", "pvc", pvc.Name)
+		size := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
+		reporter.Info(
+			"Patching pvc",
+			"name", pvc.Name,
+			"size", size.String(), // TODO remove expensive operation
+		)
 		patch := corev1.PersistentVolumeClaim{
 			ObjectMeta: pvc.ObjectMeta,
 			TypeMeta:   pvc.TypeMeta,
@@ -101,7 +137,7 @@ func (control PVCControl) Reconcile(ctx context.Context, reporter kube.Reporter,
 			},
 		}
 		if err := control.client.Patch(ctx, &patch, client.Merge); err != nil {
-			reporter.Error(err, "PVC patch failed", "pvc", pvc.Name)
+			reporter.Error(err, "PVC patch failed", "name", pvc.Name)
 			reporter.RecordError("PVCPatchFailed", err)
 			continue
 		}
@@ -117,11 +153,60 @@ func (control PVCControl) shouldRetain(crd *cosmosv1.CosmosFullNode) bool {
 	return false
 }
 
-func (control PVCControl) findDataSource(ctx context.Context, reporter kube.Reporter, crd *cosmosv1.CosmosFullNode) *corev1.TypedLocalObjectReference {
-	if ds := crd.Spec.VolumeClaimTemplate.DataSource; ds != nil {
-		return ds
+type dataSource struct {
+	ref *corev1.TypedLocalObjectReference
+
+	size resource.Quantity
+}
+
+func (control PVCControl) findDataSource(ctx context.Context, reporter kube.Reporter, crd *cosmosv1.CosmosFullNode, ordinal int32) *dataSource {
+	if override, ok := crd.Spec.InstanceOverrides[instanceName(crd, ordinal)]; ok {
+		if overrideTpl := override.VolumeClaimTemplate; overrideTpl != nil {
+			return control.findDataSourceWithPvcSpec(ctx, reporter, crd, *overrideTpl, ordinal)
+		}
+	}
+
+	return control.findDataSourceWithPvcSpec(ctx, reporter, crd, crd.Spec.VolumeClaimTemplate, ordinal)
+}
+
+func (control PVCControl) findDataSourceWithPvcSpec(
+	ctx context.Context,
+	reporter kube.Reporter,
+	crd *cosmosv1.CosmosFullNode,
+	pvcSpec cosmosv1.PersistentVolumeClaimSpec,
+	ordinal int32,
+) *dataSource {
+	if ds := pvcSpec.DataSource; ds != nil {
+		if ds.Kind == "VolumeSnapshot" && ds.APIGroup != nil && *ds.APIGroup == "snapshot.storage.k8s.io" {
+			var vs snapshotv1.VolumeSnapshot
+			if err := control.client.Get(ctx, client.ObjectKey{Namespace: crd.Namespace, Name: ds.Name}, &vs); err != nil {
+				reporter.Error(err, "Failed to get VolumeSnapshot for DataSource")
+				reporter.RecordError("DataSourceGetSnapshot", err)
+				return nil
+			}
+			return &dataSource{
+				ref:  ds,
+				size: *vs.Status.RestoreSize,
+			}
+		} else if ds.Kind == "PersistentVolumeClaim" && (ds.APIGroup == nil || *ds.APIGroup == "") {
+			var pvc corev1.PersistentVolumeClaim
+			if err := control.client.Get(ctx, client.ObjectKey{Namespace: crd.Namespace, Name: ds.Name}, &pvc); err != nil {
+				reporter.Error(err, "Failed to get PersistentVolumeClaim for DataSource")
+				reporter.RecordError("DataSourceGetPVC", err)
+				return nil
+			}
+			return &dataSource{
+				ref:  ds,
+				size: pvc.Status.Capacity["storage"],
+			}
+		} else {
+			err := fmt.Errorf("unsupported DataSource %s", ds.Kind)
+			reporter.Error(err, "Unsupported DataSource")
+			reporter.RecordError("DataSourceUnsupported", err)
+			return nil
+		}
 	}
-	spec := crd.Spec.VolumeClaimTemplate.AutoDataSource
+	spec := pvcSpec.AutoDataSource
 	if spec == nil {
 		return nil
 	}
@@ -129,6 +214,9 @@ func (control PVCControl) findDataSource(ctx context.Context, reporter kube.Repo
 	if len(selector) == 0 {
 		return nil
 	}
+	if spec.MatchInstance {
+		selector[kube.InstanceLabel] = instanceName(crd, ordinal)
+	}
 	found, err := control.recentVolumeSnapshot(ctx, control.client, crd.Namespace, selector)
 	if err != nil {
 		reporter.Error(err, "Failed to find VolumeSnapshot for AutoDataSource")
@@ -137,9 +225,12 @@ func (control PVCControl) findDataSource(ctx context.Context, reporter kube.Repo
 	}
 
 	reporter.RecordInfo("AutoDataSource", "Using recent VolumeSnapshot for PVC data source")
-	return &corev1.TypedLocalObjectReference{
-		APIGroup: ptr("snapshot.storage.k8s.io"),
-		Kind:     "VolumeSnapshot",
-		Name:     found.Name,
+	return &dataSource{
+		ref: &corev1.TypedLocalObjectReference{
+			APIGroup: ptr("snapshot.storage.k8s.io"),
+			Kind:     "VolumeSnapshot",
+			Name:     found.Name,
+		},
+		size: *found.Status.RestoreSize,
 	}
 }
diff --git a/internal/fullnode/pvc_control_test.go b/internal/fullnode/pvc_control_test.go
index fbfb26ce..584e7d6c 100644
--- a/internal/fullnode/pvc_control_test.go
+++ b/internal/fullnode/pvc_control_test.go
@@ -40,7 +40,7 @@ func TestPVCControl_Reconcile(t *testing.T) {
 		crd.Name = "hub"
 		crd.Namespace = namespace
 		crd.Spec.Replicas = 1
-		existing := diff.New(nil, BuildPVCs(&crd)).Creates()[0]
+		existing := diff.New(nil, BuildPVCs(&crd, map[int32]*dataSource{}, nil)).Creates()[0]
 		existing.Status.Phase = corev1.ClaimBound
 
 		var mClient mockPVCClient
@@ -52,7 +52,7 @@ func TestPVCControl_Reconcile(t *testing.T) {
 
 		control := testPVCControl(&mClient)
 
-		requeue, err := control.Reconcile(ctx, nopReporter, &crd)
+		requeue, err := control.Reconcile(ctx, nopReporter, &crd, &PVCStatusChanges{})
 		require.NoError(t, err)
 		require.False(t, requeue)
 
@@ -73,7 +73,7 @@ func TestPVCControl_Reconcile(t *testing.T) {
 		crd.Namespace = namespace
 		crd.Name = "hub"
 		crd.Spec.Replicas = 1
-		existing := BuildPVCs(&crd)[0].Object()
+		existing := BuildPVCs(&crd, map[int32]*dataSource{}, nil)[0].Object()
 
 		var mClient mockPVCClient
 		mClient.ObjectList = corev1.PersistentVolumeClaimList{
@@ -86,7 +86,7 @@ func TestPVCControl_Reconcile(t *testing.T) {
 		crd.Spec.Replicas = 4
 
 		control := testPVCControl(&mClient)
-		requeue, err := control.Reconcile(ctx, nopReporter, &crd)
+		requeue, err := control.Reconcile(ctx, nopReporter, &crd, &PVCStatusChanges{})
 		require.NoError(t, err)
 		require.True(t, requeue)
 
@@ -111,6 +111,7 @@ func TestPVCControl_Reconcile(t *testing.T) {
 		crd.Spec.VolumeClaimTemplate.AutoDataSource = &cosmosv1.AutoDataSource{
 			VolumeSnapshotSelector: map[string]string{"label": "vol-snapshot"},
 		}
 
+		var volCallCount int
 		control.recentVolumeSnapshot = func(ctx context.Context, lister kube.Lister, namespace string, selector map[string]string) (*snapshotv1.VolumeSnapshot, error) {
 			require.NotNil(t, ctx)
@@ -119,14 +120,18 @@ func TestPVCControl_Reconcile(t *testing.T) {
 			require.Equal(t, map[string]string{"label": "vol-snapshot"}, selector)
 			var stub snapshotv1.VolumeSnapshot
 			stub.Name = "found-snapshot"
+			stub.Status = &snapshotv1.VolumeSnapshotStatus{
+				ReadyToUse:  ptr(true),
+				RestoreSize: ptr(resource.MustParse("100Gi")),
+			}
 			volCallCount++
 			return &stub, nil
 		}
 
-		requeue, err := control.Reconcile(ctx, nopReporter, &crd)
+		requeue, err := control.Reconcile(ctx, nopReporter, &crd, &PVCStatusChanges{})
 		require.NoError(t, err)
 		require.True(t, requeue)
-		require.Equal(t, 1, volCallCount)
+		require.Equal(t, 3, volCallCount)
 		require.Equal(t, 3, mClient.CreateCount)
 
 		want := corev1.TypedLocalObjectReference{
@@ -162,7 +167,19 @@ func TestPVCControl_Reconcile(t *testing.T) {
 		control.recentVolumeSnapshot = func(ctx context.Context, lister kube.Lister, namespace string, selector map[string]string) (*snapshotv1.VolumeSnapshot, error) {
 			panic("should not be called")
 		}
-		requeue, err := control.Reconcile(ctx, nopReporter, &crd)
+
+		mClient.Object = snapshotv1.VolumeSnapshot{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "user-set-snapshot",
"user-set-snapshot", + Namespace: namespace, + }, + Status: &snapshotv1.VolumeSnapshotStatus{ + ReadyToUse: ptr(true), + RestoreSize: ptr(resource.MustParse("100Gi")), + }, + } + + requeue, err := control.Reconcile(ctx, nopReporter, &crd, &PVCStatusChanges{}) require.NoError(t, err) require.True(t, requeue) @@ -191,7 +208,7 @@ func TestPVCControl_Reconcile(t *testing.T) { volCallCount++ return nil, errors.New("boom") } - requeue, err := control.Reconcile(ctx, nopReporter, &crd) + requeue, err := control.Reconcile(ctx, nopReporter, &crd, &PVCStatusChanges{}) require.NoError(t, err) require.True(t, requeue) @@ -208,7 +225,7 @@ func TestPVCControl_Reconcile(t *testing.T) { crd.Spec.Replicas = 1 var mClient mockPVCClient - existing := BuildPVCs(&crd)[0].Object() + existing := BuildPVCs(&crd, map[int32]*dataSource{}, nil)[0].Object() existing.Status.Phase = corev1.ClaimBound mClient.ObjectList = corev1.PersistentVolumeClaimList{ Items: []corev1.PersistentVolumeClaim{*existing}, @@ -216,12 +233,10 @@ func TestPVCControl_Reconcile(t *testing.T) { // Cause a change crd.Spec.VolumeClaimTemplate.VolumeMode = ptr(corev1.PersistentVolumeMode("should not be in the patch")) - crd.Spec.VolumeClaimTemplate.Resources = corev1.ResourceRequirements{ - Requests: corev1.ResourceList{"memory": resource.MustParse("1Gi")}, - } + crd.Spec.VolumeClaimTemplate.Resources.Requests["memory"] = resource.MustParse("1Gi") control := testPVCControl(&mClient) - requeue, rerr := control.Reconcile(ctx, nopReporter, &crd) + requeue, rerr := control.Reconcile(ctx, nopReporter, &crd, &PVCStatusChanges{}) require.NoError(t, rerr) require.False(t, requeue) @@ -244,7 +259,7 @@ func TestPVCControl_Reconcile(t *testing.T) { crd.Namespace = namespace crd.Spec.Replicas = 1 - existing := BuildPVCs(&crd)[0].Object() + existing := BuildPVCs(&crd, map[int32]*dataSource{}, nil)[0].Object() existing.Status.Phase = corev1.ClaimPending var mClient mockPVCClient mClient.ObjectList = corev1.PersistentVolumeClaimList{ @@ -252,11 +267,10 @@ func TestPVCControl_Reconcile(t *testing.T) { } // Cause a change - crd.Spec.VolumeClaimTemplate.Resources = corev1.ResourceRequirements{ - Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("1Ti")}, - } + crd.Spec.VolumeClaimTemplate.Resources.Requests[corev1.ResourceStorage] = resource.MustParse("1Ti") + control := testPVCControl(&mClient) - requeue, rerr := control.Reconcile(ctx, nopReporter, &crd) + requeue, rerr := control.Reconcile(ctx, nopReporter, &crd, &PVCStatusChanges{}) require.NoError(t, rerr) require.True(t, requeue) @@ -278,7 +292,7 @@ func TestPVCControl_Reconcile(t *testing.T) { } control := testPVCControl(&mClient) - requeue, err := control.Reconcile(ctx, nopReporter, &crd) + requeue, err := control.Reconcile(ctx, nopReporter, &crd, &PVCStatusChanges{}) require.NoError(t, err) require.False(t, requeue)