Skip to content

Commit c25e4c7

Browse files
fix: ensure accurate PVC sizing during volume migration between different CSI drivers (#1613)
Description This fix addresses volume migration between CSI drivers with different provisioning characteristics by preserving actual disk capacity information. Related Upstream PR: deckhouse/3p-kubevirt#36 Why do we need it, and what problem does it solve? The Problem: Different CSI drivers may provision volumes with slightly different actual sizes due to: Storage provider alignment optimizations Allocation unit rounding Performance optimization padding This causes issues when migrating between storage backends, as the requested capacity may not match the physically provisioned size. The Solution: ProvisionedCapacity - actual physical size provisioned by the CSI driver During migration to a new storage backend, the system uses the ProvisionedCapacity to request the target PVC, ensuring the new volume has sufficient physical capacity regardless of driver-specific provisioning behavior. Implementation Details: ProvisionedCapacity is detected when the virtual machine starts migrating and the actual disk size can be measured Migration workflows now prioritize physical capacity over requested capacity for target volume sizing Target PVC uses ProvisionedCapacity only when it's larger than the requested capacity Important: Migration between different volumeMode (Block ↔ Filesystem) is currently not supported. --------- Signed-off-by: Yaroslav Borbat <[email protected]>
1 parent d5b275f commit c25e4c7

File tree

11 files changed

+255
-73
lines changed

11 files changed

+255
-73
lines changed

build/components/versions.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ firmware:
33
libvirt: v10.9.0
44
edk2: stable202411
55
core:
6-
3p-kubevirt: v1.3.1-v12n.20
6+
3p-kubevirt: v1.3.1-v12n.21
77
3p-containerized-data-importer: v1.60.3-v12n.12
88
distribution: 2.8.3
99
package:

images/virt-artifact/werf.inc.yaml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@
55
{{- $tag := get $.Core $gitRepoName }}
66
{{- $version := (split "-" $tag)._0 }}
77

8-
98
---
109
image: {{ .ModuleNamePrefix }}{{ .ImageName }}-src-artifact
1110
final: false

images/virtualization-artifact/pkg/common/vd/vd.go

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,14 @@ func StorageClassChanged(vd *v1alpha2.VirtualDisk) bool {
5757
}
5858

5959
specSc := vd.Spec.PersistentVolumeClaim.StorageClass
60+
if specSc == nil {
61+
return false
62+
}
63+
6064
statusSc := vd.Status.StorageClassName
65+
if *specSc == statusSc {
66+
return false
67+
}
6168

62-
return specSc != nil && *specSc != statusSc && *specSc != "" && statusSc != ""
69+
return *specSc != "" && statusSc != ""
6370
}

images/virtualization-artifact/pkg/controller/vd/internal/migration.go

Lines changed: 45 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ import (
3030
"k8s.io/apimachinery/pkg/types"
3131
"k8s.io/component-base/featuregate"
3232
"k8s.io/utils/ptr"
33+
virtv1 "kubevirt.io/api/core/v1"
3334
"sigs.k8s.io/controller-runtime/pkg/client"
3435
"sigs.k8s.io/controller-runtime/pkg/reconcile"
3536

@@ -38,6 +39,7 @@ import (
3839
commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd"
3940
commonvmop "github.com/deckhouse/virtualization-controller/pkg/common/vmop"
4041
"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
42+
"github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder"
4143
"github.com/deckhouse/virtualization-controller/pkg/controller/service"
4244
vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements"
4345
"github.com/deckhouse/virtualization-controller/pkg/logger"
@@ -399,8 +401,19 @@ func (h MigrationHandler) handleMigratePrepareTarget(ctx context.Context, vd *v1
399401
return nil
400402
}
401403

404+
volumeMode, accessMode, err := h.modeGetter.GetVolumeAndAccessModes(ctx, vd, targetStorageClass)
405+
if err != nil {
406+
return fmt.Errorf("get volume and access modes: %w", err)
407+
}
408+
409+
provisionedCapacity, err := h.getProvisionedCapacity(ctx, vd)
410+
if err != nil {
411+
return fmt.Errorf("get provisioned capacity: %w", err)
412+
}
413+
size = calculateTargetSize(ctx, size, provisionedCapacity)
414+
402415
log.Info("Start creating target PersistentVolumeClaim", slog.String("storageClass", targetStorageClass.Name), slog.String("capacity", size.String()))
403-
pvc, err := h.createTargetPersistentVolumeClaim(ctx, vd, targetStorageClass, size, targetPVCName, vd.Status.Target.PersistentVolumeClaim)
416+
pvc, err := h.createTargetPersistentVolumeClaim(ctx, vd, targetStorageClass, size, targetPVCName, vd.Status.Target.PersistentVolumeClaim, volumeMode, accessMode)
404417
if err != nil {
405418
return err
406419
}
@@ -429,6 +442,36 @@ func (h MigrationHandler) handleMigratePrepareTarget(ctx context.Context, vd *v1
429442
return h.handleMigrateSync(ctx, vd)
430443
}
431444

445+
func (h MigrationHandler) getProvisionedCapacity(ctx context.Context, vd *v1alpha2.VirtualDisk) (resource.Quantity, error) {
446+
currentlyMountedVM := commonvd.GetCurrentlyMountedVMName(vd)
447+
if currentlyMountedVM == "" {
448+
return resource.Quantity{}, fmt.Errorf("no currently mounted VM")
449+
}
450+
451+
kvvmi := &virtv1.VirtualMachineInstance{}
452+
err := h.client.Get(ctx, types.NamespacedName{Name: currentlyMountedVM, Namespace: vd.Namespace}, kvvmi)
453+
if err != nil {
454+
return resource.Quantity{}, fmt.Errorf("get KVVMI: %w", err)
455+
}
456+
457+
volumeName := kvbuilder.GenerateVDDiskName(vd.Name)
458+
for _, volumeStatus := range kvvmi.Status.VolumeStatus {
459+
if volumeStatus.Name == volumeName {
460+
return *resource.NewQuantity(volumeStatus.Size, resource.BinarySI), nil
461+
}
462+
}
463+
464+
return resource.Quantity{}, nil
465+
}
466+
467+
func calculateTargetSize(ctx context.Context, size, realSize resource.Quantity) resource.Quantity {
468+
if realSize.IsZero() {
469+
logger.FromContext(ctx).Error("Failed to detect RealSize disk on KVVMI. Please report a bug.")
470+
return size
471+
}
472+
return realSize
473+
}
474+
432475
func (h MigrationHandler) handleMigrateSync(ctx context.Context, vd *v1alpha2.VirtualDisk) error {
433476
pvc, err := h.getTargetPersistentVolumeClaim(ctx, vd)
434477
if err != nil {
@@ -587,7 +630,7 @@ func (h MigrationHandler) getInProgressMigratingVMOP(ctx context.Context, vm *v1
587630
return nil, nil
588631
}
589632

590-
func (h MigrationHandler) createTargetPersistentVolumeClaim(ctx context.Context, vd *v1alpha2.VirtualDisk, sc *storagev1.StorageClass, size resource.Quantity, targetPVCName, sourcePVCName string) (*corev1.PersistentVolumeClaim, error) {
633+
func (h MigrationHandler) createTargetPersistentVolumeClaim(ctx context.Context, vd *v1alpha2.VirtualDisk, sc *storagev1.StorageClass, size resource.Quantity, targetPVCName, sourcePVCName string, volumeMode corev1.PersistentVolumeMode, accessMode corev1.PersistentVolumeAccessMode) (*corev1.PersistentVolumeClaim, error) {
591634
pvcs, err := listPersistentVolumeClaims(ctx, vd, h.client)
592635
if err != nil {
593636
return nil, err
@@ -606,11 +649,6 @@ func (h MigrationHandler) createTargetPersistentVolumeClaim(ctx context.Context,
606649
return nil, fmt.Errorf("unexpected number of pvcs: %d, please report a bug", len(pvcs))
607650
}
608651

609-
volumeMode, accessMode, err := h.modeGetter.GetVolumeAndAccessModes(ctx, vd, sc)
610-
if err != nil {
611-
return nil, fmt.Errorf("get volume and access modes: %w", err)
612-
}
613-
614652
pvc := &corev1.PersistentVolumeClaim{
615653
ObjectMeta: metav1.ObjectMeta{
616654
GenerateName: fmt.Sprintf("vd-%s-", vd.UID),

images/virtualization-artifact/pkg/controller/vd/internal/migration_test.go

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ import (
3030
"k8s.io/apimachinery/pkg/runtime"
3131
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
3232
"k8s.io/utils/ptr"
33+
virtv1 "kubevirt.io/api/core/v1"
3334
"sigs.k8s.io/controller-runtime/pkg/client"
3435
"sigs.k8s.io/controller-runtime/pkg/client/fake"
3536

@@ -80,6 +81,7 @@ var _ = Describe("MigrationHandler", func() {
8081
migrationHandler *MigrationHandler
8182
vd *v1alpha2.VirtualDisk
8283
vm *v1alpha2.VirtualMachine
84+
kvvmi *virtv1.VirtualMachineInstance
8385
storageClass *storagev1.StorageClass
8486
pvc *corev1.PersistentVolumeClaim
8587
)
@@ -90,6 +92,7 @@ var _ = Describe("MigrationHandler", func() {
9092
scheme = runtime.NewScheme()
9193
Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed())
9294
Expect(v1alpha2.AddToScheme(scheme)).To(Succeed())
95+
Expect(virtv1.AddToScheme(scheme)).To(Succeed())
9396

9497
scValidator = &fakeStorageClassValidator{
9598
allowedStorageClasses: map[string]bool{
@@ -138,6 +141,25 @@ var _ = Describe("MigrationHandler", func() {
138141
},
139142
}
140143

144+
kvvmi = &virtv1.VirtualMachineInstance{
145+
TypeMeta: metav1.TypeMeta{
146+
Kind: "VirtualMachineInstance",
147+
APIVersion: "kubevirt.io/v1",
148+
},
149+
ObjectMeta: metav1.ObjectMeta{
150+
Name: "test-vm",
151+
Namespace: "default",
152+
},
153+
Status: virtv1.VirtualMachineInstanceStatus{
154+
VolumeStatus: []virtv1.VolumeStatus{
155+
{
156+
Name: "vd-test-vd",
157+
Size: 10*1024*1024*1024 + 2*1024*1024, // 10Gi + 2Mi overhead
158+
},
159+
},
160+
},
161+
}
162+
141163
// Create test StorageClass
142164
storageClass = &storagev1.StorageClass{
143165
ObjectMeta: metav1.ObjectMeta{
@@ -306,7 +328,14 @@ var _ = Describe("MigrationHandler", func() {
306328
pvc.Status.Capacity = corev1.ResourceList{
307329
corev1.ResourceStorage: resource.MustParse("10Gi"),
308330
}
331+
vd.Status.AttachedToVirtualMachines = []v1alpha2.AttachedVirtualMachine{
332+
{
333+
Name: "test-vm",
334+
Mounted: true,
335+
},
336+
}
309337
Expect(fakeClient.Create(ctx, pvc)).To(Succeed())
338+
Expect(fakeClient.Create(ctx, kvvmi)).To(Succeed())
310339
})
311340

312341
It("should start migration", func() {

images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go

Lines changed: 2 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -19,47 +19,28 @@ package validator
1919
import (
2020
"context"
2121
"errors"
22-
"fmt"
2322
"reflect"
2423

2524
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
26-
"sigs.k8s.io/controller-runtime/pkg/client"
2725
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
2826

29-
commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd"
3027
"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
3128
intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/service"
32-
"github.com/deckhouse/virtualization-controller/pkg/featuregates"
3329
"github.com/deckhouse/virtualization/api/core/v1alpha2"
3430
"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
3531
)
3632

3733
type SpecChangesValidator struct {
38-
client client.Client
3934
scService *intsvc.VirtualDiskStorageClassService
4035
}
4136

42-
func NewSpecChangesValidator(client client.Client, scService *intsvc.VirtualDiskStorageClassService) *SpecChangesValidator {
37+
func NewSpecChangesValidator(scService *intsvc.VirtualDiskStorageClassService) *SpecChangesValidator {
4338
return &SpecChangesValidator{
44-
client: client,
4539
scService: scService,
4640
}
4741
}
4842

4943
func (v *SpecChangesValidator) ValidateCreate(ctx context.Context, newVD *v1alpha2.VirtualDisk) (admission.Warnings, error) {
50-
if newVD.Spec.PersistentVolumeClaim.StorageClass != nil && *newVD.Spec.PersistentVolumeClaim.StorageClass != "" {
51-
sc, err := v.scService.GetStorageClass(ctx, *newVD.Spec.PersistentVolumeClaim.StorageClass)
52-
if err != nil {
53-
return nil, err
54-
}
55-
if v.scService.IsStorageClassDeprecated(sc) {
56-
return nil, fmt.Errorf(
57-
"the provisioner of the %q storage class is deprecated; please use a different one",
58-
*newVD.Spec.PersistentVolumeClaim.StorageClass,
59-
)
60-
}
61-
}
62-
6344
return nil, nil
6445
}
6546

@@ -70,54 +51,15 @@ func (v *SpecChangesValidator) ValidateUpdate(ctx context.Context, oldVD, newVD
7051

7152
ready, _ := conditions.GetCondition(vdcondition.ReadyType, newVD.Status.Conditions)
7253
switch {
73-
case ready.Status == metav1.ConditionTrue, newVD.Status.Phase == v1alpha2.DiskReady, newVD.Status.Phase == v1alpha2.DiskLost:
54+
case ready.Status == metav1.ConditionTrue, newVD.Status.Phase == v1alpha2.DiskReady, newVD.Status.Phase == v1alpha2.DiskMigrating, newVD.Status.Phase == v1alpha2.DiskLost:
7455
if !reflect.DeepEqual(oldVD.Spec.DataSource, newVD.Spec.DataSource) {
7556
return nil, errors.New("data source cannot be changed if the VirtualDisk has already been provisioned")
7657
}
7758

78-
if !reflect.DeepEqual(oldVD.Spec.PersistentVolumeClaim.StorageClass, newVD.Spec.PersistentVolumeClaim.StorageClass) {
79-
if commonvd.VolumeMigrationEnabled(featuregates.Default(), newVD) {
80-
vmName := commonvd.GetCurrentlyMountedVMName(newVD)
81-
if vmName == "" {
82-
return nil, errors.New("storage class cannot be changed if the VirtualDisk not mounted to virtual machine")
83-
}
84-
85-
vm := &v1alpha2.VirtualMachine{}
86-
err := v.client.Get(ctx, client.ObjectKey{Name: vmName, Namespace: newVD.Namespace}, vm)
87-
if err != nil {
88-
return nil, err
89-
}
90-
91-
if !(vm.Status.Phase == v1alpha2.MachineRunning || vm.Status.Phase == v1alpha2.MachineMigrating) {
92-
return nil, errors.New("storage class cannot be changed unless the VirtualDisk is mounted to a running virtual machine")
93-
}
94-
95-
for _, bd := range vm.Status.BlockDeviceRefs {
96-
if bd.Hotplugged {
97-
return nil, errors.New("for now, changing the storage class is not allowed if the virtual machine has hot-plugged block devices")
98-
}
99-
}
100-
} else {
101-
return nil, errors.New("storage class cannot be changed if the VirtualDisk has already been provisioned")
102-
}
103-
}
10459
case newVD.Status.Phase == v1alpha2.DiskTerminating:
10560
if !reflect.DeepEqual(oldVD.Spec, newVD.Spec) {
10661
return nil, errors.New("spec cannot be changed if the VirtualDisk is the process of termination")
10762
}
108-
case newVD.Status.Phase == v1alpha2.DiskPending:
109-
if newVD.Spec.PersistentVolumeClaim.StorageClass != nil && *newVD.Spec.PersistentVolumeClaim.StorageClass != "" {
110-
sc, err := v.scService.GetStorageClass(ctx, *newVD.Spec.PersistentVolumeClaim.StorageClass)
111-
if err != nil {
112-
return nil, err
113-
}
114-
if v.scService.IsStorageClassDeprecated(sc) {
115-
return nil, fmt.Errorf(
116-
"the provisioner of the %q storage class is deprecated; please use a different one",
117-
*newVD.Spec.PersistentVolumeClaim.StorageClass,
118-
)
119-
}
120-
}
12163
}
12264

12365
return nil, nil

0 commit comments

Comments
 (0)