Add support for backup PVC configuration #8109

Merged
1 change: 1 addition & 0 deletions changelogs/unreleased/8109-shubham-pampattiwar
@@ -0,0 +1 @@
Add support for backup PVC configuration
8 changes: 7 additions & 1 deletion pkg/cmd/cli/nodeagent/server.go
@@ -292,7 +292,13 @@
if s.dataPathConfigs != nil && len(s.dataPathConfigs.LoadAffinity) > 0 {
loadAffinity = s.dataPathConfigs.LoadAffinity[0]
}
dataUploadReconciler := controller.NewDataUploadReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.csiSnapshotClient.SnapshotV1(), s.dataPathMgr, loadAffinity, repoEnsurer, clock.RealClock{}, credentialGetter, s.nodeName, s.fileSystem, s.config.dataMoverPrepareTimeout, s.logger, s.metrics)

var backupPVCConfig map[string]nodeagent.BackupPVC
if s.dataPathConfigs != nil && s.dataPathConfigs.BackupPVCConfig != nil {
backupPVCConfig = s.dataPathConfigs.BackupPVCConfig
}

dataUploadReconciler := controller.NewDataUploadReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.csiSnapshotClient.SnapshotV1(), s.dataPathMgr, loadAffinity, backupPVCConfig, repoEnsurer, clock.RealClock{}, credentialGetter, s.nodeName, s.fileSystem, s.config.dataMoverPrepareTimeout, s.logger, s.metrics)

if err = dataUploadReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.WithError(err).Fatal("Unable to create the data upload controller")
}
5 changes: 4 additions & 1 deletion pkg/controller/data_upload_controller.go
@@ -78,12 +78,13 @@ type DataUploadReconciler struct {
snapshotExposerList map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer
dataPathMgr *datapath.Manager
loadAffinity *nodeagent.LoadAffinity
backupPVCConfig map[string]nodeagent.BackupPVC
preparingTimeout time.Duration
metrics *metrics.ServerMetrics
}

func NewDataUploadReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, csiSnapshotClient snapshotter.SnapshotV1Interface,
dataPathMgr *datapath.Manager, loadAffinity *nodeagent.LoadAffinity, repoEnsurer *repository.Ensurer, clock clocks.WithTickerAndDelayedExecution,
dataPathMgr *datapath.Manager, loadAffinity *nodeagent.LoadAffinity, backupPVCConfig map[string]nodeagent.BackupPVC, repoEnsurer *repository.Ensurer, clock clocks.WithTickerAndDelayedExecution,
cred *credentials.CredentialGetter, nodeName string, fs filesystem.Interface, preparingTimeout time.Duration, log logrus.FieldLogger, metrics *metrics.ServerMetrics) *DataUploadReconciler {
return &DataUploadReconciler{
client: client,
@@ -99,6 +100,7 @@ func NewDataUploadReconciler(client client.Client, mgr manager.Manager, kubeClie
snapshotExposerList: map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer{velerov2alpha1api.SnapshotTypeCSI: exposer.NewCSISnapshotExposer(kubeClient, csiSnapshotClient, log)},
dataPathMgr: dataPathMgr,
loadAffinity: loadAffinity,
backupPVCConfig: backupPVCConfig,
preparingTimeout: preparingTimeout,
metrics: metrics,
}
@@ -788,6 +790,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
ExposeTimeout: r.preparingTimeout,
VolumeSize: pvc.Spec.Resources.Requests[corev1.ResourceStorage],
Affinity: r.loadAffinity,
BackupPVCConfig: r.backupPVCConfig,
}, nil
}
return nil, nil
4 changes: 3 additions & 1 deletion pkg/controller/data_upload_controller_test.go
@@ -22,6 +22,8 @@ import (
"testing"
"time"

"github.com/vmware-tanzu/velero/pkg/nodeagent"

snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
snapshotFake "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned/fake"
"github.com/pkg/errors"
@@ -245,7 +247,7 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
if err != nil {
return nil, err
}
return NewDataUploadReconciler(fakeClient, nil, fakeKubeClient, fakeSnapshotClient.SnapshotV1(), dataPathMgr, nil, nil,
return NewDataUploadReconciler(fakeClient, nil, fakeKubeClient, fakeSnapshotClient.SnapshotV1(), dataPathMgr, nil, map[string]nodeagent.BackupPVC{}, nil,
testclocks.NewFakeClock(now), &credentials.CredentialGetter{FromFile: credentialFileStore}, "test-node", fakeFS, time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics()), nil
}

25 changes: 22 additions & 3 deletions pkg/exposer/csi_snapshot.go
@@ -67,6 +67,9 @@

// Affinity specifies the node affinity of the backup pod
Affinity *nodeagent.LoadAffinity

// BackupPVCConfig is the config for backupPVC (intermediate PVC) of snapshot data movement
BackupPVCConfig map[string]nodeagent.BackupPVC
}

// CSISnapshotExposeWaitParam define the input param for WaitExposed of CSI snapshots
@@ -163,7 +166,17 @@
curLog.WithField("vs name", volumeSnapshot.Name).Warnf("The snapshot doesn't contain a valid restore size, use source volume's size %v", volumeSize)
}

backupPVC, err := e.createBackupPVC(ctx, ownerObject, backupVS.Name, csiExposeParam.StorageClass, csiExposeParam.AccessMode, volumeSize)
// check if there is a mapping for source pvc storage class in backupPVC config
// if the mapping exists then use the values (storage class, readOnly accessMode)
// for backupPVC (intermediate PVC in snapshot data movement) object creation
backupPVCStorageClass := csiExposeParam.StorageClass
backupPVCReadOnly := false
if value, exists := csiExposeParam.BackupPVCConfig[csiExposeParam.StorageClass]; exists {
backupPVCStorageClass = value.StorageClass
backupPVCReadOnly = value.ReadOnly
}

backupPVC, err := e.createBackupPVC(ctx, ownerObject, backupVS.Name, backupPVCStorageClass, csiExposeParam.AccessMode, volumeSize, backupPVCReadOnly)
if err != nil {
return errors.Wrap(err, "error to create backup pvc")
}
@@ -347,14 +360,20 @@
return e.csiSnapshotClient.VolumeSnapshotContents().Create(ctx, vsc, metav1.CreateOptions{})
}

func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject corev1.ObjectReference, backupVS, storageClass, accessMode string, resource resource.Quantity) (*corev1.PersistentVolumeClaim, error) {
func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject corev1.ObjectReference, backupVS, storageClass, accessMode string, resource resource.Quantity, readOnly bool) (*corev1.PersistentVolumeClaim, error) {
backupPVCName := ownerObject.Name

volumeMode, err := getVolumeModeByAccessMode(accessMode)
if err != nil {
return nil, err
}

pvcAccessMode := corev1.ReadWriteOnce

if readOnly {
pvcAccessMode = corev1.ReadOnlyMany
}

dataSource := &corev1.TypedLocalObjectReference{
APIGroup: &snapshotv1api.SchemeGroupVersion.Group,
Kind: "VolumeSnapshot",
@@ -377,7 +396,7 @@
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
pvcAccessMode,
},
StorageClassName: &storageClass,
VolumeMode: &volumeMode,
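
Taken together, the hunks above key the backupPVC's storage class and access mode off the source PVC's storage class. The following is a minimal, self-contained sketch of that resolution logic, not code from this change; the storage-class names ("gp2", "gp2-readonly") and the resolveBackupPVC helper are hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/vmware-tanzu/velero/pkg/nodeagent"
)

// resolveBackupPVC mirrors the lookup above: if the source PVC's storage class
// has an entry in the backupPVC config, use that entry's storage class and
// readOnly setting; otherwise fall back to the source storage class with a
// read-write claim.
func resolveBackupPVC(cfg map[string]nodeagent.BackupPVC, sourceStorageClass string) (string, corev1.PersistentVolumeAccessMode) {
	storageClass := sourceStorageClass
	accessMode := corev1.ReadWriteOnce
	if value, exists := cfg[sourceStorageClass]; exists {
		storageClass = value.StorageClass
		if value.ReadOnly {
			accessMode = corev1.ReadOnlyMany
		}
	}
	return storageClass, accessMode
}

func main() {
	cfg := map[string]nodeagent.BackupPVC{
		"gp2": {StorageClass: "gp2-readonly", ReadOnly: true}, // hypothetical mapping
	}
	fmt.Println(resolveBackupPVC(cfg, "gp2"))      // gp2-readonly ReadOnlyMany
	fmt.Println(resolveBackupPVC(cfg, "standard")) // standard ReadWriteOnce
}
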
147 changes: 147 additions & 0 deletions pkg/exposer/csi_snapshot_test.go
@@ -18,10 +18,13 @@ package exposer

import (
"context"
"fmt"
"reflect"
"testing"
"time"

"k8s.io/utils/pointer"

snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
snapshotFake "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned/fake"
"github.com/pkg/errors"
@@ -821,3 +824,147 @@ func TestToSystemAffinity(t *testing.T) {
})
}
}

func Test_csiSnapshotExposer_createBackupPVC(t *testing.T) {
backup := &velerov1.Backup{
TypeMeta: metav1.TypeMeta{
APIVersion: velerov1.SchemeGroupVersion.String(),
Kind: "Backup",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
UID: "fake-uid",
},
}

dataSource := &corev1.TypedLocalObjectReference{
APIGroup: &snapshotv1api.SchemeGroupVersion.Group,
Kind: "VolumeSnapshot",
Name: "fake-snapshot",
}
volumeMode := corev1.PersistentVolumeFilesystem

backupPVC := corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
Kind: backup.Kind,
Name: backup.Name,
UID: backup.UID,
Controller: pointer.BoolPtr(true),
},
},
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
VolumeMode: &volumeMode,
DataSource: dataSource,
DataSourceRef: nil,
StorageClassName: pointer.String("fake-storage-class"),
Resources: corev1.VolumeResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
}

backupPVCReadOnly := corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1.DefaultNamespace,
Name: "fake-backup",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: backup.APIVersion,
Kind: backup.Kind,
Name: backup.Name,
UID: backup.UID,
Controller: pointer.BoolPtr(true),
},
},
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadOnlyMany,
},
VolumeMode: &volumeMode,
DataSource: dataSource,
DataSourceRef: nil,
StorageClassName: pointer.String("fake-storage-class"),
Resources: corev1.VolumeResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
}

tests := []struct {
name string
ownerBackup *velerov1.Backup
backupVS string
storageClass string
accessMode string
resource resource.Quantity
readOnly bool
kubeClientObj []runtime.Object
snapshotClientObj []runtime.Object
want *corev1.PersistentVolumeClaim
wantErr assert.ErrorAssertionFunc
}{
{
name: "backupPVC gets created successfully with parameters from source PVC",
ownerBackup: backup,
backupVS: "fake-snapshot",
storageClass: "fake-storage-class",
accessMode: AccessModeFileSystem,
resource: resource.MustParse("1Gi"),
readOnly: false,
want: &backupPVC,
wantErr: assert.NoError,
},
{
name: "backupPVC gets created successfully with parameters from source PVC but accessMode from backupPVC Config as read only",
ownerBackup: backup,
backupVS: "fake-snapshot",
storageClass: "fake-storage-class",
accessMode: AccessModeFileSystem,
resource: resource.MustParse("1Gi"),
readOnly: true,
want: &backupPVCReadOnly,
wantErr: assert.NoError,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(tt.kubeClientObj...)
fakeSnapshotClient := snapshotFake.NewSimpleClientset(tt.snapshotClientObj...)
e := &csiSnapshotExposer{
kubeClient: fakeKubeClient,
csiSnapshotClient: fakeSnapshotClient.SnapshotV1(),
log: velerotest.NewLogger(),
}
var ownerObject corev1.ObjectReference
if tt.ownerBackup != nil {
ownerObject = corev1.ObjectReference{
Kind: tt.ownerBackup.Kind,
Namespace: tt.ownerBackup.Namespace,
Name: tt.ownerBackup.Name,
UID: tt.ownerBackup.UID,
APIVersion: tt.ownerBackup.APIVersion,
}
}
got, err := e.createBackupPVC(context.Background(), ownerObject, tt.backupVS, tt.storageClass, tt.accessMode, tt.resource, tt.readOnly)
if !tt.wantErr(t, err, fmt.Sprintf("createBackupPVC(%v, %v, %v, %v, %v, %v)", ownerObject, tt.backupVS, tt.storageClass, tt.accessMode, tt.resource, tt.readOnly)) {
return
}
assert.Equalf(t, tt.want, got, "createBackupPVC(%v, %v, %v, %v, %v, %v)", ownerObject, tt.backupVS, tt.storageClass, tt.accessMode, tt.resource, tt.readOnly)
})
}
}
11 changes: 11 additions & 0 deletions pkg/nodeagent/node_agent.go
@@ -63,12 +63,23 @@ type RuledConfigs struct {
Number int `json:"number"`
}

type BackupPVC struct {
// StorageClass is the name of storage class to be used by the backupPVC
StorageClass string `json:"storageClass,omitempty"`

// ReadOnly sets the backupPVC's access mode as read only
ReadOnly bool `json:"readOnly,omitempty"`
}

type Configs struct {
// LoadConcurrency is the config for data path load concurrency per node.
LoadConcurrency *LoadConcurrency `json:"loadConcurrency,omitempty"`

// LoadAffinity is the config for data path load affinity.
LoadAffinity []*LoadAffinity `json:"loadAffinity,omitempty"`

// BackupPVCConfig is the config for backupPVC (intermediate PVC) of snapshot data movement
BackupPVCConfig map[string]BackupPVC `json:"backupPVC,omitempty"`
}

// IsRunning checks if the node agent daemonset is running properly. If not, return the error found
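
For reference, a hedged sketch of the JSON shape implied by the struct tags above for the new backupPVC mapping. The map is keyed by the source PVC's storage class; the storage-class names used here are hypothetical and the program is illustrative only.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/vmware-tanzu/velero/pkg/nodeagent"
)

func main() {
	// Hypothetical mapping: source PVCs using storage class "gp2" get their
	// intermediate backupPVC provisioned from "gp2-readonly" as a read-only claim.
	cfg := nodeagent.Configs{
		BackupPVCConfig: map[string]nodeagent.BackupPVC{
			"gp2": {StorageClass: "gp2-readonly", ReadOnly: true},
		},
	}

	data, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints:
	// {
	//   "backupPVC": {
	//     "gp2": {
	//       "storageClass": "gp2-readonly",
	//       "readOnly": true
	//     }
	//   }
	// }
	fmt.Println(string(data))
}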