diff --git a/ADOPTERS.md b/ADOPTERS.md index 44f25513c1..af71b859d4 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -40,7 +40,9 @@ We have integrated our [solution with Velero][11] to provide our customers with Kyma [integrates with Velero][41] to effortlessly back up and restore Kyma clusters with all its resources. Velero capabilities allow Kyma users to define and run manual and scheduled backups in order to successfully handle a disaster-recovery scenario. **[Red Hat][50]** -Red Hat has developed the [Cluster Application Migration Tool][51] which uses [Velero and Restic][52] to drive the migration of applications between OpenShift clusters. +Red Hat has developed two operators for the OpenShift platform: +- [Migration Toolkit for Containers][51] (Crane): This operator uses [Velero and Restic][52] to drive the migration of applications between OpenShift clusters. +- [OADP (OpenShift API for Data Protection) Operator][53]: This operator sets up and installs Velero on the OpenShift platform, allowing users to back up and restore applications. **[Dell EMC][70]** For Kubernetes environments, [PowerProtect Data Manager][71] leverages the Container Storage Interface (CSI) framework to take snapshots to back up the persistent data or the data that the application creates e.g. databases. [Dell EMC leverages Velero][72] to backup the namespace configuration files (also known as Namespace meta data) for enterprise grade data protection. @@ -89,6 +91,7 @@ If you would like to add your logo to a future `Adopters of Velero` section on [ [50]: https://redhat.com [51]: https://github.com/fusor/mig-operator [52]: https://github.com/fusor/mig-operator/blob/master/docs/usage/2.md +[53]: https://github.com/openshift/oadp-operator [60]: https://banzaicloud.com [61]: https://banzaicloud.com/products/pipeline/ diff --git a/changelogs/unreleased/4864-sseago b/changelogs/unreleased/4864-sseago new file mode 100644 index 0000000000..fdbe784915 --- /dev/null +++ b/changelogs/unreleased/4864-sseago @@ -0,0 +1 @@ +Add credentials to volume snapshot locations diff --git a/changelogs/unreleased/4943-phuongatemc b/changelogs/unreleased/4943-phuongatemc new file mode 100644 index 0000000000..6443fc603f --- /dev/null +++ b/changelogs/unreleased/4943-phuongatemc @@ -0,0 +1 @@ +Refactor BackupItemAction proto and related code to backupitemaction/v1 package. This is part of the implementation of the plugin versioning design https://github.com/vmware-tanzu/velero/blob/main/design/plugin-versioning.md diff --git a/changelogs/unreleased/5064-jxun b/changelogs/unreleased/5064-jxun new file mode 100644 index 0000000000..4de8a84605 --- /dev/null +++ b/changelogs/unreleased/5064-jxun @@ -0,0 +1 @@ +Exclude "csinodes.storage.k8s.io" and "volumeattachments.storage.k8s.io" from restore by default.
\ No newline at end of file diff --git a/changelogs/unreleased/5214-qiuming-best b/changelogs/unreleased/5214-qiuming-best new file mode 100644 index 0000000000..9c2ddd1de7 --- /dev/null +++ b/changelogs/unreleased/5214-qiuming-best @@ -0,0 +1 @@ +Uploader Implementation: Restic backup and restore diff --git a/changelogs/unreleased/5215-allenxu404 b/changelogs/unreleased/5215-allenxu404 new file mode 100644 index 0000000000..48275bb251 --- /dev/null +++ b/changelogs/unreleased/5215-allenxu404 @@ -0,0 +1 @@ +Refactor GCController with kubebuilder \ No newline at end of file diff --git a/changelogs/unreleased/5217-lilongfeng0902 b/changelogs/unreleased/5217-lilongfeng0902 new file mode 100644 index 0000000000..819069ac3e --- /dev/null +++ b/changelogs/unreleased/5217-lilongfeng0902 @@ -0,0 +1 @@ +Check vsc for null pointer \ No newline at end of file diff --git a/changelogs/unreleased/5218-jxun b/changelogs/unreleased/5218-jxun new file mode 100644 index 0000000000..d2274476e0 --- /dev/null +++ b/changelogs/unreleased/5218-jxun @@ -0,0 +1 @@ +Migrate backup sync controller from code-generator to kubebuilder. \ No newline at end of file diff --git a/changelogs/unreleased/5221-qiuming-best b/changelogs/unreleased/5221-qiuming-best new file mode 100644 index 0000000000..2de71e91ab --- /dev/null +++ b/changelogs/unreleased/5221-qiuming-best @@ -0,0 +1 @@ +Uploader Implementation: Kopia backup and restore diff --git a/changelogs/unreleased/5231-lyndon b/changelogs/unreleased/5231-lyndon new file mode 100644 index 0000000000..c6faa07e34 --- /dev/null +++ b/changelogs/unreleased/5231-lyndon @@ -0,0 +1 @@ +Add changes for Kopia Integration: Kopia Lib - initialize Kopia repo \ No newline at end of file diff --git a/changelogs/unreleased/5233-lyndon b/changelogs/unreleased/5233-lyndon new file mode 100644 index 0000000000..498111d471 --- /dev/null +++ b/changelogs/unreleased/5233-lyndon @@ -0,0 +1,2 @@ +Add changes for Kopia Integration: Kopia Lib - method implementation +Add changes to write Kopia Repository logs to Velero log \ No newline at end of file diff --git a/changelogs/unreleased/5234-reasonerjt b/changelogs/unreleased/5234-reasonerjt new file mode 100644 index 0000000000..34fd5768ad --- /dev/null +++ b/changelogs/unreleased/5234-reasonerjt @@ -0,0 +1 @@ +Remove reference to non-existent doc \ No newline at end of file diff --git a/changelogs/unreleased/5236-sseago b/changelogs/unreleased/5236-sseago new file mode 100644 index 0000000000..4d295cce85 --- /dev/null +++ b/changelogs/unreleased/5236-sseago @@ -0,0 +1 @@ +Check for empty ns list before checking nslist[0] diff --git a/changelogs/unreleased/5239-shubham-pampattiwar b/changelogs/unreleased/5239-shubham-pampattiwar new file mode 100644 index 0000000000..693e1c4fb3 --- /dev/null +++ b/changelogs/unreleased/5239-shubham-pampattiwar @@ -0,0 +1 @@ +Fix edge cases for already existing resources \ No newline at end of file diff --git a/changelogs/unreleased/5241-jxun b/changelogs/unreleased/5241-jxun new file mode 100644 index 0000000000..23e5c0897d --- /dev/null +++ b/changelogs/unreleased/5241-jxun @@ -0,0 +1 @@ +Controller refactor code modifications.
\ No newline at end of file diff --git a/changelogs/unreleased/5247-divolgin b/changelogs/unreleased/5247-divolgin new file mode 100644 index 0000000000..7a6e24cfec --- /dev/null +++ b/changelogs/unreleased/5247-divolgin @@ -0,0 +1 @@ +Fix nil pointer panic when restoring StatefulSets \ No newline at end of file diff --git a/changelogs/unreleased/5248-allenxu404 b/changelogs/unreleased/5248-allenxu404 new file mode 100644 index 0000000000..8551bc8cfa --- /dev/null +++ b/changelogs/unreleased/5248-allenxu404 @@ -0,0 +1 @@ +Equip GC controller with configurable frequency \ No newline at end of file diff --git a/changelogs/unreleased/5252-cleverhu b/changelogs/unreleased/5252-cleverhu new file mode 100644 index 0000000000..d478a57505 --- /dev/null +++ b/changelogs/unreleased/5252-cleverhu @@ -0,0 +1 @@ +Add csiSnapshotTimeout for describe backup \ No newline at end of file diff --git a/changelogs/unreleased/5259-lyndon b/changelogs/unreleased/5259-lyndon new file mode 100644 index 0000000000..c56e2a3ef9 --- /dev/null +++ b/changelogs/unreleased/5259-lyndon @@ -0,0 +1 @@ +Fill gaps for Kopia path of PVBR: integrate Repo Manager with Unified Repo; pass UploaderType to PVBR backupper and restorer; pass RepositoryType to BackupRepository controller and Repo Ensurer \ No newline at end of file diff --git a/changelogs/unreleased/5263-sseago b/changelogs/unreleased/5263-sseago new file mode 100644 index 0000000000..384f5f9184 --- /dev/null +++ b/changelogs/unreleased/5263-sseago @@ -0,0 +1 @@ +Don't move velero v1 plugins to new proto dir diff --git a/changelogs/unreleased/5271-sseago b/changelogs/unreleased/5271-sseago new file mode 100644 index 0000000000..339b81b696 --- /dev/null +++ b/changelogs/unreleased/5271-sseago @@ -0,0 +1 @@ + plugin/clientmgmt refactoring for BackupItemAction v1 diff --git a/changelogs/unreleased/5279-ywk253100 b/changelogs/unreleased/5279-ywk253100 new file mode 100644 index 0000000000..51dd99deaf --- /dev/null +++ b/changelogs/unreleased/5279-ywk253100 @@ -0,0 +1 @@ +Support pause/unpause schedules \ No newline at end of file diff --git a/changelogs/unreleased/5282-lyndon b/changelogs/unreleased/5282-lyndon new file mode 100644 index 0000000000..ef34f89939 --- /dev/null +++ b/changelogs/unreleased/5282-lyndon @@ -0,0 +1 @@ +Add changes for problems/enhancements found during smoke test for Kopia pod volume backup/restore \ No newline at end of file diff --git a/changelogs/unreleased/5283-blackpiglet b/changelogs/unreleased/5283-blackpiglet new file mode 100644 index 0000000000..1d08df3e73 --- /dev/null +++ b/changelogs/unreleased/5283-blackpiglet @@ -0,0 +1 @@ +Add backup status checking in schedule controller. \ No newline at end of file diff --git a/changelogs/unreleased/5291-cleverhu b/changelogs/unreleased/5291-cleverhu new file mode 100644 index 0000000000..0cf9b7a157 --- /dev/null +++ b/changelogs/unreleased/5291-cleverhu @@ -0,0 +1 @@ +Add more detailed comments for the labels columns. \ No newline at end of file diff --git a/changelogs/unreleased/5293-cleverhu b/changelogs/unreleased/5293-cleverhu new file mode 100644 index 0000000000..8c1c03d754 --- /dev/null +++ b/changelogs/unreleased/5293-cleverhu @@ -0,0 +1 @@ +Optimize code for restoring already existing resources. \ No newline at end of file diff --git a/changelogs/unreleased/5297-blackpiglet b/changelogs/unreleased/5297-blackpiglet new file mode 100644 index 0000000000..8e46fb5d0a --- /dev/null +++ b/changelogs/unreleased/5297-blackpiglet @@ -0,0 +1 @@ +Remove github.com/apex/log logger.
\ No newline at end of file diff --git a/changelogs/unreleased/5299-jxun b/changelogs/unreleased/5299-jxun new file mode 100644 index 0000000000..ce574df7a7 --- /dev/null +++ b/changelogs/unreleased/5299-jxun @@ -0,0 +1 @@ +Remove snapshot-related lister, informer and client from backup controller. \ No newline at end of file diff --git a/changelogs/unreleased/5308-lyndon b/changelogs/unreleased/5308-lyndon new file mode 100644 index 0000000000..5e9c4a544c --- /dev/null +++ b/changelogs/unreleased/5308-lyndon @@ -0,0 +1 @@ +Refactor the repoEnsurer code to use the controller-runtime client and wrap some common BackupRepository operations to share with other modules \ No newline at end of file diff --git a/changelogs/unreleased/5312-sseago b/changelogs/unreleased/5312-sseago new file mode 100644 index 0000000000..2856fc123f --- /dev/null +++ b/changelogs/unreleased/5312-sseago @@ -0,0 +1 @@ +RestoreItemAction v1 refactoring for plugin API versioning diff --git a/changelogs/unreleased/5314-allenxu404 b/changelogs/unreleased/5314-allenxu404 new file mode 100644 index 0000000000..9125ad1310 --- /dev/null +++ b/changelogs/unreleased/5314-allenxu404 @@ -0,0 +1 @@ +Change the restore status from partially failed to completed when restoring an empty backup \ No newline at end of file diff --git a/changelogs/unreleased/5318-sseago b/changelogs/unreleased/5318-sseago new file mode 100644 index 0000000000..edb2f09acf --- /dev/null +++ b/changelogs/unreleased/5318-sseago @@ -0,0 +1 @@ +plugin versioning v1 refactor for VolumeSnapshotter diff --git a/changelogs/unreleased/5319-lyndon b/changelogs/unreleased/5319-lyndon new file mode 100644 index 0000000000..5c87431c56 --- /dev/null +++ b/changelogs/unreleased/5319-lyndon @@ -0,0 +1 @@ +Fix issues 4874 and 4752: check that the daemonset pod is running on the node where the workload pod resides before running the PVB for the pod \ No newline at end of file diff --git a/changelogs/unreleased/5322-qiuming-best b/changelogs/unreleased/5322-qiuming-best new file mode 100644 index 0000000000..b44ed0b5f2 --- /dev/null +++ b/changelogs/unreleased/5322-qiuming-best @@ -0,0 +1 @@ +Fix PVB finding the wrong parent snapshot diff --git a/changelogs/unreleased/5329-kaovilai b/changelogs/unreleased/5329-kaovilai new file mode 100644 index 0000000000..81615413cd --- /dev/null +++ b/changelogs/unreleased/5329-kaovilai @@ -0,0 +1 @@ +Cancel the downloadRequest when it times out without a downloadURL \ No newline at end of file diff --git a/changelogs/unreleased/5331-danfengliu b/changelogs/unreleased/5331-danfengliu new file mode 100644 index 0000000000..a48cc7bec3 --- /dev/null +++ b/changelogs/unreleased/5331-danfengliu @@ -0,0 +1 @@ +Add opt-in and opt-out PersistentVolume backup to E2E tests \ No newline at end of file diff --git a/changelogs/unreleased/5347-qiuming-best b/changelogs/unreleased/5347-qiuming-best new file mode 100644 index 0000000000..c9f946e228 --- /dev/null +++ b/changelogs/unreleased/5347-qiuming-best @@ -0,0 +1 @@ +Fix restore cmd extraflag overwrite bug diff --git a/changelogs/unreleased/5350-blackpiglet b/changelogs/unreleased/5350-blackpiglet new file mode 100644 index 0000000000..c44dc8535f --- /dev/null +++ b/changelogs/unreleased/5350-blackpiglet @@ -0,0 +1 @@ +Clarify the help message for the default value of the --snapshot-volumes parameter when it's not set.
\ No newline at end of file diff --git a/changelogs/unreleased/5353-cleverhu b/changelogs/unreleased/5353-cleverhu new file mode 100644 index 0000000000..91e8153f8e --- /dev/null +++ b/changelogs/unreleased/5353-cleverhu @@ -0,0 +1 @@ +Add useOwnerReferencesInBackup field doc for schedule. \ No newline at end of file diff --git a/changelogs/unreleased/5355-danfengliu b/changelogs/unreleased/5355-danfengliu new file mode 100644 index 0000000000..e365367f33 --- /dev/null +++ b/changelogs/unreleased/5355-danfengliu @@ -0,0 +1 @@ +Add E2E test for schedule backup \ No newline at end of file diff --git a/changelogs/unreleased/5359-lyndon b/changelogs/unreleased/5359-lyndon new file mode 100644 index 0000000000..230ba3f724 --- /dev/null +++ b/changelogs/unreleased/5359-lyndon @@ -0,0 +1 @@ +Fix a repoEnsurer problem introduced by the refactor: the repoEnsurer didn't check the "" state of BackupRepository; as a result, the function GetBackupRepository always returned without an error even when ensureReady was specified. \ No newline at end of file diff --git a/changelogs/unreleased/5362-niulechuan b/changelogs/unreleased/5362-niulechuan new file mode 100644 index 0000000000..d528bfa8bb --- /dev/null +++ b/changelogs/unreleased/5362-niulechuan @@ -0,0 +1 @@ +Add backupController unit tests covering the BackupStorageLocation processing logic of the prepareBackupRequest() method diff --git a/changelogs/unreleased/5370-lyndon b/changelogs/unreleased/5370-lyndon new file mode 100644 index 0000000000..11351c3174 --- /dev/null +++ b/changelogs/unreleased/5370-lyndon @@ -0,0 +1 @@ +Pod Volume Backup/Restore Refactor: Rename parameters in CRDs and commands to remove the word "Restic" \ No newline at end of file diff --git a/changelogs/unreleased/5377-qiuming-best b/changelogs/unreleased/5377-qiuming-best new file mode 100644 index 0000000000..bd3cf12368 --- /dev/null +++ b/changelogs/unreleased/5377-qiuming-best @@ -0,0 +1 @@ +Fix restore error with the namespace-mappings flag diff --git a/changelogs/unreleased/5387-lyndon b/changelogs/unreleased/5387-lyndon new file mode 100644 index 0000000000..63cf32bf37 --- /dev/null +++ b/changelogs/unreleased/5387-lyndon @@ -0,0 +1 @@ +Fix issue 5386: Velero provides a full URL as the S3Url, while the underlying minio client only accepts the host part of the URL as the endpoint and the scheme must be specified separately. \ No newline at end of file diff --git a/changelogs/unreleased/5394-blackpiglet b/changelogs/unreleased/5394-blackpiglet new file mode 100644 index 0000000000..20d2cbcede --- /dev/null +++ b/changelogs/unreleased/5394-blackpiglet @@ -0,0 +1 @@ +Fix Test_prepareBackupRequest_BackupStorageLocation UT failure.
\ No newline at end of file diff --git a/changelogs/unreleased/5396-blackpiglet b/changelogs/unreleased/5396-blackpiglet new file mode 100644 index 0000000000..b4808596e7 --- /dev/null +++ b/changelogs/unreleased/5396-blackpiglet @@ -0,0 +1 @@ +Update Velero to use klog v2.9.0 \ No newline at end of file diff --git a/changelogs/unreleased/5412-allenxu404 b/changelogs/unreleased/5412-allenxu404 new file mode 100644 index 0000000000..3258f8c147 --- /dev/null +++ b/changelogs/unreleased/5412-allenxu404 @@ -0,0 +1 @@ +Change B/R describe CLI to support Kopia \ No newline at end of file diff --git a/cmd/velero/velero.go b/cmd/velero/velero.go index ece94f1617..07be4a8847 100644 --- a/cmd/velero/velero.go +++ b/cmd/velero/velero.go @@ -20,7 +20,7 @@ import ( "os" "path/filepath" - "k8s.io/klog" + "k8s.io/klog/v2" "github.com/vmware-tanzu/velero/pkg/cmd" "github.com/vmware-tanzu/velero/pkg/cmd/velero" diff --git a/config/crd/v1/bases/velero.io_backups.yaml b/config/crd/v1/bases/velero.io_backups.yaml index eebfedf705..e204e1157b 100644 --- a/config/crd/v1/bases/velero.io_backups.yaml +++ b/config/crd/v1/bases/velero.io_backups.yaml @@ -42,9 +42,17 @@ spec: CSI VolumeSnapshot status turns to ReadyToUse during creation, before returning error as timeout. The default value is 10 minute. type: string + defaultVolumesToFsBackup: + description: DefaultVolumesToFsBackup specifies whether pod volume + file system backup should be used for all volumes by default. + nullable: true + type: boolean defaultVolumesToRestic: - description: DefaultVolumesToRestic specifies whether restic should - be used to take a backup of all pod volumes by default. + description: "DefaultVolumesToRestic specifies whether restic should + be used to take a backup of all pod volumes by default. \n Deprecated: + this field is no longer used and will be removed entirely in future. + Use DefaultVolumesToFsBackup instead." + nullable: true type: boolean excludedNamespaces: description: ExcludedNamespaces contains a list of namespaces that diff --git a/config/crd/v1/bases/velero.io_podvolumerestores.yaml b/config/crd/v1/bases/velero.io_podvolumerestores.yaml index 036f58a06d..0a2acd64ba 100644 --- a/config/crd/v1/bases/velero.io_podvolumerestores.yaml +++ b/config/crd/v1/bases/velero.io_podvolumerestores.yaml @@ -116,6 +116,10 @@ spec: snapshotID: description: SnapshotID is the ID of the volume snapshot to be restored. type: string + sourceNamespace: + description: SourceNamespace is the original namespace for namespace + mapping. + type: string uploaderType: description: UploaderType is the type of the uploader to handle the data transfer. @@ -133,6 +137,7 @@ spec: - pod - repoIdentifier - snapshotID + - sourceNamespace - volume type: object status: diff --git a/config/crd/v1/bases/velero.io_schedules.yaml b/config/crd/v1/bases/velero.io_schedules.yaml index c3e0a69fa4..cd2010c504 100644 --- a/config/crd/v1/bases/velero.io_schedules.yaml +++ b/config/crd/v1/bases/velero.io_schedules.yaml @@ -32,6 +32,9 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .spec.paused + name: Paused + type: boolean name: v1 schema: openAPIV3Schema: @@ -53,6 +56,9 @@ spec: spec: description: ScheduleSpec defines the specification for a Velero schedule properties: + paused: + description: Paused specifies whether the schedule is paused or not + type: boolean schedule: description: Schedule is a Cron expression defining when to run the Backup.
@@ -66,9 +72,17 @@ spec: for CSI VolumeSnapshot status turns to ReadyToUse during creation, before returning error as timeout. The default value is 10 minute. type: string + defaultVolumesToFsBackup: + description: DefaultVolumesToFsBackup specifies whether pod volume + file system backup should be used for all volumes by default. + nullable: true + type: boolean defaultVolumesToRestic: - description: DefaultVolumesToRestic specifies whether restic should - be used to take a backup of all pod volumes by default. + description: "DefaultVolumesToRestic specifies whether restic + should be used to take a backup of all pod volumes by default. + \n Deprecated: this field is no longer used and will be removed + entirely in future. Use DefaultVolumesToFsBackup instead." + nullable: true type: boolean excludedNamespaces: description: ExcludedNamespaces contains a list of namespaces diff --git a/config/crd/v1/bases/velero.io_volumesnapshotlocations.yaml b/config/crd/v1/bases/velero.io_volumesnapshotlocations.yaml index b47713497a..3db023bff9 100644 --- a/config/crd/v1/bases/velero.io_volumesnapshotlocations.yaml +++ b/config/crd/v1/bases/velero.io_volumesnapshotlocations.yaml @@ -45,6 +45,24 @@ spec: type: string description: Config is for provider-specific configuration fields. type: object + credential: + description: Credential contains the credential information intended + to be used with this location + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object provider: description: Provider is the provider of the volume storage. 
type: string diff --git a/config/crd/v1/crds/crds.go b/config/crd/v1/crds/crds.go index b7199e7421..ad661abd0b 100644 --- a/config/crd/v1/crds/crds.go +++ b/config/crd/v1/crds/crds.go @@ -30,16 +30,16 @@ import ( var rawCRDs = [][]byte{ []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4WAo\xdc6\x13\xbd\xebW\f\xf2\x1dr\xf9\xa4M\xd0C\v\xddR\xb7\x05\x82&\x86a\a\xbe\x14=P\xe4\xec.c\x8ad\xc9\xe1\xa6ۢ\xff\xbd\x18R\xf2j%\xd9\x1b\a\xa8n\"\x87of\xde\xcc\x1bQU]ו\xf0\xfa\x1eC\xd4ζ \xbc\xc6?\t-\xbf\xc5\xe6\xe1\x87\xd8h\xb79\xbc\xad\x1e\xb4U-\\\xa5H\xae\xbf\xc5\xe8R\x90\xf8\x13n\xb5դ\x9d\xadz$\xa1\x04\x89\xb6\x02\x10\xd6:\x12\xbc\x1c\xf9\x15@:K\xc1\x19\x83\xa1ޡm\x1eR\x87]\xd2Fa\xc8\xe0\xa3\xebÛ\xe6\xfb\xe6M\x05 \x03\xe6\xe3\x9ft\x8f\x91D\xef[\xb0ɘ\n\xc0\x8a\x1e[\xe8\x84|H>\xa0wQ\x93\v\x1acs@\x83\xc15\xdaUѣd\xb7\xbb\xe0\x92o\xe1\xb4QN\x0f!\x95t~\xcc@\xb7#\xd01o\x19\x1d\xe9\xd7\xd5\xed\x0f:R6\xf1&\x05a\xd6\x02\xc9\xdbQ\xdb]2\",\f\xd8A\x94\xcec\v\xd7\x1c\x8b\x17\x12U\x050P\x90c\xabA(\x95I\x15\xe6&hK\x18\xae\x9cI\xfdHf\r\x9f\xa3\xb37\x82\xf6-4#\xed͂\xb2l;\x12\xf6n\x87\xc3;\x1dٹ\x12\x84K0f\xae9\xc5\xfa\xe9\xe8\xf1\f\xe5D\x04L\xf6\nb\xa4\xa0\xed\xae:\x19\x1f\xde\x16*\xe4\x1e{\xd1\x0e\xb6Σ}w\xf3\xfe\xfe\xbb\xbb\xb3e\x00\x1f\x9c\xc7@z,Oy&}9Y\x05P\x18eОr\u05fcf\xc0b\x05\x8a\x1b\x12#\xd0\x1eGNQ\r1\x80\xdb\x02\xedu\x84\x80>`D[Z\xf4\f\x18\xd8HXp\xddg\x94\xd4\xc0\x1d\x06\x86\x81\xb8w\xc9(\xee\xe3\x03\x06\x82\x80\xd2\xed\xac\xfe\xeb\x11;\x02\xb9\xec\xd4\b¡GNO\xae\xa1\x15\x06\x0e\xc2$\xfc?\b\xab\xa0\x17G\b\xc8^ \xd9\t^6\x89\r|t\x01AۭkaO\xe4c\xbb\xd9\xec4\x8dz\x94\xae\xef\x93\xd5t\xdcdi\xe9.\x91\vq\xa3\xf0\x80f\x13\xf5\xae\x16A\xee5\xa1\xa4\x14p#\xbc\xaes\xe86k\xb2\xe9\xd5\xff\u00a0\xe0\xf8\xfa,\xd6E-˓\xc5\xf2L\x05X-\xa0#\x88\xe1h\xc9\xe2D4/1;\xb7?\xdf}\x82\xd1u.Ɯ\xfd\xcc\xfb\xe9`<\x95\x80\t\xd3v\x8b\xa1\x14q\x1b\\\x9f1\xd1*ﴥ\xfc\"\x8dF;\xa7?\xa6\xae\xd7\xc4u\xff#a$\xaeU\x03WyHA\x87\x90<\xabA5\xf0\xde\u0095\xe8\xd1\\\x89\x88\xffy\x01\x98\xe9X3\xb1_W\x82\xe9|\x9d\x1b\x17\xd6&\x1b\xe3\b|\xa2^\xf3\xb1v\xe7Qr\xf9\x98A>\xaa\xb7Zfm\xc0\xd6\x05\x10\v\xfb\xe6\fz]\xba\xfc\x94\xe1wG.\x88\x1d~p\x05sn\xb4\x1a\xdb\xec\xcc\x18\x1cO\x96\"c\\7\\`\x03\xd0^\xd0D\xbf$\xb4}\x1c\x03\xab\xf90\x92-S\bhi\x80\xc97\x88o\x1e\x99FD\x9a\x8c\v\xbe\xcd]\xe8\x80\x0f\xcb\x13c`\f\x06\xc4\v\xd3\xf9\xf2E̿\xba\xb9hk\x93e\xebB/\xa8\\\x17k\x06ZX\xf0\xb5\\t\x06[\xa0\x90\x96\xdb\xcf\xcdQ\x8cQ\xec.e\xf7\xb1X\x95\xcb\xc5p\x04D\xe7\x12=A=\xed\x97Q\xc0\x85r\\\x88\xd4\xefE\xbc\x14\xe7\r۬5\xc4\xec{\xf5\\\bO\xcd\xcck\xfc\xb2\xb2z\x8bB-u\\õ\xa3\xf5\xad'3\\U\xc5b1\xf2=LM\xea\x1c\x8b\x90\xa7+\xa9{\xbcW\xb6\xf0\xf7?\xd5IXBJ\xf4\x84\xeaz\xfe\a6\xcc\xf7\xf1\x87*\xbfJg\xcb\x0fPl\xe1\xb7߫\xe2\n\xd5\xfd\xf8\x93ċ\xff\x06\x00\x00\xff\xff\xc8p\x98۸\x0e\x00\x00"), - []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xec=Msܸrw\xfd\x8a.\xe5\xe0\xf7\xaa4\xa3u吔n^\xd9[Q\xedƫ\xb2\xf4\x9cC*\a\f\xd93\x83\x15\t\xf0\x01\xe0ȓT\xfe{\xaa\x1b\xe0\xe7\x80$F+\xbd\xb7/e\\\xec!\x81F\xa3\xd1\xe8/4[\x17\xab\xd5\xeaBT\xf2+\x1a+\xb5\xba\x01QI\xfc\xe6P\xd1/\xbb~\xfaW\xbb\x96\xfa\xfa\xf0\xfe\xe2I\xaa\xfc\x06nk\xebt\xf9\x05\xad\xaeM\x86\x1fq+\x95tR\xab\x8b\x12\x9dȅ\x137\x17\x00B)\xed\x04=\xb6\xf4\x13 
\xd3\xca\x19]\x14hV;T\xeb\xa7z\x83\x9bZ\x169\x1a\x06\xdeL}\xf8a\xfd/\xeb\x1f.\x002\x83<\xfcQ\x96h\x9d(\xab\x1bPuQ\\\x00(Q\xe2\rlD\xf6TWv}\xc0\x02\x8d^K}a+\xcch\xae\x9d\xd1uu\x03\xdd\v?$\xe0\xe1\xd7\xf0#\x8f\xe6\a\x85\xb4\xee\xe7\xde\xc3_\xa4u\xfc\xa2*j#\x8av&~f\xa5\xdaՅ0\xcd\xd3\v\x00\x9b\xe9\no\xe03MQ\x89\f\xf3\v\x80\xb0\x1c\x9er\x15\x10>\xbc\xf7\x10\xb2=\x96\xc2\xe3\x02\xa0+T\x1f\xee\xef\xbe\xfe\xf3\xc3\xe01@\x8e63\xb2rL\x14\x8f\x18H\v\x02\xbe\xf2\xb2\xc0\x04\xf2\x83\xdb\v\a\x06+\x83\x16\x95\xb3\xe0\xf6\b\x99\xa8\\m\x10\xf4\x16~\xae7h\x14:\xb4-h\x80\xac\xa8\xadC\x03\xd6\t\x87 \x1c\b\xa8\xb4T\x0e\xa4\x02'K\x84?}\xb8\xbf\x03\xbd\xf9\r3gA\xa8\x1c\x84\xb5:\x93\xc2a\x0e\a]\xd4%\xfa\xb1\u007f^\xb7P+\xa3+4N6t\xf6\xad\xc7U\xbd\xa7\xa3\xe5\xbd#\n\xf8^\x90\x13;\xa1_F\xa0\"\xe6\x81h\xb4\x1e\xb7\x97\xb6[.s\xc8\x000P'\xa1\x02\xf2kx@C`\xc0\xeeu]\xe4ą\a4D\xb0L\xef\x94\xfc\xef\x16\xb6\x05\xa7y\xd2B8\f\f\xd05\xa9\x1c\x1a%\n8\x88\xa2\xc6+&I)\x8e`\x90f\x81Z\xf5\xe0q\x17\xbb\x86\u007f\xd7\x06A\xaa\xad\xbe\x81\xbds\x95\xbd\xb9\xbe\xdeIל\xa6L\x97e\xad\xa4;^\xf3\xc1\x90\x9b\xdaic\xafs<`qm\xe5n%L\xb6\x97\x0e3\xda\xc8kQ\xc9\x15\xa3\xae\xf8D\xad\xcb\xfc\x9f\x1a\x06\xb0\xef\x06\xb8\xba#1\xa3uF\xaa]\xef\x05s\xfd\xcc\x0e\xd0\x01\xf0\xfc\xe5\x87\xfaUt\x84\xa6GD\x9d/\x9f\x1e\x1e\xfb\xbc'\xed\x98\xfaL\xf7\x1eCv[@\x04\x93j\x8b\xc6o\xe2\xd6\xe8\x92a\xa2\xca=\xf71\xeb\x16\x12\u0558\xfc\xb6ޔ\xd2Ѿ\xff\xb5FKL\xae\xd7p\xcb\"\x066\bu\x95\x13g\xae\xe1N\xc1\xad(\xb1\xb8\x15\x16\xdf|\x03\x88\xd2vE\x84Mۂ\xbet\x1cw\xf6T\xeb\xbdhd\xd9\xc4~y\x81\xf0Pa6804Jne\xc6\xc7\x02\xb6\xdat\xf2\u008b\xab\xf5\x00d\xfc\xc8Rˬ|P\xa2\xb2{\xedH\xfe\xeaڍ{\x8c\x10\xba}\xb8\x1b\rh\x90\t\xa8\xb1X\xa9-\xe6tΞ\x85t\x84\xde\tL @\xf0\x95%L\x03\x8f%Mm\xc1\xd5F\xf1)\xfd\x82\"?>\xea\xbfX\x84\xbcffmt\xc5\x15lp\xab\rF\xe0\x1a\xa4\xf1\xd4\x19\x8d!\xc2XFI\xd7n\r\x8f{$2\x8a\xbap\x81聾\xf7?@)U\xedp}\x02mb\x83=Q\x18\x8c_\x81}\xd4_\xd0:\x99-\x10\xefctP\x8f\x80\xcf{t{4t\xf0\xf8\x05˲\xc8\"7\x1d\x89\x9dxB\x10a\xdbY&\x16\x05T\xba\x11\xdf\x166\xc7\x06٩\x05n\xb4.P\x8c\xc5+~ˊ:Ǽ\xd5w'\xcc3Zݧ\x93\x01l\v\b\xa9Hܐ\xf6%\xf4T\xf7\x964Zdq\xc2 Ё\x97\xca\xc3ce\xb5\xc7(gS\x93\x0e\xcb\bn\xb3\xdb\alc\x88M\x817\xe0L}\xcaH~\xac0F\x1c'\xe8\xd2\xd8E\xa9di\xfb\a\xf1[Ȍ\x15w+d\x992^͋(k\xff\x81\x89\xb2\xd7\xfai\x89\x10\xffF}:\x85\x01\x19\x9b\x97\xb0\xc1\xbd8Hm\xc2҃\xfe\xde \xe07\xccj\x871\xfe\x17\x0er\xb9ݢ!8\xd5^X\xb4\xdef\x98&ȴ\f\x04\x96\x1a\x93\x9by\xb2\x8en#\x89Sy\xe5S\xa8Ӂ\x1e\x9f\xab\xa6\x11\xa2$\xa6\xc8\xdeS\xb9<ȼ\x16\x05He\x9dP\x99_\x8fh\xf1:]\x0f\xccm\xf2\t\xce^\x8f4\x98\xd3N\ft\x8aV\b\xda@I\x8a\xf4\xb4\xebX\xf5wmj\xd9\x1bA\xd2I{\x165u\x816L\x95\xb3\xb2\xead\xc0\xd5$\xe8vG\xbc\x11V\x88\r\x16`\xb1\xc0\xcci\x13'\xc7\xd2&\xfb\x96\"\xd7&\xa8\x18\x91pC\xe5\xd7-l\x06$\xb0f\xdc\xcbl\xef\xed#\xe2 \x86\x03\xb9F˧\\TUq\x9cZ$,\xed|\x98d\xee\xa0wm\xe1ȏ\xe1\xc5\x0e\u007f\xd7\x12dc\xd7\x16\xa4䐲-;\x80ӳ\xcb\xfe\xffI\xd8F쿀i\xefN\x86\xbe.\xd3\x12I%\xf9Aw[\xc0\xb2r\xc7+\x90\xaey\xba\x04\x91\x8c\x95n\xfe\u007f\xe0\x8d9\x9f\xe3\xef\xc6#_\x95\xe3gwe\t\"\xedJ;\xfd?র\xb2x\b\xba\"yC~鏺\x02\xb9m7$\xbf\x82\xad,\x1c\x9a\xd1\xce\xfc\xae\xf3\xf2\x1a\xc4H\xd1w\xd4J\xe1\xb2\xfd\xa7ody\xd9.R\x97H\x97\xf1`o\xbf6\xf6\xfcP1/\xc0\x05\xf6\xec\xa5\xc1\xd2G\f\x1e\x99\x9a\xdd\x13\xb6\xa8>|\xfe\x88\xf9\x1cy \x8d\xf3N\x16\xf2a\x84l\u007f\xea`\x94\xa7.#\x98>\xad\u007f\xe3cAW 
\xe0\t\x8f\xdeb\x11\nhs\x04M4\xe1\xe9\x9c\x12\x87\x83R\xccdOxd0!ʴ8:\x95\x15|{\xc2cJ\xb7\x11\x01\t'iC\xf4\x8c(I\x0f\x98\x10\x1c\x94H'\x1epİ\x91Eˋ\x83tAҴ\x86\xf6/Xf\xbbm\xbdh+o\xec;뷈N\xc1^V\x89\v%5\a\x16\xf9\xb441ï\xa2\x90y;\x91\xe7\xfb;5m\r\x0f\xdbg\xed\xee\xd4\x15|\xfa&m\b\xdb~\xd4h?k\xc7Oބ\x9c\x1e\xf1\x17\x10\xd3\x0f\xe4㥼\xd8&:\xf4\x83\x8f\t\xcc\xed\u06dd\xf7\xf0\xda\xed\x91\x16\xee\x14\xf9-\x81\x1e\x1cJ\xf6\xd3\xcd\xeb\x87a+k\xcb\xd1E\xa5ՊU\xe5:6\x93'v\"Hm\x06;r\x8aZ;\xa9\x9f0\x11\xec#i\x12?\xde\a\xc7\v\x91a\xde\x04\xc78\xa4+\x1c\xeed\x06%\x9aݜ\xe2跊\xe4{\x1a\n\x89R\u05f739,M\xb57-\x88\xee|\x19\x99\x15\x9d܄^\xcdf/v\x9d\x88\xe4Nw]^\x11\xabX\xb6?\x16\xa9+\xf2\x9c/\xe1Dq\u007f\x86\xc4?c/Nu\xbfG\xcck\xc8Rp\x90\xf1\u007fH\xcd1C\xff/TB\x9a\x843\xfc\x81\xef\xd4\n\x1c\x8c\rQ\xac\xfe44\x83\xb4@\xfb{\x10\xc5\xe9\x1dAdq\x9ad\v\x16^\x91\xeb\xed\x89\xc5r\x05\xcf{m\xbdN\xddJ\x8c\x86T\x87MZ\xb8|\xc2\xe3\xe5Չ\x1c\xb8\xbcS\x97^\xc1\x9f-nZkA\xab\xe2\b\x97<\xf6\xf2\xf7\x18A\x89\x9c\x98ԍ\xef.SMe\xf2%\x1bK\x80\x06\xb6\x17vd\xe6\xcea\x9dć\x95\xb6\x91k\x88\tT\xee\xb5u>\xb280Kωb\x81\xe7\xa1\x10\xbd\x02\xb1\xf5W\xa6\xda4\x97a$\xf6F\x01W\xda5;/ai\x1bۈ\x98\aJ\x8e\xd5ew\x82\xbd<\xbd\xf47d<\x89\xc8ظX\x84[\x19\x9d\xa1\xb5\xf3,\x92 \xad\x17\x82\x84m\x80Px\a\xc6\xdf4\xcd\a%\x9b\x96n\x90\x12\x91\xce4\xe5?}\xebE/\xe9\xf0\xd3\xef%\xe6;\x17/\xe03[\x96b|\xa5\x9a\x84\xe2\xad\x1f\xd9\x1c\x93\x00Ȼ\x06fW\xf3QO\xb7 \x03#\xfd\x11\xd4t)\xd5\x1dO\x00\xef_]\xad\xb7B\x12_b\xb8\xdf6c;\xa2\xb7\x0f\xf8\xf4\xa6ZD\x9a#\xf7\x06\a;w\x1a\xe7&C1\x11\xa4Ү\x1fN \xb8\x95\xce\xdfY\xd8Jc]\x1f\xd1T\xa6\xa8\x17N\u007f\xd7\xce\xf5\x9c\xd4'c^\xe48\xfd\xeaG\xf6\x02Y{\xfd\xdc\\LO^f\xc6\x1a_\n!\xc8-H\a\xa82]+\x0e\xbf\xd0Q\xe7)\xfc\x16x\x01\x9dL\xb24\x01A\rU]\xa6\x11`\xc5\\'\xd5l\x9c\xa6\xdf\xfd'!\x8b\xb7\xd867u\u007f\x1fk\x83mk.\xf2\xfb\x19\x06\xa5\xf8&˺\x04Q\x12\xe9Sݞ\xad\xbf\xfe\x1f\xecx\x9b\x04\xc0pY\x8d8M\x87\xaa*Х\x9eH\u007f\xddO\xc7\xc4\xca\x1c[\xc5\x1c\xb8@+\x10\xb0\x15\xb2\xa8M\xa2\x84<\x8b\xb6\xe7\xf8\x1aAX\xbc\x9e\x13\x916\xf9\x8aI\x91\x10\x88M4\x16\xe7\xa5ue\xd2M\xc5{\x83i\xe6\xd9RP\xba1\xcf*#\x89\x97\xf4k[h\x81ń:~7\xd1N\xdaw\x13m\xa1}7\xd1&\xdbw\x13m\xb9}7\xd1B\xfbn\xa25\xed\xbb\x89\xf6\xddD\x9b\xeb6'\xad\x970\xf2\x9f*L\xbc\\\xc4\"\xe1zz\x0e\xc5\x19\xf8!\x9b\xe2\xd6\u007f\xb6\x90\x9aay\x17\x1f\x15ɫ\r\xdfC\xac\xf8S\x8e\x18\atI\x17\x9d*iS.\xe9\x804\xec\xed3\xaf\x17\x920\x93\xd2)\xe3ٷ)\t?Ki>\xc3<\xd36ͦI4\xd5\xcd$\x11:4\x9f\x84\x90\xd9\xdb\xcf!\x19\xe6밝\xdb`\xfaw\xcfAMH\xc5YH\xc0\x99O̝\xa3\xd7\xc8\xf5\x18\x12\xcc\f\x12F\xff0\xf4ZȒ\x99\u038d\t7A\xe8\xc4\xe1\xfdz\xf8\xc6\xe9\x90)\x03\xcf\xd2\xed#Kyޣ\xe2;,\xb5맽6\xfc\x16\xbe\xcd\x19\xd3\x11\xb4\x01%\v&\xe7\f\xb7\x0e\xc8\v\xbfVޅ;\xfb\\λ\x1fi\xb94/Π\x19f\xc8L\x88\xe8s\xaf\x8c\xd2\x13\x85\xd3sd\xe6\x93Z\xceɌ\x19\xe7\xbdL\x02]·I\xf1\x1c\x17r_^\x90\xf1\x92\x98\xed\xf8\xbb/\xc6RrZ^\x94ɲ\x98\x10\x98\x98\xbf2\xccL\x99\ayF\xd6J\x12q\x963T\xce\xceK\ty \xb3\xebH\xceF\x89\xe4\x99\xcc\x02\x9e\xccA\x99\xcb.Y\x88J\x9df\x9e\xa4\xe7\x94̂\xe6|\x93\xe5L\x92\xd7\xcb\x17}\r\x1bxZ\xd4,f\x83,\xda\xc8\xf3\xf8-\xe6{\x9c\x93\xe5\xb1H\xb1\x17ft\xb4\x19\x1b\x13\xf3\x9e\x9b\xc71\xccӘ\x00\x9a\x92\xbd1\x91\x9d1\x01q6g#5'c\x02\xf6\x82ڝ咙\x97\xf1/HaQ\xbf\x15\u007f+\x8ez\xe9´\x19\x98\x8bK\x16\xfa\xaf\xa3\ued17\x8d\xd54o~\xc6,O\xe9\xf6盟e]8Y\x15\x1c\xce?\xc8<\xea4\xba=\x1e\xe1Y\x16\x05\x89\xd5\xdf4\u007f\xe6\xb492\xa4_\xbf\xb4\xec\xb9\x1e\x19\xd1\xc2\xc23\x16\x05\x88\x18s\x9d\xac<\xf3\x1fAgz\x85$\xf3\xe9\xc0\x85O>÷\xd2W\x9e\x83\xf9K\xaeX\xc4\xd3\xed\xb1$(ͷ\xa3g\xb8\x1f\xf3\x06\xa2\xb7e\xf9\xd9_k4G\xd0\a4\x9dŰ\xf0\x1d\x81?h\xb6.\xbaĭ 
?\xfc\xa7\xf7#ù;p\xf0Ay\x15\x16\x05;\u0091\xe1Й/ڽ&\xf1F~\xc0D\xd7x\xe0C\xb7\xa3#\xef\x97l\xcf\xd4$\xfc\xb7u\x1d\xcew\x1e\x16\xd5\xf6\x9b8\x10/w!f@\xa6&է]@-&ѿ\x95+\xb1\xe4L$[QiI\xf2o\x91\x1c\u007fFR\xfc\x19N\xc5ynE2\x99R\x92\xdf\xdfĹxC\xf7\xe2-\x1c\x8c\x97\xb9\x18\v GI\xed)\xe9\xeaI\x97\xab\xc9\xf7\v)\x97\xa3\xcbW\x00\xf3i\xe8\t\xe9\xe7\t\x97\x03K\x98&\xa4\x99\x9f\x97^\x9e@\xc37r>\xde\xc8\xfdx\v\a\xe4m]\x90E'd\x91sf_\xbf8\xba\xacM\x8ef6\x18\x9f\xcaj\xb3L6\xf2\x17\x86s\x8e\xbe\xa8mj\xa4P\xaf\x81i\x1a\v)\xb7_\u007ff\xf0\xb3T\xb9\xdf\x0fb\xaa\x9e\x1e\xe7bJ\x9c\xff\xde\x1a\x15\x9d}\x16\a:\xbaT\xb0X\t\xc3ն6G\u007f1i\xd7\xf0Id\xfbaG\xd8\v\v[mʨ\xc1t\xd9\xde\xc8\\7\xa3\xe8\xc9\xe5\x1a\xe0'\xdd^z\xf5+*XYVő\xfc\x00\xb8\x1c\x0ey\x19\x03D\x99dž\xba>\xa1\xdc͂\xaf\xf70\xec\x1d\xb9\xbck\x8a\xddd\x85\xae\xf3\x16\xfa\xc4\xe6\tu\x84\xfb\xafl\x93p\x99\x90\xac+\x99\x12\xac\x8e\xc6\xe7\x1bWT\xf9\xf1\xf5/\xf3\xac\xd3F\xec\xf0\x17\xed\v6-Qb\xd8{P\xad+Ȋ\xe6r\xbd\xf9\xf6\"\xa6CC\xe9\xa8\x11\xb0.g&\x9c\x86\ue793\xb0\x8c\t\x91\x99\xf3\xe7\\\xb1\xb0\x98\xc7\xc7_\xfc\x02\x9c,q\xfd\xb1\xf6\x17\xa7\xabJ\x18\x8bD\xcdfa~І\xfe\xbb\xd7ϱ؆\x0ek\xfeq\x8c\xb7A\xce\xcb\xe1\xfbٳ\xb0?\f\xcaO5$Zbԯ\xf1Q=Ǭ\xb7I\xfe\x94G\x1d\xf2)8\xbd\n|\x1c\xb2\xe0\xefj^\xb7\xccϔԞ\xaaQ\xc6u\xb9\x96\xab\x94\xf9\xf2]\xa1&a\xc8\xee\xaa\r\xd7\xe8\t\xa5\xbd\xb8\xa6\xcd\xcb\n\x95\xf9d\x94A\x9d\xc8\xf9}\xba=\x1d\xc1\xd5\x00M\xde+T\xd6\x16\xcez\x16\xb6Mx\x89*\xd2\x0e\x9c\x1fɖ,A\xc3\x1c\xf0\x80\n\xb4\xe2\xfc\x16\xae~\xe3+V\x8e\xc7D\xa0\xf6\xa1\x84\x04\x9a\xba*\xb4ț\x13\xde\xe8\xacP\xe5\xf0\x91\xe5\x979\xa0ygg`rq\xb0\xad61\"\x9c\nL\xafXn \x17\x0eWQ\xa0I\xb2/\xcal\x99\x95CF\xb7\x1f\x9c#\xbf f+\x8f+\xcdM\x8dl\xf4\xaf\xd3N\x14\xa0\xear\xe3\x15\xbah:\xc4\xf6\xef\xa4ޜ\r\x19O3\xc7\xcb/L*\x87\xbb\x93\x98\xe2\xe9\xcan\x1b\xfe9{e\xedȩ\x95\xd9:\xcb\xd0\xdam]\x141Ӿ\xe5\xdc\xd7_&\xe7\xf2-\xd68\xe3N^\x04r\"`S\x88\xceg\x02\x96h\xad\xd85\xc5͞I\x03\xedP!\x1b>\xb1x\xa3w\f\xbḇai/\x1f\xc1\x12\x99\xabE\x98\xa0\xb9\xf9\xef\xf5z\x17\xb3\v\n\xbd\x83\xad,\xb8k\xa8_\x19T\xf3\x994\xf9VI\x93\xa2\xca?\xb5\x1d\x896\x1c|\xe6\x8d\xe8\xea\xbcb!w\x92\xf4 m\xd2N\x98\x8d\xd8\xe1*\xd3E\x81\x9cf~\x8a\xd7[\x1e\u0590\x9f\xf7\x05\x85]\\\xdaO\xfd\xbe!\xd2\xe1w\xdbW\xc6\x10\xbe@!\x97\xfdt\xd2`WG\xf7\x04!\xcd\x13\x9f\xa5\xba=\x15\xa2\x15gO1\xed\xf7m\x0eX\x90\xab\x1eNS\x80\xf6*\x18\x83qo\xb6\x14\xbfis\x05\xa5T\xf4\x0fY\xfc\x1c\x8ah\x06\x9f\x85?\u05ec[\xc0\xfb\x9e\xfa\xb4i\xd2=E\x8á\x982U㩱+\xf8\x8c\xa7\x96\x95\xcfvŜ\x83o\xb12\xbb\xd4\xe5N\xdd\x1b\xbd#\u007f8\xf2\xb2\x15^\x91w\xf7\xc28)\x8a\xe2\xe8'\x99\x9c=\xf2\xe2#\x92⚴^\xe2d\rX.Q6t\xeb\\o\xa9<'p\x9e\xeaF\xd7n 
J:Q\x14\x0f\xfb3\xb05|\xd6\x0e\x9b\x88\xae\x1c\xc2$\xe1\x8b֭p\xbb\xd5\xc6yO\u007f\xb5\x02\xb9\r\xd6P\x04.\x9d\t\xbe\x91\xf2UoA\xba\xeeR\xbe\xe3^vt\f\x1fB\xae\xf0T\x8a\xa3\xcfY\x14YF\xc66^['\x8a\x88|\xfb]9Plv\x12\xf7a\xfe\x97\x88\x1dvB\xf0\xbb~\xff\xf6\xc3\xf1V\xbb18O9\xce)\xf7\xb2=\xaa\xe9\x803\x8dQ\xc1\xb3\x91Α<\xed_ف#\tZ\x14`I\xa6L\x94\t\x9c\x93\xec\xfc\x9et\xef\xddt\bq\xe8ߴ\x9d\xa7TwX\x9c\xa6m\xd90\t&\x96\xe5\xbfY\x92\xb6\x19K[\x99\xed\x85\xda\x11S\x19]\xef\xf6\r_NhƩ\b\\MHAU\xd4;b\xf5p]\xe2j\xa3z!\x98p\x81\x92\xf7\xd0\x15\xd9\xd3$\xa6!$\xdcT^\xbf\x0e\x85\xffV[\xa3\xcbU\xd8\v\xbe\xe5\xb8\n\xa1\x11#5\xd9\xff\xe4\xc8O\x00\xed*l1\x1bT\x15*\x106\xe0\x93\xf0A\xd5\xfc\xb6\xce\xc5)\x9c0.իx\x18t^p(\x18r\x1c߇\x10\xf8\xf1\x1f\x96ݎk\xe0_\x81\x95\xaa)\xfa\xee\x03K\x9e\x15,\xf9\x19\x06\xd9W\x8f^`\x9dx\b\x03\u007f`\x88\xfe\xdf\xd6\x158\xb4\x1a\xe6S\x8aM\xf9u\xd4}\x94\x9dK\xa7\xbc\x83\x18\xec\xc0\b=\xfe$\xb7\xfeN-#\xac\xff\xfcwϺ=$\xd9,\xeff\xcd\x15\xb6DZ\xbb\x03>be0\x13Q\xc7\x03\xe0\xbe@\xb2#,\xe2\xd0\x12zw\x96\xc9{x\x99\x13\xf7\x9a\x1e\\\xf3\xf7\b^ǯ9\xbc\xccw{3\xc7\xeduW\xf7,\xb8\x06\xfa\xd2\x19\xfb\x8f\xd0-\xe2\xb9\x05\b\x11\xdf-\xb2\x8c֛[\xf4\xddz\xae[\x83\xe3D\xb5\xeb\x91;\xf7J\xce[T\x0f\x9c\xbd\x000\x85\xaa\xf1\x06>\xd1\x145+\xb0\xbc\x00\b\xdbqS\xae\u0082\x0f?z\b\xc5\x1e+\xe6\xd7\x02\xa0j\x94\xef\xef\xef\xbe\xfc\xff\x87\xc1c\x80\x12M\xa1ym\x1dR\xfc\u0080\x1b`\xf0\xc5m\vt@?\xd8=\xb3\xa0\xb1\xd6hPZ\x03v\x8fP\xb0\xda6\x1aAm\xe1\x97f\x83Z\xa2Eӂ\x06(Dc,j0\x96Y\x04f\x81A\xad\xb8\xb4\xc0%X^!\xfc\xe9\xfd\xfd\x1d\xa8\xcd\xefXX\x03L\x96\xc0\x8cQ\x05g\x16K8(\xd1T\xe8\xc7\xfey\xddB\xad\xb5\xaaQ[\x1e\xf1\xec[\x8f\xabzOG\xdb{G\x18\xf0\xbd\xa0$vB\xbf\x8d\x80E,\x03\xd2h?v\xcfM\xb7]\xc7!\x03\xc0@\x9d\x98\f\x8b_\xc3\x03j\x02\x03f\xaf\x1aQ\x12\x17\x1eP\x13\xc2\n\xb5\x93\xfc\xbfZ\xd8\x06\xacr\x93\nf10@\u05f8\xb4\xa8%\x13p`\xa2\xc1+\x87\x92\x8a\x1dA#\xcd\x02\x8d\xec\xc1s]\xcc\x1a\xfeUi\x04.\xb7\xea\x06\xf6\xd6\xd6\xe6\xe6\xfaz\xc7m\xdd'\xe9[\xb6\x11x\x03V7\xa7\xd3y4l\x94\x12\xc8\xc6Bx\x8c\x87\xcfh,/\x16\xb0p9F\x83\x1f\x95@\x82\x0e/\xdc\xde\x12x\xd8t\xbcf\xd9\x13\x02\x8b\xd8 \xe5 D\x0f\x89\x03\f\xc0\u007fH\xf8@\x92\xab yr\xbaZ\b\x92\x8b\xa3p\xd2R*\x10J\xeeP\xfb\xd9H+\x14\xceK\x80\r\xeeف+\x1d\xb6\xdeI#\xfc\x8aEc1uz\x99\x85\x92o\xb7\xa8\tN\xbdg\x06\x8d7\xfd\xa6\x112\xad\xca\xc0\t\xffIb\x9e\xec\xa3#$q\xaa\xdb\xf9\xd4\xd2I\x1c\x8d\xcfUl\xb4P\xd26\xee\xfc\x96\xfc\xc0ˆ\tw\x94\x99,\xfc~X\xbb\xae\x94H\x98!\xf2ɚ\xbd\xa0\x88+'J\fL\x03%\x11\x94\x86\x8a\xec\xa1Ӯc\v\xaekS\xdb\xde0\x92vʳ\xa8n\x04\x9a0\x95W/\x9d\f\xb8\x9a\x04\xddR\xc4\xdb҂mP\x80A\x81\x85U:\x8d\x8e%\"\xfb\x96#\xd7&\xb0\x98\x90pC\x1b\xa6\xdb\xd8\fHp\x06Ξ\x17{\xaf,\x88\x83\x1c\x1c(\x15\x1aw\xcaY]\x8b\xe3\xd4&a\x89\xf2a\x92\xb9\x83\u07b5\x85#?\x86\x97:\xfc]ː\x8d][\x90\x92C̶\xec\x00V\xcdn\xfb\xff&b\xa3\xd8\u007f\x01\xd3ޝ\f}]\xa6%\x94rrgﶀUm\x8fW\xc0m|\xba\x04\x91L\xadn\xfe\u007f`\u009c\xcf\xf1w㑯\xca\xf1\xb3TY\x82HTi\xa7\xff\a$\x8aS\x16\x0fAWd\x13\xe4\xd7\xfe\xa8+\xe0ۖ \xe5\x15\xf9M\x16\xf5\x882\xdft^^\x03\x199\xfa\x8eZ\xc5l\xb1\xff\xf8\x95,/\xd3\x05\\3\xf12\x1e\xec\xed\xd7h\xcf\x0f\x15\xf3\x02\\p\x01\x1a\xae\xb1\xf2\x81\x9fG\x87\xcd\ue273\xa8\xde\u007f\xfa\x80\xe5\x1cz \x8f\xf3N6\xf2~\xb4\xd8\xfe\xd4\xc1(\xcf\xddF0}Z\xffƇ\xf4\xae\x80\xc1\x13\x1e\xbd\xc5\xc2$\x10q\x18M4\xe1\xe9\x9c\"\xc7\xc5\x16\x1d\x93=\xe1с\t\xc1\xc2\xc5ѹ\xac\xe0\xdb\x13\x1es\xba\x8d\x10Hk\xe2&\x04A\t\x93\xf4\xc0!\xc2Ŗ\xf2\x91\a.\xf0\x1be\xd1\xf2\xe6 
_\x90\xc4\x16q\xff\x82m\xb6d\xeb\x05\xcd\x1da\xdf\x19O\":\x05{^gn\x94\xd4\x1c\x18t\xa7%\x86~\xbf0\xc1\xcbv\"\xcf\xf7wr\xda\x1a\x1e\xb6O\xca\xde\xc9+\xf8\xf8\x95\x9b\x10}\xff\xa0\xd0|R\xd6=y\x13t\xfa\x85\xbf\x00\x99~\xa0;^ҋm\xc2C?\x86\x9c\xc1ܾ\xddy\x0f\xaf%\x0f7p'\xc9o\t\xf8p7\x02~\xbay\xfd0lUc\\\x90X*\xb9r\xaar\x9d\x9a\xc9#;\x13\xa4\xd2\x03\x8a\x9c.\xad\x9d\xd4O\x98\t\xf6\x914\x89\x1f\xef\xef8\x04+\xb0\x8c1N\x17\x99g\x16w\xbc\x80\n\xf5nNq\xf4[M\xf2=o\t\x99R\u05f739,O\xb5\xc7\x16Dw2\x047l+:\xb9\x19\xbd\"\xb1\x17\xbbN\x04䧻.\xefȩXg\u007f,b\x97\x95\xa5\xbbKe\xe2\xfe\f\x89\u007f\x06-Nu\xbf_\x98א\x15s!\xd2\xff&5\xe7\x18\xfa\u007f\xa0f\\g\x9c\xe1\xf7\xeejT\xe0`l\x88b\xf5\xa7\xa1\x19\xb8\x01\xa2\uf049ӫ\x9e\xc4\xe6\x14\xc9\x16\x14^\x91\xab\xed\x89\xc5r\x05\xcf{e\xbcNu\xa1\xd9E\x90\xdc\xc0\xe5\x13\x1e/\xafN\xe4\xc0坼\xf4\n\xfelq\xd3Z\vJ\x8a#\\\xba\xb1\x97\xdfb\x04erbV7w\x05\x9dk*\x93/\x19-\x01\x1a\xd8\u07bb\x92\x99;\xb7\xea,>\xac\x95I\xdc&M,\xe5^\x19\xeb#\x8b\x03\xb3\xf4\x9c(\x16x\x1e\n\xd1+`[\u007f\xf3\xadt\xbc\xd3$\xb17\n\xb8\x12\xd5̼\x84%2\xb6\x111\x0f\x94\x1c\xab\xcb\xee\x04{yz\xe9/:\xdd$\xacp\xc6\xc5\"\xdcZ\xab\x02\x8d\x99g\x91\fi\xbd\x10$l\x03\x84\xcc;0\xfe\xc2p>(\x19[\xbeAJH:Ӕ\xff\xf8\xb5\x17\xbd\xa4\xc3O\u007f/1߹\xeb\x02wf\xab\x8a\x8doƳ\x96x\xebG\xc6c\x12\x00y\xd7@\xef\x1aw\xd4\xf3-\xc8\xc0H\u007f\x045]qy\xe7&\x80\x1f_]\xad\xb7B\x12_b\xb8\xdfƱ\x1d\xd2\xdb\a\xee\xf4\xe6ZD\xcaE\xee5\x0e(w\x1a\xe7&C1\x13\xa4T\xb6\x1fN \xb8\xb5*\xdf\x19\xd8rml\u007f\xa1\xb9L\xd1,\x9c\xfe\xae\x9d\xeb9ɏZ\xbf\xc8q\xfa͏\xec\x05\xb2\xf6\xea9\xe6\x17L^Ŧ\x9a\xbb\x14B\xe0[\xe0\x16P\x16\xaa\x91.\xfcBG\xddM\xe1I\xe0\x05t6\xca\xf2\x04\x045\x94M\x95\x87\x80\x95\xe3:.g\xe34\xfd\xee?3.ނlv*\r#\xd5\x06d\x8b\xf9\x18\xfdD\x91\x8a}\xe5US\x01\xab\b\xf5\xb9n\xcf\xd6gq\f(\xde\xe6r8\xb8N\x8dXE\x87\xaa\x16hsO\xa4\xcfڠcbx\x89\xadb\x0e\\\xa0$0\xd82.&.\xcfO\xdbY\xb8=\xc7\xd7\b\xc2\xe2\xf5\x9c\x88\xbc\xc9W\x0e\x15\x19\x81\xd8Lcq^Z\xd7:\xdfT\xbcטg\x9e-\x05\xa5\xa3yVkN\xbc\xa4^\xdbB\v,\xc6\xe4\xf1\xbb\x89vҾ\x9bh\v\xed\xbb\x896پ\x9bh\xcb\xed\xbb\x89\x16\xdaw\x13-\xb6\xef&\xdaw\x13m\xaeۜ\xb4^Z\x91\xff\xe2d\xe2\xe5\xe2*2\xae\xa7\xe7\x968\x03?dS\xdc\xfa\xafOr3,\xefң\x12Y\xc1᳖\x95\xfb\"'\xc5\x01]\xd2E\xa7JڔK: \x91\xbd}\x02\xfdB\x12\xe67d\xdf\xe6$\xfc,\xa5\xf9\f\xf3L\xdb4\x9b\x98h\xaa\xe2$\t<\xc4/{\xc8\xec\xed\xe7\x90\f\xf3u\x9c\x9d\x1bW\xfaw\xcfA\xcdH\xc5YH\xc0\x99O̝\xc3\xd7\xc8\xf5\x18\"L\x0f\x12F\xff0\xf8ZȒ\x99\u038d\t7Ah\xd9\xe1\xc7\xf5\xf0\x8dU!S\x06\x9e\xb9\xdd'\xb6\xf2\xbcG\xe9\xee\xb0䮟\xf6\x1a\xf9-|b5\xc6#(\r\x92\v\x87\xce\x19n\x1d\xa0\x17~\xab\xbd\vw\xf6\xb9\x9cw?\xf2ri^\x9cA3̐\x99\x10\xd1\xe7^\x19\xe5'\n\xe7\xe7\xc8\xcc'\xb5\x9c\x93\x193\xce{\x99\x04\xba\x9c\x0f\x93\xe39.侼 \xe3%3\xdb\xf1\x9b/\xc6rrZ^\x94ɲ\x98\x10\x98\x99\xbf2\xccL\x99\ayF\xd6J\x16r\x963T\xce\xceK\ty \xb3\xfb\xc8\xceFI\xe4\x99\xcc\x02\x9e\xccA\x99\xcb.Y\x88J\x9df\x9e\xe4\xe7\x94̂v\xf9&˙$\xaf\x97/\xfa\x1a6\xf0\xb4\xa8Y\xcc\x06Y\xb4\x91\xe7\u05f7\x98\xefqN\x96\xc7\"\xc6^\x98\xd1\xd1flL\xcc{n\x1e\xc70Oc\x02hN\xf6\xc6Dv\xc6\x04\xc4ٜ\x8dܜ\x8c\t\xd8\vjw\x96Kf^\xa6?\x04\x86E\xfd&\xfeV\x1c\xf5ҍ)=0\x17\x97,\xf4\xdfF݉\x96\xd1j\x9a7?S\x96'\xb7\xfb\xf3\xcdϪ\x11\x96\xd7\u0085\xf3\x0f\xbcL:\x8dv\x8f\xc7\xf6\xb3\xceߕ\xfb\xccist\x90~\xfbܲ\xe7zdD3\x03\xcf(\x04\xb0\x14s\x9d\xec\xbc\xf0߲\x17j\x85$\xf3\xe9\xc0\x85\x0fV\xc3'\xefW\x9e\x83ݗ\\\xa9\x88\xa7\xddcEP◯g\xb8\x1f\xf3\x06\xa2\xb7eݳ\xbf6\xa8\x8f\xa0\x0e\xa8;\x8ba\xe1;\x02\u007f\xd0L#\xbaĭ 
?|\x05\x85\x91\xe1\xdc\x1d8x/\xbd\nK\x82\x1d\xad\xd1\xc1\xa13/ZZ\x93x#?`\xa2k:\xf0\xa1\xdaщ\xf7K\xb6gn\x12\xfeۺ\x0e\xe7;\x0f\x8bj\xfbM\x1c\x88\x97\xbb\x103 s\x93\xea\xf3.\xa0\x16\x93\xe8\xdfʕXr&\xb2\xad\xa8\xbc$\xf9\xb7H\x8e?#)\xfe\f\xa7\xe2<\xb7\"\x1bM9\xc9\xefo\xe2\\\xbc\xa1{\xf1\x16\x0e\xc6\xcb\\\x8c\x05\x90\xa3\xa4\xf6\x9ct\xf5\xac\xcb\xd5\xec\xfb\x85\x9c\xcb\xd1\xe5+\x80\xf94\xf4\x8c\xf4\xf3\x8cˁ\xa5\x95f\xa4\x99\x9f\x97^\x9e\x81\xc37r>\xde\xc8\xfdx\v\a\xe4m]\x90E'd\x91sf_\xbf8\xba\xact\x89z6\x18\x9f\xcbj\xb3L6\xf2\x17\x86s\x8e\xbe\xa8\x8d\x15^\xa8\xd7\xc04M\x85\x94ۯ?\v\xf8\x85\xcb\xd2Ӄ\x98\xaa\xa7\xc7]M,\x97\xff\xde\x1a\x15\x9d}\x96\x06:\xbaT0X3튦m\x8e\xfebҬ\xe1#+\xf6Î\xb0g\x06\xb6JWI\x83鲽\x91\xb9\x8e\xa3\xe8\xc9\xe5\x1a\xe0g\xd5^z\xf5+*\x18^\xd5\xe2H~\x00\\\x0e\x87\xbc\x8c\x01\x92\xcccBy\xa6P\xaff\xc1\xd7{\x18\xf6N\\\xde\xc5R=\x85PM\xd9B\x9f \x1e\x93G\xb8\xff\xe2l\x12W&\xa4\xe8J\xa6\x04\xab#\xfa|\xe3\x8a*?\xbd\xfee\x9e\xb1J\xb3\x1d\xfe\xaa|ݭ%L\f{\x0f\x8a\xae\x05Y\x11/\xd7\xe3\xb7\x17)\x1d\x1a*\x80\x8d\x80u93'՟h\x95)!2s\xfe\xac\x15\v\x9by|\xfc\xd5o\xc0\xf2\n\xd7\x1f\x1a\u007fq\xba\xaa\x996H،\x1b\xf3\x836\xf4߽zN\xc56T\xd8\xf3O\xe3ukty9\xee~\xf6\xac\xd5\x1f\x06U\xc4\"\x8a\x96\x18\xf5KzT\xcf1\xeb\x11ɟ\xf2\xa4C>\x05\xa7WHх,\xdcw5\xaf[\xe6gJjO\x95\x9as\xe5Ֆ\x8b\xcd\xf9*l\xa1\xb4d\xc8\xeej\xb4\xab\xd1\x13*\xb4\xb9\x9a6/\xab7\xe7\x93Q\x06\xe5>\xe7\xe9t{:\xc2\x15u\xd4e\xaf\xde\\[\xf6뙙6\xe1%\xa9H;p~\xa4\xb3d\t\x1a\x96\x80\a\x94\xa0\xa4\xcboq\xd5o|\xe1\xd1\xf1\x98\x04\xd4>\x94\x90@\xd3\xd4B\xb12\x9e\xf0\xa8\xb3B\xb1\xcaG'\xbf\xf4\x01\xf5;3\x03\xb3-\xe4\x96@©\xc0\xf4\x8a\xe5\x06Jfq\x95\x04\x9a%\xfb\x92\xccV\x18>dt\xf3\xdeZ\xf2\vR\xb6\xf2\xb8`\xe0\xd4Ȩ\u007f\xad\xb2L\x80l\xaa\x8dW\xe8,vH\xd1\xef\xa4l\xa0\t\x19O3\xc7\xcbo\x8cK\x8b\xbb\x93\x98\xe2\xe9\xcen#\xff\x9c\xbd\xb3v\xe4\xd4\xceLS\x14h̶\x11\"eڷ\x9c\xfb\xfa\xdbt\xb9|\x8b5\xce\\'/\x02]\"`,\xa3\xe73\x01+4\x86\xedbq\xb3g\xd2@;\x94\xe8\f\x9fT\xbc\xd1;\x86]\xe6ذ\xb4\x97\x8f`\xb1\xc26,L\x10o\xfe{\xbdޥ\xec\x02\xa1v\xbe\x00\"\x8f\xe5_\xa3j>\x13'_k\xaesT\xf9Ƕ#\xe1\xc6\x05\x9f\x1d!\xbar\xbd(\xf8\x8e\x93\x1e$\"\xed\x98ް\x1d\xae\n%\x04\xba4\xf3\xd3u\xbd\xe5a\r\xf9y\x9f\x91\x99ŭ\xfd\xdc\xef\x1b\"\x1d\x9eھ2\x06\xf3\xe5\x15]\xf5V\xcb5v\xe5\x90O\x16\xa4\xdc\xc4g\xa9n\x8f\x85d\xe1\xe0ӕ\xf6\xfb\xc6\x03\x16䪇\x13\xeb\b_\x05c0\xed\xcdV\xecw\xa5\xaf\xa0\xe2\x92\xfe!\x8b߅\"\xe2\xe0\xb3\xd6\xefj\xd6-\xac\xfb\x9e\xfa\xb4i\xd2=E\x8a\xf1@L\x99\xaa\xe9\xd4\xd8\x15|\xc2S\xcb\xcag\xbbb\xe9\x82o\xa9j\xc9\xd4\xe5N\xdek\xb5#\u007f8\xf1\xb2\x15^\x89w\xf7L[΄8\xfaI&gO\xbc\xf8\x80\xa4\xb8&\xad\x974Z\xc3*\x970\x1b\xbau\xae7\x97\x9e\x13\\\x9e\xeaF5v 
J:Q\x94\x0e\xfb;`k\xf8\xa4,ƈ.\x1f\xc2$\xe1\x8bƮp\xbbU\xdazO\u007f\xb5\x02\xbe\r\xd6P\x02.\x9d\tw#\xe5\x8b\x17\x03\xb7ݥ|ǽ\xce\xd1\xd1\xee\x10\xba\nO\x15;\xfa\x9cEV\x14dl㵱L$\xe4\xdb7\xe5@9\xb3\x93\xb8\x0f˿$\xec\xb0\x13\x84\xdf\xf5\xfb\xb7\x1f\x8e\xb7\xdá\xf3\x98s9\xe5^\xb6'5\x1d\xb8Lc\x94𬹵$O\xfbWv`I\x82\n\x01\x86d\xcaD\x99\xc09\xc9\xeeޓ\uef5b\x0e!\x0e\xfd\x9b\xb6\xf3\x94\xea\x0e\x9bSD\x96\x8dC\xc1Ķ\xfc7K\xdcıD\xcab\xcf䎘J\xabf\xb7\x8f|9\xa1\x19\xa7\"p\r-\nj\xd1\xec\x88\xd5\xc3u\x89m\xb4\xec\x85`\xc2\x05J\xd9[.+\x9e&W\x1aB±\x80\xfeu(\xfc\xb7\xdajU\xad\x02-\xdc-\xc7U\b\x8dh\xae\xc8\xfe'G~\x02hWa˱A]\xa3\x04f\xc2z2>\xa8\x9a'\xeb\\\x9c\xc22ms\xbd\x8a\x87A\xe7\x05\x87\xc2AN\xaf\xf7!\x04~\xfc\x87e\xb7\xe3\x9f2\xb8\x02\xc3e\xac\xdd\xef\x03K\x9e\x15\f\xf9\x19\x1a\x9d\xaf\x9e\xbc\xc0:\xf1\x10\x06\xfe\xc0p\xf9\u007f[W\xe0\xd0j\x98\x8f96\xe5\x97Q\xf7Qv\xae+Q\xddv\tv`\x02\x1f\u007f\xe2[\u007f\xa7VЪ\xff\xfcwϺ=d\xd9,\xeff\xcd\x15g\x89\xb4v\xc7BA\xea{\x81dG\x18ġ%\xf4\xee,\x93\xf7\xf02'\xee5=\xb8\xf8\xb3\x12\xaf\xe3\xd7\x1c^滽\x99\xe3\xf6\xba\xbb{f\xae\x94\xfd\xd2\x19\xfb\xb7\xd0-\xe1\xb9\x05\b\t\xdf-\xb1\x8d֛[\xf4\xddz\xae[\\\xe3D\xb5\xeb\x91;\xf7J\xce[R\x0f\x9c\x8aU_\xbdٓ\xe9t:a\x8d|F\xeb\xa4\xd1s`\x8dğ=j\xfa媗\u07fbJ\x9a\xd9\xfe\xfbɋ\xd4b\x0e\xcb༩\xbf\xa23\xc1r\xfc\x84\x1b\xa9\xa5\x97FOj\xf4L0\xcf\xe6\x13\x00\xa6\xb5\xf1\x8c\xa6\x1d\xfd\x04\xe0F{k\x94B;ݢ\xae^\xc2\x1a\xd7A*\x816\x12/W\xef?T\xbf\xab>L\x00\xb8\xc5x\xfcI\xd6\xe8<\xab\x9b9\xe8\xa0\xd4\x04@\xb3\x1a\xe7\xb0f\xfc%4\xce\x1b˶\xa8\fOwU{ThM%\xcd\xc45\xc8\xe9\xea\xad5\xa1\x99C\xbb\x90(d\xb6\x92H\x1f#\xb1U\"v\x9f\x89\xc5u%\x9d\xff\xf3\xe5=\xf7\xd2\xf9\xb8\xafQ\xc12u\x89\xad\xb8\xc5\xed\x8c\xf5?\xb4WOa\xedTZ\x91z\x1b\x14\xb3\x17\x8eO\x00\x1c7\r\xce!\x9en\x18G1\x01ȘEjS`BD-0\xf5h\xa5\xf6h\x97F\x85Z\x9f\xee\x12踕\x8d\x8f('Y \v\x03E\x1ap\x9e\xf9\xe0\xc0\x05\xbe\x03\xe6`\xb1gR\xb1\xb5\xc2\xd9_4+\xffGz\x00?9\xa3\x1f\x99\xdf͡J\xa7\xaaf\xc7\\YM:z\xec\xcc\xf8#\t༕z;\xc6\xd2=s\xfe\x99))NZ\a\xe9\xc0\xef\x10\x14s\x1e\x8c\xb6e,\x1e?\x97ț\x1c(\xfb[ƪ\x82E\xf6\\\xb3\x81\x0f \xa4\xa3\x02\xc0E\xa2C\xb0\xa8<\xa3\xf59x\x1b\xde$>7z#\xb7C\xa1\xbb5\xcd%\x8b\xb9B\xba\x87\xdc2\xdeD\xa1\x89\xac\xa3\xb1f/\x05\xda)\xf9\x87\xdcH\x9e9\t6e\xae\x8dD%\xdcP\xd2\v^\x16E\xb1(ȫ\x99\xba\xa2\xc3\xe5ic,\x8d\x99\xd4ɂ[\x021\xd8\xd8:\xa7T\xedQ\x8bS5rƍ\x89Qˡ\x80\x83\xf4\xbb\x14\x0e\u0558\xdf\xc1\xab\xbeG\xe3\x05\x8fc\xd3=ޟvH;S\x02Ep\xc8-\xfahm\xa8\xc8|Ȕ*\x80/\xc1ŀڏ\x13e\xc4B\xad\x9c~\xc1\xe3\x10h\xb8\xa6\xdc\\\xc2\\g\xf9\x8eJ\xe7°\xc5\rZ\xd4~4\xa8Sgb5z\x8cq]\x18\xee(\xa4sl\xbc\x9b\x99=ڽ\xc4\xc3\xec`\xec\x8b\xd4\xdb)\x01>\xcd\x1e4\x8bm\xc5\xec\xbb\xf8\xe7\x82\xc8O\x0f\x9f\x1e\xe6\xb0\x10\x02\x8cߡ%\xadm\x82*\x86֩o\xde\xc7\x1c\xfb\x1e\x82\x14\u007f\xb8\xfb\x16\\L\x93<\xe7\x06lV\xd1\xfa\x8fT\xa8E\xa6\b\xa2UҊ\xb1@\x99\x92\x94]gm\xa6X3f\x88c\x15fwP`\xa2\f2\x16Q_p\x18L_q\xb3\\\xec^\xf1\xb1RHK-$\xa7B\xec\xdc7J\x83!\xce\xea\xed\x11\xc1\xfa\x15\xf8\xa5\x880.x\x12 \xe7\xc3+\x1c?t\xf7\xb6mY\nO9\xc79\xf4T@9\xd0H9\x90\xd9!r1(p\xa35y\xa37\xc0N\xa1\xee\xce\xf5c\xfc\x1b#\xc4:\xf0\x17\x1c\x01~ \xcaǸ\xb1`\x9c\x8e\x11/\xc1a\f\xbe\xd7\u0600\xeb6\xce\xd9\x12\xed-\xbc,\x17\xb4\xf1\x94&\x19,\x17\xb0\x0eZ(,\x1c\x1dv\xa8\xa9C\x90\x9b\xe3\xf8]4\x9e\xeeW\x05\xd5Xa\xe4\x1a\xbf`;.C\x8a\xe1sX\x1fGj\x82\x1b\x84l,n\xe4\xcf7\b\xf9\x187\x16\xc0\x1b\xe6w 
\xb5\x93\x02\x81\x8d\xc0\x9f\x8a\xb5\v\x82\x9e\xf2\xffC\x8e\"ߠ\x9e\u05fc=\xb1\xf3\x16\x87/\x18_\xf1\x9fǼ\xed\x84B\xf9\x9d#\xffy-xɏG%ڟ\x1e\f\xfe\x94*,>\x92*Ϙy\x1e\x9ex\xa5R+\xcf\x16c\xceLu\x81\xb1\x16]c\xb4\xa0\xe6\xe9\xb6:\xade\xf9\u007fW\xad\x8d\xabuz\x1e\xe5zkE\v7\xb5*\xf1\x89\xe6\xcd\xcdJz\xb8\xea\xb6\x02f\xed\xa8Sl\xfb\x95\x9e\x8c\xbfH\x9b\xf2\xaeӧP?\xac!\xe8X\xa9Ō_\xc1\xdf5|\xa2ޖ\xb2\x93\x98\x13\xdfv\xcc\x00\xa4\x03m\x0et\xbcC/\x92\x00\xa3S\xbe\xa6n\x8di\x91\x9b\xe1\xb8t\x90JQƶX\x9b\xfdhƦBӢ:\x02sd:\xfb\xdfT\x1f\xaaw\xbfZ\x17\xa4\x98\xf3\xd4Ԡ\xf8\x8a{9|\xe5\x19\xa2{?8Q\x1c\xff\xe4\x0e\xf4\xe3\xc7\xd2,\xcfl\xde\xf6\xe3\b\x18\x1b\xa9\xa8\x16\x1c\x89\x13m\xc50|\x8f\xfc\xb8\xba\xbfs\xb1\x84G\xed\xc7ʾ\x03Z\x8c\x1d\x13\n\xaa\xe2M~\x97\bΣ\x1d1\x80\x93\xf6\xa2\xceA\x19\xbd\xed9N\x1a\xf9\x95\x82*\xb4dPƂ@O\xa9Io\x81\xef\x98\xdeb\xfb\n\x95\xf9\u007f\x9dS2\x9f\x9eʹ\x16\"\xf5%\xf3\xb8I\xa3Or\xacL\x1f\xbc\x00\xb7\x9b\xc7_\u007f\v\xf7E\xb3\x17ۜ+\xb8\x0f\xf6\x97,M\xa0N}\xfb\"\u070eooo\x87\xcf\xcd7 \xf1ַ\xf0W\xde5\xe0\xc0\\\xfb*\xfe\xeb\xe1PS\xb5z\xb5\x04\xfe\x92v\xa5\xe7\xc3|\x04\xd8\xda\x04\xff\x9agލ\x19t~\xee\u007f\v\x8f\xf1#Ƶ\"\x83\xf6\x14\x8d\xf0`\xa9\x95l_\xc5bP\x18\xcb-\xb7?/-z\xdfZ\xbak\xc3/17\xc85\x9ak\a\x93)_v\xf4\x9aA\xee΄\xf5\xe9\xa5x\x0e\xff\xfeϤMה\x13\x1b\x8f\xe2\x87\xfeǵw)d\x94/d\xf1'\xa7:&}\x1d\x84\xbf\xfdc\x92\xaeB\xf1\\>i\xd1\xe4\u007f\x03\x00\x00\xff\xff\x1d\r\x93\v\x97\x1c\x00\x00"), []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4\x96Ms\xe36\x0f\xc7\xef\xfa\x14\x98}\x0e{y$\xefN\x0f\xed\xe8\xd6\xcd\xee!\xd36\xe3I2\xb9tz\xa0I\xd8\xe2F\"Y\x00t\xeav\xfa\xdd;$%\xbf\xc8v6=\x947\x91 \xf0\xe7\x0f\x04Ī\xae\xebJ\x05\xfb\x84\xc4ֻ\x16T\xb0\xf8\x87\xa0K_\xdc<\xff\xc0\x8d\xf5\x8b\xed\xc7\xea\xd9:\xd3\xc2Md\xf1\xc3=\xb2\x8f\xa4\xf13\xae\xad\xb3b\xbd\xab\x06\x14e\x94\xa8\xb6\x02P\xceyQi\x9a\xd3'\x80\xf6N\xc8\xf7=R\xbdA\xd7<\xc7\x15\xae\xa2\xed\rRv>\x85\xde~h\xbeo>T\x00\x9a0o\u007f\xb4\x03\xb2\xa8!\xb4\xe0b\xdfW\x00N\r\u0602\xc1\x1e\x05WJ?\xc7@\xf8{D\x16n\xb6\xd8#\xf9\xc6\xfa\x8a\x03\xea\x14xC>\x86\x16\x0e\ve\xff(\xaa\x1c\xe8sv\xf5)\xbb\xba/\xae\xf2joY~\xbaf\xf1\xb3\x1d\xadB\x1fI\xf5\x97\x05e\x03\xb6n\x13{E\x17M*\x00\xd6>`\vwIVP\x1aM\x050\xf2\xc82kP\xc6dª_\x92u\x82t\xe3\xfb8Ldk0Țl\x90L\xf0\xb1\xc3|D\xf0k\x90\x0e\xa1\x84\x03\xf1\xb0\xc2Q\x81\xc9\xfb\x00\xbe\xb2wK%]\vM\xe2\xd5\x14\xd3$d4(\xa8?ͧe\x97\x04\xb3\x90u\x9bk\x12X\x94D\x9eD\xe4\xb8\xd6;\xa0#\xbe\xa7\x02\xb2}\x13:ŧ\xd1\x1f\xf2µ\xc8\xc5f\xfb\xb1\x90\xd6\x1d\x0e\xaa\x1dm}@\xf7\xe3\xf2\xf6黇\x93i8\xd5z!\xb5`\x19Ԥ4\x81+\xd4\xc0;\x04O0x\x9a\xa8r\xb3w\x1a\xc8\a$\xb1\xd3\xd5*㨪\x8efg\x12\xde'\x95\xc5\nL*'\xe4\fm\xbc\x04hƃ\x15\x98\x96\x810\x102\xbaR`'\x8e!\x19)\a~\xf5\x15\xb54\xf0\x80\x94\xdc\x00w>\xf6&U\xe1\x16I\x80P\xfb\x8d\xb3\u007f\xee}s:g\n\xda+9\xe4g\x1a\xf9\xd29\xd5\xc3V\xf5\x11\xff\x0f\xca\x19\x18\xd4\x0e\bS\x14\x88\xee\xc8_6\xe1\x06~I\x98\xac[\xfb\x16:\x91\xc0\xedb\xb1\xb12u\x13\xed\x87!:+\xbbEn\fv\x15\xc5\x13/\fn\xb1_\xb0\xddԊtg\x05\xb5D\u0085\n\xb6\xce\xd2]\xee(\xcd`\xfeGc\xff\xe1\xf7'Z\xcf.H\x19\xb9\xd0_\xc9@*\xf3\x92\xf6\xb2\xb5\x9c\xe2\x00:M%:\xf7_\x1e\x1ea\n\x9d\x931\xa7\x9f\xb9\x1f6\xf2!\x05\t\x98uk\xa4\x92\xc45\xf9!\xfbDg\x82\xb7N\xf2\x87\xee-\xba9~\x8e\xab\xc1\nOW2媁\x9b\xdcbSQ\xc7`\x94\xa0i\xe0\xd6\xc1\x8d\x1a\xb0\xbfQ\x8c\xffy\x02\x12i\xae\x13ط\xa5\xe0\xf8\xef07.Ԏ\x16\xa6\xf6}%_\x17\x8a\xf6!\xa0N\x19L\x10\xd3n\xbb\xb6:\x97\a\xac=\xc1Kgu7\x15\xed\x8c\xee\xbe\xc0\x9b\x93\x85\xcb\x05\x9dơM\xceW\xae\x1e\x1er\xee,\xe1\xec\x16\xd6p\xd6s_璛\xe1\xbf$S:\xf1\xc8FG\"trԟեMoe\x81D\x9e\xcefg\xa2\xbed\xa3\xfc\x04P\xd61(\xb7\x1b7\x82tJ\xe0\x05)\x95\x
81\xf61\xf5\x194`\xe2\x19\xbf\x11\xcb\xf1\xbf$\x90\xd7\xc8ܜ\xd9Y\xc1ႦW\xb2\x93Fz^\xa8U\x8f-\bE\xbc\x92YE\xa4v\xb3\xb5\xfc\xcf\xfa\x06\x82e\xb2\xb9\x94\x83\xfd\u007f\xfa\x9bIȸ]\x1c\xce#\xd5p\x87/\x17foݒ\xfc\x86\x90\xe7W>-.\v\xbd\xfdc\xe0\r\x94.^ʳIN\xfd\xce\x1cQd\xf1\xa46\xc7\\9\xae\xf6\xfd\xbb\x85\xbf\xfe\xae\x0e\xf7Zi\x8dA\xd0\xdc\xcd_i\xefޝ<\xb7\xf2\xa7\xf6\xae\xbc\x8c\xb8\x85_\u007f\xabJ(4O\xd3\xeb)M\xfe\x13\x00\x00\xff\xff--\nM\xde\n\x00\x00"), []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4WM\x8f\xdb6\x10\xbd\xfbW\f\xd2CZ \x92\x13\xf4\xd0·v\x93âi\x10\xd8\xe9^\x8a\x1ehj,\xb1K\x91,g\xe8\xcd\xf6\xd7\x17CJ\xfe\x90\xe5\xdd͡\xbc\x89\x1c\x0e\x1f\x1f\xdf.\xbd\u007f[\xffT\xbf]\x00\xe8\x88y\xfa\x17\xd3#\xb1\xea\xc3\n\\\xb2v\x01\xe0T\x8f+h\xfc\x83\xb3^5\x11\xffIHL\xf5\x1e-F_\x1b\xbf\xa0\x80Z\x16m\xa3Oa\x05ǁ2w\x00T6\xf3~H\xb3.i\xf2\x885Ŀ͍~4CD\xb0)*{\t\"\x0f\x92qm\xb2*^\f/\x00H\xfb\x80+\xf8$0\x82\xd2\xd8,\x00\x86\xbdgXհ\xbb\xfd\xbb\x92Jwث\x82\x17\xc0\at\xbf|\xbe\xbd\xfbqs\xd6\r\xd0 \xe9h\x02g\x06'\x98\xc1\x10(\x18\x10\x00\xfb\x03(P\x0eTd\xb3S\x9aa\x17}\x0f[\xa5\xefS8d\x05\xf0ۿQ3\x10\xfb\xa8Z|\x03\x94t\aJ\xf2\x95P\xb0\xbe\x85\x9d\xb1X\x1f&\x85\xe8\x03F6#˥\x9d\x88\xeb\xa4w\x02\xfc\xb5\xec\xadDA#\xaaB\x02\xeep\xe4\a\x9b\x81\x0e\xf0;\xe0\xce\x10D\f\x11\t]\xd1\xd9Yb\x90 \xe5\x86\x1d\u0530\xc1(i\x80:\x9fl#b\xdccd\x88\xa8}\xeb̿\x87\xdc$\fɢV\xf1(\x87c3\x8e1:ea\xafl\xc27\xa0\\\x03\xbdz\x84\x88\x99\xa7\xe4N\xf2\xe5\x10\xaa\xe1w\x1f\x11\x8c\xdb\xf9\x15t́V\xcbekx,*\xed\xfb>9Ï\xcb\\\x1ff\x9b\xd8GZ6\xb8G\xbb$\xd3V*\xea\xce0jN\x11\x97*\x98*Cw\xb9\xb0\xea\xbe\xf9.\x0eeH\xafϰ\xf2\xa3Ȍ8\x1aמ\fd\xcd?q\x02\xa2\xfa\"\x982\xb5\xec\xe2H\xb4t\t;\xeb\x0f\x9b/0.\x9d\x0fc\xca~Q\xcea\"\x1d\x8f@\b3n\x87\xb1\x1cbV\x9e\xe4D\xd7\x04o\x1c\xe7\x0fm\r\xba)\xfd\x94\xb6\xbda\x1a\xc5,gU\xc3Mv\x1a\xd8\"\xa4\xd0(Ʀ\x86[\a7\xaaG{\xa3\b\xff\xf7\x03\x10\xa6\xa9\x12b_v\x04\xa7&9\r.\xac\x9d\f\x8cNv\xe5\xbc&\xa5\xbe\t\xa8\xe5\xf4\x84@\x99ivF\xe7Ҁ\x9d\x8f\xa0\x8e\x95?\x10X\x9fe\x9e\xaf\xdc\fN\xc5\x16y\xda;\xc1\xf2%\a\xc9\xf2\x0f\x9d:7\x9a\xef\xb1nk\xf1\n\x1a\x80\x14\xf7\xf8\xa1\xbe\xc8x\x1d\x03̪w\x16\xc9(b\xa1Ax\x15+\x10\x93:\xc5t\xb9\xb44t\xa9\x9f_\xa0\x82_3揾}r\xfc\xc6;\x16\xb9?\x19t\xe7m\xeaq\xe3T\xa0\xce?\x13{\xcbؿ,r\xbc\x90\x0f\x97\xd4e\xe0\x1a\xc5\xca\xf1\xfa&\x86\x805R\xb2W\x97\xbb\xd9\xdc~\xcb>\xae\x84?\xc9ԕ\xda\x19[\xbe#\x9f\x17\x82ܲ\xa3\x10dJ\xb98\x10\xe4\xed\x11\x1d2\xd2\xd1\xc3\x1e\fw\xb3\x19\x01\x1e:\xa3\xbb<1\xabH\xec\x91\xc8k\x93\xcd\xe6\xdb\xe1K\xf1\x99\x883J\xae\xb2\xc2g\xba\x05\xfcE\xf7\x15˸\xb6@5\x94\xf1\x8bl\x87\x15'\xfa\x06\xe3\xc9\xf1#\xd5:ň\x8e\x87,\xf9\"\x9eNx\xa9\xf3\x8c\xe5\xfa\xc7\xfa\xe33\xf6\xf3\xfe\x18\x99\x9f\x9aʸ\x82&D\xacȴ\xf2|\x9011\xa0l\f\x97d\x94v\xfe\x9c9'j\xf6D\xf1k01\xdb\xec3\x10?\x1c\x02\x8bK\xa2+7\xe0\xf4\xc1\x96\x13\"\xe5ׅV\xd3w\x8d\xb4-B\x83\x16\x19\x1b\xd8>\x16\xbb\u007f$\xc6\xfe\x12\xf7\xce\xc7^\xf1\n\xe4f\xac\xd8\xcc\xc8H\x1e\xd5jkq\x05\x1c\xd35\x95\xcdn\xf38\xfa\x01\xed\xe4\xfa\x8b\x99\xdb\x1e\xd1~[aޞ\xc3\xf4\xee\xfb\xe0@\xcb\x1a[\xb6\x8e+\x95F\xf9\xf6\xf6\xfa\xfe\xcfw\xa3a\x00m\x94F\xe3Dr\xe8\xe1\x1bı\xc1(\x8cY}A\x04\xc3*\xe0\x14\xc0\xd0\x06\xab\bc\xc8#\x86 \x0eaIu\rZ\x94nȒ\xf4\xa9-0\tj\xf3\v\x96\xae\x80;4D&\t\xa6Tr\x87Ɓ\xc1RUR\xfc\xb7\xa7mI\xd7\xe8І9\x8cq\xe5\xf0y\xd7/Y\x03;\xd6t\xf8\x1a\x98\xe4в=\x18\xa4S\xa0\x93\x03z~\x89-\xe0ge\x10\x84ܪ5\xd4\xcei\xbb^\xad*\xe1R\xfc.U\xdbvR\xb8\xfdʇb\xb1\xe9\x9c2v\xc5q\x87\xcdʊj\xc9LY\v\x87\xa5\xeb\f\xae\x98\x16K\x0f]\xfa\x18^\xb4\xfc\x1b\x13#\xbe\xbd\x18a\x9d)F\xf8|x=!\x01\n\xb0 ,\xb0\xb85\xdc\xe2\xc0\xe8\xe4 
\xdf\xff\xe3\xee\x03\xa4\xa3\xbd0\xa6\xdc\xf7|?l\xb4\a\x11\x10Ä\xdcbt0[\xa3ZO\x13%\xd7JH\xe7\u007f\x94\x8d@9e\xbf\xed6\xadp$\xf7\xffth\x1dɪ\x80K\x9fԐ\xc3\xec4i./\xe0Z\xc2%k\xb1\xb9d\x16\xbf\xba\x00\x88\xd3vI\x8c}\x9a\b\x86\xf9\xd8tq\xe0\xda`\"%MG\xe45Ʉ\xee4\x96$=b \xed\x14[\x11=\x14\xb9s6]^\x8c\b\xe7\r\x97\xbe\xacw\x9a.\x82\\p\x99\xecI\xd8\xe4\xc0\xa7&\x87\x19VΈ\x024S/\xdb\xef\x19F.\x1b\x1dl1\xa3pD\f\xf4I\xc5\xf1\xcc=n\x14\xc7\x1cl\xda\n\xaefA[)\xe3#\u007f\xd4I9?\x85>%\x9f\x05L+~\x06W<\x91\x81\xc1-\x1a\x94%&\xc7u*\x9d\xc9 \x1b&\x1as\x8cǕ\x02Nx\xf5,ⷷ\xd7ɓ'&F\xecn~\xee\x19\xfeз\x15\xd8p\x1f\xe8Ο}q\xbd\r\x87y\x9f\xe6\x140\xd0\x02Cb\xda\a\t\x10\xd2:d\x1c\xd46K\x91\xca' \xc37\x18w\xbc\x0e\x1e,\xba\xcaCh!\xde\x03#\xdf)8\xfc\xeb\xee\xdd\xcd\xea\x9f9\xd6\xf7\xb7\x00V\x96h}^\xee\xb0E\xe9^\xf7\xa5\x02G+\frJ\xfc\xb1h\x99\x14[\xb4\xae\x88g\xa0\xb1\x9f\xde|\xces\x0f\xe0Ge\x00\xbf\xb0V7\xf8\x1aD\xe0x\uf593\xd2\b\x1b\xd8\xd1S\x84G\xe1j1\r\xa6=\aH\xbd\xe2\xb5\x1f\xfdu\x1d{@P\xf1\xba\x1dB#\x1ep\r\xaf|Zs\x80\xf9+\xd9\xceo\xaf\x8eP\xfdS0\xedW\xb4\xe8U\x00\xd7\xc7\xe1\xa1\xd1\x1d@\x06\xcb3\xa2\xaa\xf0\x90UM?\x1fT\xc8U\u007f\v\xca\x10\a\xa4\x1a\x90\xf0\x84Iz\xc1Q\"\x9f\x81\xfe\xf4\xe6\xf3Q\xc4c~\x81\x90\x1c\xbf\xc0\x1b\x10\xb1\xd8Ҋ\u007f[\xc0\a\xaf\x1d{\xe9\xd8\x17:\xa9\xac\x95\xc5c\x9cU\xb2ه\xf6\x881\tʄyp\xcdL\ueffa*\x13C;C\x88\xf6\xcb\xd8\xf6[2\xc9\xe9\xffVXG\xe3/\xe2`'\x9ed\xbe\x1f\xaf\xaf~\x1f\x05\xefċl\xf5H\x02\x1etd\xd8\xe58\x93\x98\xbd\x1f-N\xa9c&c\xed\xd7<+3t\xacʤb\xc3\xf6䩄\xed$\aƭ\x18VY`\x06\x81A\xcb4I\xee\x01\xf7\xcb\x10\xe25\x13\x14\x9f)\x04\xf7}\x0e`Z7\"\x1b\x8ac \x8fIh\xe4\x04\x15ڬ\xb2\xc7\ue795ð\xafsF\n\x1f\aK\x93\f\xcet\x96\\\x9d\xb3\xd4Q\xbfi\x8e\x16e\xd7Ρ,\xe1Ai\xc12\xe3\x06\xad\x13ef\xe2\xd5<\xd38!\xac\xc0\xcb3<\x88-\xe8L\xf1\x12E\x112\xbd\xbe\x80\xf1]\xc7\\\x85p\xbc<8\n\x91*t\xca[\xc7\x10\x97\xf9Rr\xb2\x86J\xabɐV|1ed\xa6\xf3\x98&G\x9d\xd1!\xd2y}\xed\x1b\xdeϨ\xb0C#?\xf24\xf8S\x97\xda\xfbTL\xbc\xb4\xc6.\x15\xe5\xe9㧕\xd3⽜\xef\xf0\xed,ã\xba\x8b\x96\xacw\xd0\xf6\x8fg\xe4\x8ad\x18\x90\v;}\x04#j\xc8}\x12M9\xfe\x96\x89\x069\xa4\xb7\x9d\xe9\x9e\f\xd5!\x95\rn\xc9\xdd\a\xd3K\xa5i\x84\xd7'\xaa5\x82\xf5}\xa2\v{\x82fg\x91\xfb\x9eF\x86\t\xf3\xe4u\xabL\xcb\\\xe8k.\xb3De\xd74l\xd3\xe0\x1a\x9c\xe9\xe6\xd3',\xb1EkYu\xce\x14\u007f\x0e\xabB\xc5\x1e\xb7\x00ۨ\xce\xf5%\xfb\xc8=^بS\xcf\xeb\x1ad\x8b\xe1\xb1:3*VlLڛ\xc6\xef\x19:\x82Ã\xa0G\xb5\xc1|\xd0\u007f\x89O\x00\xf0\x0fZ\xe7\x10Қ\x9c\x81\xf5\xde뤅\xc1\t\xa7|\x83\x8f\x99\xd1\xd9C\xdcp\xf22\x99Lf\xeeGo\rϺ\u007f<\xe8\x1c\v\xe22\xa8U\x93\x8cY9ր\xec\xda\r\x1a\xe2\xc3f\xefЎ\xddy\xae?\xe3\xeb\xba\x03\x1b\a\xfb\x93\xfc\x02\xa5X\xaa\x96L\xfa>*Y\x97S\xc0\x85\xd5\r\xdbg\b\xa7\x8b\xf8܍\x8c\x8b\\\xc0A\x9f\x93Qk4~\xea\xb9}%\x8f\xe9J\xc9#\x95F\xb2g!\xdd_\xffr\"\xd3\x13\xd2a5\t\x0eq\x9e\xd8\xf9\x03\x9d\xf2uN8\x91\xc4Xɴ\xad\x95\xbb\xbe:\xa3\x05w\xfd\xc2d\r\xb3\xe79\xec\xa9EUȉ\xaa\xf7-\xcf2\xd5\xf1\x13\xf09\xa8\xa3\xc5g\xa2P||\xceŠ;\xd4̐\xa5\xfb7\x81\xcb\xe9\xa3\xd5k\xb0\xc27:)\xf3\f\xa9hhCX\nN\x94Z)\x83\x19\x97\t\xf3\xb02\n\"c\xf8\xbfg\xfc\xc8\xea\xc9l\xd0#\xe7\x03ڱY>\x1c\xe96\xfdC\xd0\x1a~\xfdmqHlXI\xc5\x13\xf2\x9b\xe9\x1fYĔ3\xfdՄ\xffY*\x19*\t\xbb\x86O\x9f\x17\xe9\xd9\xf2>\xfd1\x04\r\xfe/\x00\x00\xff\xff\xb0\xddǼ\x99\"\x00\x00"), - []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xc4YKs\x1b\xb9\x11\xbe\xf3Wti\x0f\xcaV\x99\xc3]'\x95\xa4x\xb3\xa5lJɮ\xac2e]\\>\x80\x83&\a\xab\x19`\x82Ɛf\xb6\xf6\xbf\xa7\x1a\x0fr^$%U\xb4\x9e\x8b-\xa0\xd1\xf8\xf0\xa1_hN\xa6\xd3\xe9D\xd4\xea\x01-)\xa3\xe7 
j\x85_\x1dj\xfe\x8b\xb2ǿS\xa6\xccl\xf3\xe3\xe4Qi9\x87\xab\x86\x9c\xa9>\"\x99\xc6\xe6x\x8d+\xa5\x95SFO*tB\n'\xe6\x13\x00\xa1\xb5q\x82\x87\x89\xff\x04ȍv֔%\xda\xe9\x1au\xf6\xd8,q٨R\xa2\xf5\xca\xd3֛\x1f\xb2\xbfe?L\x00r\x8b~\xf9\xbd\xaa\x90\x9c\xa8\xea9\xe8\xa6,'\x00ZT8\x87\xdaȍ)\x9b\n-\x923\x16)\xdb`\x89\xd6d\xcaL\xa8Ɯw][\xd3\xd4s8L\x84\xc5\x11Q8͝\x91\x0f^\xcfǠ\xc7O\x95\x8aܿG\xa7\u007fV\xe4\xbcH]6V\x94#8\xfc,)\xbdnJa\x87\xf3\x13\x00\xcaM\x8ds\xb8e(\xb5\xc8QN\x00\"\x01\x1e\xda\x14\x84\x94\x9eRQ\xdeY\xa5\x1d\xda+V\x91\xa8\x9c\x82Dʭ\xaa\x9d\xa7l\xaf\a\xcc\n\\\x81\xbc\xa5\xa7[(\xad\xf4\xda\x0f\x05\b\xe0\f,\x11\"\x12\xe9\x95\x01\xfcJF\xdf\tW\xcc!c\xe2\xb2\xda\xc8L'\x9dQ&p~\xdb\x1bu;>\a9\xab\xf4\xfa\x18\xb2\xff3\xa8\x0e\x9e;#\x9f\x88\xe4\xbe@/\x93\xd04ui\x84D˛\x17B\xcb\x12\x81-\x17\x9c\x15\x9aVh\x8f\xa0H\xcb\xeewu\x17ɧ\xa4\xaf5\xf3\x1cv\x9eCE\x90\xedl\xff\xd0\x1e:\xb7\uf751q\x01D\xa3\x06r\xc25\x04\xd4\xe4\x05\b\x82[\xdc\xcen\xf4\x9d5k\x8bD#0\xbcxV\x17\x82\xba8\x16~\xe2uq\xac\x8c\xad\x84\x9b\x83\xd2\xee\xaf\u007f9\x8e-.ʜq\xa2|\xbfsH\x1d\xa4\xf7\xfdဖ\x9dm\x1d\xaf\xff\x9b\xc0]2\xa4k\xa3\xbb\xbc\xbe\uf34e\x81m)M\x818\x1b\x04ю\xd6w\xeb\xae>)\\\x18\bӛ\x1fC(\xcb\v\xac\xc4ם\xb8\x01Nv\xa0\bD\\\x1aNq :\x85\xec\x8f\xffX\xdcC\xda\xda_F\x9f}\xcf\xfba!\x1d\xae\x80\tSz\xc5A\x97/qeM\xe5u\xa2\x96\xb5Q\xda\xf9?\xf2R\xa1\xee\xd3OͲR\x8e\xef\xfd?\r\x92\xe3\xbb\xca\xe0ʗ\x18\x1c/\x9b\x9a-Wfp\xa3\xe1JTX^\t\xc2W\xbf\x00f\x9a\xa6L\xecӮ\xa0]\x1d\xf5\x85\x03k\xad\x89T\xc1\x1c\xb9\xaf~U\xb2\xa81\xe7\xebc\x06y\xa9Z\xa9\xdc\xfb\x06\x87\x1f\x10\x03\xf9\xac\xa3z\xdcu\xf9[\x8a\xfc\xb1\xa9\x17\xceX\xb1ƟM\xd0\xd9\x17\xeaa{?\xb6&\x81ӭ\x9c\x17\x94\x03\x05ɁR\x802-\xde\x16h\xb1\xbd\xc6bmH9cw\xac8d\xcbl\xa0\xe1\xc8E\xf8#\x1by\xe6\x18\x1c\xee\xbdCX\\\xa1E\x9dc\x8a\x10\xa7*\x99\x91S\xb4\x12\xfa\x10\xe2q\xea\xe1D\xf4\x1c\x05\xfc\xee\xee&E\xcc\xc4p\x84\xee\x86\xfb\x9e\xa1\x87\xbf\x95\xc2R\xfa\x84r~\xef˛U\xd8\xcc\xc7\x0eg@@\xad0T\xa4\xfb`\fJ\x93C!\xc1\xacF5\xf2\xa3\x01\xd8\xc1,\xc6\x15oB\xa4\x88!\xe9\x10\u0099z\x10\x1c\xa3\x94\x84\u007f->\xdc\xce\xfe9\xc6\xfc\xfe\x14 \xf2\x1c\x89|\xbe\xc6\n\xb5{\xb3\xcf\xd9\x12IY\x94\\\xb8`V\t\xadVH.\x8b{\xa0\xa5\xcfo\xbf\x8c\xb3\a\U00013c40_EU\x97\xf8\x06T`|\x1f\xfe\x92\xcd(\nt\xec5\xc2V\xb9B\xf5\x93֞\x01\xb6\xaex\xec\xad?\xae\x13\x8f\b&\x1e\xb7A(\xd5#\xce\xe1\xc2W\x82\a\x98\xbf\xb1c\xfd~qD럂\x03]\xb0\xd0E\x00\xb7\xcfwm\x8f<\x80t\x85p\xe0\xacZ\xaf\xf1P\x88\xf6?\x1f\xbc9$~\x0f\xc62\x03ڴTx\xc5|{!\x1e\xa1\x1c\x80\xfe\xfc\xf6\xcbQ\xc4]\xbe@i\x89_\xe1-(\x1d\xb8\xa9\x8d\xfc>\x83{o\x1d;\xed\xc4W\xde)/\f\xe11f\x8d.w\xa1\xda\xdf \x90\xa9\x10\xb6X\x96\xd3PoH؊\x1d\xb3\x90.\x8e\xedM@-\xac;i\xad\xa9ʸ\xffp\xfda\x1e\x90\xb1A\xad}\xbc\xe3\xec\xb4R\\5p\xb9\x10r\x9e\xb7\xc6A\xd2L\x1f5\xc1|\x9c\x81\xbc\x10z\x8d\xe1\xbc\b\xab\x86\xb3Pv\xf9\x12?\x1e\xa6\xfe\xf4\x8d\x94\x00\xfd\xc0\xf1͒\xe8\x13\x0f\xe7+\xd5'\x1c\xae\xfd\xd6:y\xb8\xc7f\x89V\xa3C\u007f>ir\xe2\xa3\xe5X;\x9a\x99\rڍ\xc2\xedlk\xec\xa3\xd2\xeb)\x9b\xe64\xd8\x00\xcd\xfc\x93y\xf6\x9d\xff\xe7\xc5g\xf1\xaf\xeb\xa7\x1e\xa8\xf3\xe8\u007f\xcdS\xf1>4{ѡR\xad\xf8\xf4\x87\xdf~\x9f\x1cҝȹ\xd6Ey\xdb\xff!8V#\xe9w]\xffgnt\xf8!\x96\xe6\xf0\xf9\xcb\x04b\xab\xf1!\xfdX˃\xff\v\x00\x00\xff\xff\xd7w>\xba>\x1f\x00\x00"), + 
[]byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xc4YKs\x1b\xb9\x11\xbe\xf3Wti\x0f\xcaV\x99\xc3]'\x95\xa4x\xb3\xa5lJɮ\xac2e]\\>\x80\x83\xe6\f\x963\x00\x02`H3[\xfb\xdfS\x8d\a9/\x92\xa2*Z\xcf\xc5\x16\xd0h|\xf8\xd0/4'\xd3\xe9t´xBc\x85\x92s`Z\xe0W\x87\x92\xfe\xb2\xd9\xfa\xef6\x13j\xb6\xf9q\xb2\x16\x92\xcfᦱN\xd5\x1fѪ\xc6\xe4x\x8b+!\x85\x13JNjt\x8c3\xc7\xe6\x13\x00&\xa5r\x8c\x86-\xfd\t\x90+錪*4\xd3\x02e\xb6n\x96\xb8lD\xc5\xd1x\xe5i\xeb\xcd\x0f\xd9߲\x1f&\x00\xb9A\xbf\xfcQ\xd4h\x1d\xab\xf5\x1cdSU\x13\x00\xc9j\x9c\x83V|\xa3\xaa\xa6F\x83\xd6)\x836\xdb`\x85FeBM\xacƜv-\x8cj\xf4\x1c\x0e\x13aqD\x14N\xf3\xa0\xf8\x93\xd7\xf31\xe8\xf1S\x95\xb0\xeeߣ\xd3?\v뼈\xae\x1aê\x11\x1c~\xd6\nY4\x153\xc3\xf9\t\x80͕\xc69\xdc\x13\x14\xcdr\xe4\x13\x80H\x80\x876\x05ƹ\xa7\x94U\x0fFH\x87\xe6\x86T$*\xa7\xc0\xd1\xe6Fh\xe7)\xdb\xeb\x01\xb5\x02W\"m\xe9\xe9fB\nY\xf8\xa1\x00\x01\x9c\x82%BD½2\x80_\xad\x92\x0f̕sȈ\xb8L+\x9eɤ3\xca\x04\xce\xef{\xa3nG\xe7\xb0\xce\bY\x1cC\xf6\u007f\x06\xd5\xc1\xf3\xa0\xf83\x91<\x96\xe8e\x12\x9aFW\x8aq4\xb4y\xc9$\xaf\x10\xc8r\xc1\x19&\xed\n\xcd\x11\x14i\xd9\xe3Nw\x91|J\xfaZ3\x97\xb0s\t\x15A\xb6\xb3\xfdS{\xe8ܾ\x0f\x8a\xc7\x05\x10\x8d\x1a\xacc\xae\xb1`\x9b\xbc\x04f\xe1\x1e\xb7\xb3;\xf9`Ta\xd0\xda\x11\x18^<\xd3%\xb3]\x1c\v?\xf1\xba8V\xca\xd4\xcc\xcdAH\xf7\u05ff\x1c\xc7\x16\x17eN9V\xbd\xdf9\xb4\x1d\xa4\x8f\xfdဖ\x9c\xad\x88\xd7\xffM\xe0.\tҭ\x92]^\xdf\xf7F\xc7\xc0\xb6\x94\xa6@\x9c\r\x82hG뻢\xab\x8f3\x17\x06\xc2\xf4\xe6\xc7\x10\xca\xf2\x12k6\x8f\x92J\xa3|\xf7p\xf7\xf4\xe7Eg\x18@\x1b\xa5\xd18\x91\xa2k\xf8ZY\xa55\n]f\xafIa\x90\x02N\xe9\x04mp\x8a0\x86\xa3f5\xff\xce\xc4\xfck\xaf;X\aN\x17>\x9f\xebN\xdc\x00%;\x10\x16X\\\x1aNq :\x85\xec\x8f\xffX\xdc\xcf\xfe9\xc6\xfc\xfe\x14\xc0\xf2\x1c\xad\xf5\xf9\x1ak\x94\xee\xcd>gs\xb4\xc2 \xa7\xc2\x05\xb3\x9aI\xb1B벸\a\x1a\xfb\xf9\xed\x97q\xf6\x00~R\x06\xf0+\xabu\x85o@\x04\xc6\xf7\xe1/ٌ\xb0\x81\x8e\xbdF\xd8\nW\x8a~\xd2\xda3@\xd6\x15\x8f\xbd\xf5\xc7ul\x8d\xa0\xe2q\x1b\x84J\xacq\x0eW\xbe\x12<\xc0\xfc\x8d\x1c\xeb\xf7\xab#Z\xff\x14\x1c芄\xae\x02\xb8}\xbek{\xe4\x01\xa4+\x99\x03gDQ\xe0\xa1\x10\xed\u007f>xSH\xfc\x1e\x94!\x06\xa4j\xa9\xf0\x8a\xe9\xf6B\x00\xfd\xf9헣\x88\xbb|\x81\x90\x1c\xbf\xc2[\x102p\xa3\x15\xff>\x83Go\x1d;\xe9\xd8W\xda)/\x95\xc5c\xcc*Y\xedB\xb5\xbfA\xb0\xaaF\xd8bUMC\xbd\xc1a\xcbv\xc4B\xba8\xb27\x06\x9a\x19w\xd2ZS\x95\xf1\xf8\xe1\xf6\xc3< #\x83*|\xbc\xa3\xec\xb4\x12T5P\xb9\x10r\x9e\xb7\xc6A\xd2L\x9fm\x82\xf98\x05y\xc9d\x81\xe1\xbc\b\xab\x86\xb2Pv\xfd\x12?\x1e\xa6\xfe\xf4\x8d\x94\x00\xfd\xc0\xf1͒\xe83\x0f\xe7+\xd5g\x1c\xae\xfd\xd6:y\xb8u\xb3D#ѡ?\x1fW\xb9\xa5\xa3娝\x9d\xa9\r\x9a\x8d\xc0\xedl\xab\xccZ\xc8bJ\xa69\r6`g\xfe\xc9<\xfb\xce\xff\xf3\xe2\xb3\xf8\xd7\xf5s\x0f\xd4y\xf4\xbf\xe6\xa9h\x1f;{ѡR\xad\xf8\xfc2q5\xacTN0\x11\f\xe0\f\a\xb1\x994\xf22\x8a\xf6\x13*E?B\xaf\x11oE\xe3!\xf6R\xbb\xa2\x874\x95\xbd]\x84\xd3\xf1\xf7^OF+>\xe9\x93\xd6v\xc9\xde\xe4\xc1\xa1\xfa\x13][\xed\xcdv\x9a\x9c\xed\xd3\f\x9fʾ\x83v\xc9c9t\xed\"\xef!f\xbb\xd4ˣ\aˋ\x9f˹\xa2\xc7@\xf7W\x8b\xd36p3\\\xe1{S\x86G\x9f\x105\xfa7hh8n\x99M\x9b\x8c\xdd7\xb4\xf4\x85\xa5>O\x92:\xe4\xbeT\xa7\x97Ċ\x89\n9\xec\u007f7\xf1\xcdq\xeb\x9b4\xd7c\x95iR\xd4X\xe4>n\x8c\x80\x1e\xaeK}O\xce\x1cNI\xc5@B6UŖ\x15\xce\xc1\x99f8}½j\xb4\x96\x15\xe7\xfc\xeb\x97 
\x15^\xf1q\t\xb0\xa5j\xdc\xfe\x19\x1f\x1d-Rqm\xa3\x15\\\xd6J(\x99=\a\xe5\x81d\xc6,n\xef\xf2\xa7M\x0eN\x84\xb2{\u070e\x8c\x0e\xfa\xd0\xedɛdB#s?y븈\x80\xb8\xd19\x0e\xa2\x18\x94\xaaJ֭\x1c%\xa5\xa6^\xa2!\"|\xf3;1\x92\x02\xc7X_Ŀ\xa7\x0eL\x1e4\xa4X\x18T\xc5\x17bΤo\x13\x92\xfd:\x05\\X]\xb1݈\xdet\x12_2\x91\xf9\x92\x1f\x1d,&y!\xb9\xbf\x9f\xbb\xb4\x9f\xb3o\xee\x8f\x17tc?\x15\x8c\xddB\xbb\xefߛ\xdf\xff\xaa\xf1:;\x9c(\xe2\xacc\xc6=7\xec-:\xc2\xe7\"\x9eW=\x1e\xefڡk\x18\xa8\xba\xdb\xfc\x911j\x94\xa8\xc1\xa0G\xce[\xbac/\xb4=\xd2,\xf7\x9d\xfe9\xfc\xf6\xfb\xe4\x90\xeeXNU;\xf2\xfb\xfeOڱVI\xbfP\xfb?s%\xc3O\xcav\x0e\x9f\xbfL 6M\x9f\xd2\xcf\xce4\xf8\xbf\x00\x00\x00\xff\xffe\xe5\xd5&\b \x00\x00"), []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xdc\x18\x93Z\x98\x92\x9a\x01fAW֬#D{zSI\xa4\xbd\x16ĺv\xacV\x95\x1f{h\xe0:\x14\x8fS\x04V\xcc`\x0e*\x88}%Є\xb5r\xc7\xfeV\xb1\\\x8d\x82n\x90\xf7\xae\x86`+\x14`P`f\x95\x1eR2\x85\x9e\xfeIQ\x96#t\x8c\xa8;\xfc\xb7\x88M\x80\x04\x12\xf3\xe7-϶\xde\v \xd9tp Wh\x9c\xe6 Ou?\x86$\xcc\xf1>,2\xa5;\xdag\xe6L\x1d\u008b\xe9\x93\xf6Iз\xed3\xa3y\a\x9a%\xbc\x8f\x9a\xce\xf6\xf9\xf7$lmJN\x10ڻ\xc1\xd4\xd7\x15Z\x17U\x91\xb7\u007f\xb7\x06,J\xbb\xbf\x02n\xeb\xb7s\x10\x99\x10\x9d\xf5\xffƌ9^\xe2\xef\x0eg\xbe\xaa\xc4Ore\x0e\"q\xa5Y\xfeo\xc8\x14g,\x1e\x83\xadHfȯ\xddYW\xc0\xd7\rC\xf2+XsaQ\x1fp\xe6E\xe7\xe55\x88\x91b\xef\xe8)\x98Ͷ\uffd0gc\xda\fT\"]\x0e'{\x9f\xb8\x0e\x12\xfa\x86y\x06.\xb8\xf8\x95k,|\\\xfc\xc9Q\xb3}\xe3\x02\x8aw\x1f~\xc4|\x8a<\x90&y\x03D\xde\x1dl\xb6\xbbtp\xf4S\xd1\b\xaeO\x134\xf9\x8c\xc7\x150x½\xf7X\x98\x04b\x0e\xb3\xceߍ\x86OC\xe2\xb8ԋw\x8fq\xef\xc0\x84\\\xca\xec\xecTQ\xf0\xcf\x13F\xfc\xfd\xd8\xd3# \xed)D\xb8\x9e\x92\xf4\xc2\x11\xc2E\xde\xe9\xc4\x03\x97\x17\xabu\xd1\xb7]o\x87@\x9c\xd5ۡ\x05\x1aCwJj\xe6\xae\a\x80Le\xed8\xbb\xbd7Rs\x84\xe7\xb3B\nP1\xf7I/2\x9f\xc1\x8f\xf6\xbdK#e\xf0(vǻ.I\x9c\xedNH\xcf\x12}Y\xb4\xed\n\v\x97\x14\xd4;\\T\xf2I\xaag\xb9p1\xa5\x99\xcd\xd67\x8b\x9f\xac8\xbe\xa6\xd2\xe8\x8bW\xba\b\xd4\xf6\xf7\fJ!\x99\xcdG\x05\xc6SR0\xa7\x86|\x1b\xebȟ\xb3\xbb\x98Z\u007fbr\xa89\xde\xfa\xfe\xd3\xd4ަ\xbb\xf8\xac\x8e\xff\xf0\xbcE\xbbE]7\xb6.\\\x0foL\xad\xb6\xa5\xc9\xd6\x15n\x9a\x9dH~jo\xca7\xe1\x1d\xb4?\xc5}eY\tqE\x82\xcd*a}+\xaa\xae\"B\x94\xd4\x03\xb4RJ ;l\x8bM)\xa2ϕ\xce\xfb\xfd`M\xe9\xban\bS\xf5\"\x11\f=/}\xd7g\xb7.ۯ\x81\xbb\xecO\xbdӿ\xbcU,\xa1\xbc=SԞn\xa0\x9b\xa2\xd7Pl\xba\x14ke0\x8c\v\x9d\x95\xdf\x14\xf9f\n\xd1\xe3\xe5\xe7\x90lE\xcbvo\x97\xfd\u007f\xac\n\xc5h\x97Y\x88\xa0\xf2\xbcm\xf2\x04\xce\xf2ʜ\xefx^1ѓ\xc0\x0e\xcdZ҂\xd2 
\xb9\x88ա\x88\xe6\xf5\xfc\x1e\x8d\xe1c\xe9\xf3\xd1G\x9f\xd5iw'\xadf}r\xa5\xba_\x89\x1e\xd1\xe0Ǧf\xd3[\xf2\xd2k\xd1\xd3\xc5\xe3c*Ї\xf5\xe5Q\xa0\xf3u\xe7\x14Ou\xa6\xc6|Be9\xb1\xab\xe8\xc5\t\xe8\x94\xda\xf1I\x15\xe3\xd9ƛ\xc4:q\xbf\x02<\r\xf2\x88\xeap\x12q\xe6+\xc1G\xd7\u007fC\xbdu\x12\x8f\xe4\xaao\xa4\x9e;\tx\xb4\xd6;Uŝ&y\xa4\u009b^\xbb\x9d\x04\xed\xea\xba\xf3\x15\xdb\xd7\xeb\xcbz\r\x17y\\\xd5\xccV]_\xe4B'\xd4U\x8f\xa9\xa6\xceR\xec\xc4\xcaiS\x19\x1dY\xf7\xd8zi\xbf\x1e:\x024\xa5J:R\x05\x1d\x818Y\x1bM\xad}\x8e\xc0\x9e1\xbb\x93R2\xf1g\xe3u\xff\xc6ʒ\xcb͐\xf3\xa9\xf21)\x1b\x83\xd2iw͞pt\x9d\xe3^X\x11[\xd2\u007f\x90\x18\tA\xea\xa4\x13\x97V-\xe1\x9d\xdc\x0f\xe0\xbaf\xe8\xa8\xcb\xdd\xffb\x85\xb6\xf5̅\xe8~\x95\xe1\xc0vA\x85\x0f\x9cL<\x10\xa6\x81c\x1f3E\x99\xa2t\xcfߝ\x8b8>\x1e\f鈴\xa6\xfd\xe7\x98\xeb\xcc\xed\xf6D\xff\xb9\xa8\x84\xe5e\xf4\x10\x97Z\xed\xb8K\x8amq\xdf\xd0\xf3\x0f徇X\xed\x1d\xa4\x8f\x0f\xcd\xf9Z\x1e\x84\x02,v*\x9eQ\b`f\x88~\xe6\xbf\t\xcc\xd4\xc2}\xe6C\x9c\xac\xe5!|;x\xe5\xce`,@\x95\xf5\xd7j\x05\x81q\xdf\x15\x9aH\x02`ԺL{\xb8\xde\x19w\xef\xfe\xacP\xefA\xed\\\x154\xb8<3\r\xc7^S\x98J\xb4\x1d\x1eA\x01\xfa/Q\x0f<\xffVc\xc0;\xe9mp\x14\xec\xc1\x1e\x1d\x1cRZm\xb4C\xfa\x99\x02\x99\x91\xa1Q\xa8R5\xb3\xe3\xf20ijR\xbbu\xcf\x1b\xfb\x1c\x1f\xfd\xcc\xfa\x1dg\x89\x80N\x8f\x81&@\xa6vߦe\xecg\xbbm\xcf\x15\v\xcdEC\xc9n`Z7\xed9\xbah\x8f\xe8\x9e=\"*:..J&SJ\x97\xecY\xa2\xa33\xc6G爐N\x8b\x91f@\x1et\xbf\xa6\xf4\xb5&\x15\x99\x92K\x14)E\xa5\xf9\xba\xe6t\xbfjB\x9fjB\xf1cn\xa7\t\xfd\xa8\xc7\xf5\xa1&\xd0\xf0L\xd1ә\xe2\xa7sDP獡f\xa3\xa8Yə\xfc\xfb\xe4\x1cy]M\xfd\xa0r\xbcW\xda\xce9\xfc\xf7\x87\xe3#\x15\xacN\x10\xa4D\x0e\xb2\x1e\x1aA\xca\xf9\xf2\xc1\x8f?\r\xa9x\xb1)\xac\u007f\xffy\x0e\x9f\x87f\xe04\"\xe4\x92\xd6\xf1Y\x04\x0f\x9a\xefp1\x92\x95f\xab,|\xb7\xe3,\\(\xa2\xaa<\x04!\xfa\xfbs`\xf9h\x99\xad\x12\x11\xf5c{\xb8\xf2l۩\xe7G\x10\xb2\x91\xb8N\x1e\x137\x8b;\xdf\xdfV\xe5\\\x9b\x1f;\xd5\x1f\xb86\xf6S\x99W\x8a\xe5\xad\xf1l\xad\xe6bW\xe5L5\xf5g\x00:\x91%^\xc1G\x1a\xaad\t\xa6g\x00~bv\xe8\x15\xb04\xb5\xa4b\xf9gŅAu-\xf3\xaa\b$ZA\x8a:Q\xbc4\x96\x14w\x86\x99J\x83܂ɰ=\x0e\x95\x9f\xb5\x14\x9f\x99ɮ`\xadm\xbbu\x991\x1d\xbe:\x129\x00\xbe\xca\x1c\b7m\x14\x17\xbb\xa1\xd1\xde\xc1\xb5\x92\x02\xf0k\xa9P\x13ʐZΊ\x1dCș6`x\x81\xc0\xfc\x80\xf0Ĵ\xc5a+\x15\x98\x8c\xeby\x9a\x10\x90\x0e\xb6\x0e\x9d\x0f\xfdj\x87P\xca\fztZ\xa0\x82T\xaf\x8f$\xb2\x03\xf3\xdd\x0e\x87\x81\xb9\xcf\xfb\xb7Nn\x92\f\vv\xe5[\xca\x12ŻϷ\x0f\u007f\xbd\xebTCO\x0e\xfc,\x81k`\xf0`\x85\x1a\x94_~`2f@!q\r\x85\xa1\x16\xa5\xc2U\xa0LZ\x83\x04\x90\nJT\\\xa6<\t\x14\xb5\x9du&\xab<\x85\r\x12q\xd7u\x87R\xc9\x12\x95\xe1aٸ\xd2R\x13\xad\xda\x1e\xc6ohR\xae\x95\x93\"\xd4Vp\xfcb\xc0\xd4\xd3\xc1\xc96\xd7\r\xfe\x96\xc0\x1d\xc0@\x8d\x98\x00\xb9\xf9\x19\x13\xb3\x86;T\x04&`\x9dH\xb1GE\x14H\xe4N\xf0\xff\u05305I\xac\xb1\x82dЯ\xe5\xa6\xd8\xc5'X\x0e{\x96Wx\x01L\xa4P\xb0\x03(\xa4Q\xa0\x12-x\xb6\x89^\xc3?\xa5B\xe0b+\xaf 
3\xa6\xd4W\x97\x97;n\x82zLdQT\x82\x9bå\xd5t|S\x19\xa9\xf4e\x8a{\xcc/5߭\x98J2n01\x95\xc2KV\xf2\x95E]X\x15\xb9.\xd2?\x05\x8e\xea7\x1d\\\x8f֊+V\x89Mp\x80\xb4\x99\x13\x18\xd7\xd5͢!4U\x11u\xbe\xdc\xdcݷ\x85\x89\xeb>\xf5-\xdd[\x12ְ\x80\b\xc6\xc5\x16\xfdj\xdc*YX\x98(\xd2Rra\xec\x8f$\xe7(\xfa\xe4\xd7զ\xe0\x86\xf8\xfeK\x85\xda\x10\xaf\xd6pmm\x06\xc9aU\xd2\xeaI\xd7p+\xe0\x9a\x15\x98_3\x8d/\xce\x00\xa2\xb4^\x11a\xe3X\xd06w\xfdƎj\xad\x0f\xc14\x8d\xf0+\xac\xf1\xbb\x12\x93Β\xa1~|\xcb\x13\xbb0\xac\xe6\xabU@O\xfb\xb92\xbcj\xc1\xab\x1ejޯ\x9f\xd46\xb16\xe1\b&x\x15\xb3>\xfa2BM\xfb\t\x8b\x92\x96\xeb\f\x8a\xf7\xbe\x19\xa1H4Jk\x17$\x18ˠޤ\xd7jp\xa4T\xecp\x19\x12\xbd\xf6<\xf5Z㈚\xd3\x14\xa5\x92h~'X\xa93i\xc8.\xc8\xca\f\xb5\xeaM\xe0\xfa\xee\xb6\xd7)\xf0\xd9s\xddڽJcJSxb\xbc\xbf~B!y\xb8\xbe\xbb\x85\ar#0\xc0\x04g\xfd\xc0TJX5\xf8\x05Yz\xb8\x97?i\x84\xb4\xb2\xda ز\x8b\x11\xc0\x1b\xdc\xd2bSH0\xa8\x03*E\xb2\xa7-j\xb22kk\xa4Sܲ*7^\xb9p\ro\xbf\x83\x82\x8b\xca\xe01\xdfa\x9a\xf7\x8eH\x16\x9c\x9b\x8d\xbe\x97_P\x1b\x9eD\x10\xf4\xfd`\xc7\x16Q\x9f24\x19*\xd2t\xf6\x835\x1e\xa3s\xafIo\xd8#\xf9\x1f\x1b'Nd\x88\xf2\x1cJ\x99\xc2ލ\x04\x9bC@zj\xc2\x1b)sdC\"\x88_\x93\xbcJ1\xad}\xc6A!\xeb\xcd\xf6樓\xf5\xae\x19\x17\xb4dɗ%TE\xfdud\x9e\xd6\xf83\x85@Z\x97\v\a\x13\xb8\xf3\xf16#\xab\x97\n7X\x8c\xe09\xcbb\xb0^<\xdb\xe4x\x05FUC\x8a#\xc0`J\xb1\xc3\x04\xcdB\x04\xb2\x84du\x1fo\x1bs\x9e \x11\xab\xb6\x80\x96j\x964#\xf3\xfb?$X&\xe5c\f\x91\xfeA\xed\x1aK\x0f\x89\r\xf4`\x83\x19\xdbs\xa9t\xdf]į\x98T\x06\xc7\xd6\x113\x90\xf2\xed\x16\x15\xc1\xb2\xd1I\x1d\xccL\x11kZ\xdfRQӌ?\x9aW\xc3tb\x9e\xa5\xc6\xd8T\xac]\x1b\x85\n\x16qR\x87U\t\\\xa4|\xcfӊ\xe5\xc0\x856L$n~\xac\xc6ox~0'\x10G\xf8;k\x16fA\\\xea\xb8\tR \xf9\xf6\x85T\xc3\xc2\x11\xca1\x98q2l\x18i@9fۛ\xa2($\xf6\xa8\xa4\xd6\x1e5z\xe7\xa2\xe1\x94\xf3\xb0s\xb6\xc1\x1c4\xe6\x98\x18\xa9\xc6\xc9\x13#\x04\xae\xc4\xea\xcf\x11\xca\x0ehҮ!\x9eU\xa2M!K\x9d\xf1$s\xce0I\x99\x85\x05\xa9Dm5\x06+\xcb\xfc05i\x88\x91\f?\u061c\xd2hJ\x84\xfa\xe8\xc3\x1dS$M\x89\xd4\xc1M\x99\xd1\xc6]\xaa\xd7b\xf3J\xf4\x0e\x9a⛄\xfd\xf6\xa8\xfb\xf3\v;\x91\x9bS\xb0|\xbb\x05,Js\xb8\x00nBm\fTr\xb0\x1a<\xfe`\x8c;m\xb5\xdc\xf6{?\xfbjy\x16\xae\xd5h\xfcA\x98f\x8d՝\xb7U\x8b\x18\xf6\xa1\xdd\xf3\x02\xf8\xb6fXz\x01[\x9e\x1b\xb4\xbe\xd4\x1c\xa2-Gg\x96s\xcfI\xa0X\xdbK\xa5`&\xc9n\xea\xfd\x81\x88\x1e=Z\xf5\x018\xbf<\xc40\x96\a\x11 \xa1v*\xec\x96\x12WX\xb8\xad\xaa{\xbb>\x9a\x1a\xeb\x01\xbe\xfb\xf8\x1e\xd39\x92A\xbc\xa4\x1eM\xea]\xcf\xd3i\xa3`'\x18\x05\xb25)\xeb\xa6\xd51\x9eې\xbc\x00\x06\x8fxp\x9e\xd5`p9T\x88\xb5\xac\x06\xa9\xd0\xee\x8eZ5\xf2\x88\a\v\xcaowF\xc1[\"*\xae<\xe2!\xb6i\x8f\xa8\x84\x9f\xdf\xf0qԥ\n;\x8b\x98\xa5Ԕ\x9a\xa8~퀑q\x93\x85eJ)\x94@\xf1\x13\xa7]3\xac\xb3\xc7\xff\x88\x877ڱ\x8fVM\xc6\xcb\x05\x14 \x85\r\x1a\xed\n\v\x9b\xdb\x0f,\xe7i=\x98]'\v 
ފ\v\xf8(\r\xfds\xf3\x95kBQ\xa4\xf0^\xa2\xfe(\x8d\xadyQ\x12\xbbI\x9cH`\xd7\xd9.K\xe1\xcc\x02\xd1e\xd1\xf8\r\x0eք\x92\x88\xd6l\xe3\x1an\x05\xc5g\x8e>Kؔa@ΡUT\xdan\x8f\v)V\xd6L\x87\xd1\x16\x00m\xe3\xe5Y%U\x87S\x17\v!\x0e\xa2\xe8ѻ'k\xe5\xbe\x1c\x1d,L\x15\x85e\xce\x12L\xc3v\xa5=\xc5`\x06w<\x81\x02\xd5\x0e\xa1$\xbb\x11/T\v4\xb9+'Ha\xbck\x11\x8a7\vi\x1cb+Z\xf5\x91-\x03\x9b\xa3\x9a\x8f\x1cYL7\x8f\x9b\xa55\xef\xd6\x1f\x8a\xa2~\xfb||\x99eYȯc\x1f\xc4!\xe9\u070f\x82\xd9\xcd\xde_ɼZ\xf1\xfe-\xce\x1a2\xae\xf4\x1a\xde\xd9\xec\x80\x1c\xdb\xfd\xc3.ak\xa8(\x90\x84\t\xd7@r\xb2g9\xb9\x0f\xa4\xbc\x05`\xee\x9c\t\xb9=\xf2\xa0\xe2T\xccS&\xb5\xb3\xf9[\x8e\xb9=*<\u007f\xc4\xc3\xf9ő\xf6:\xbf\x15\xe7q0I\xe7\x1f)\xad\xdak\x91\"?\xc0\xb9\xfdvn\x1d\xb3%K\xe4\x04\xe7m\x81TG7\xb5g\xf9KB\x01\x8a\xb5\x83\xd7B\x9d\xeb\x13or\xe1\xe7f\x11-ӥ\xd4#\xc7N#h}\x96ڸ\r\xc0\x8e\xbb=\xb0C\x18\x13\xfd\xf9]C`[\x83\n\xb4\x91*\x9c.\x93\xda\xedm\x90\x13\xe7\xf5<\xef\x89\xd5\xf5n\xa4\x03LA\xe6y\xa3!\x9cN?w\xc7\xce\xf4\xf7<\xcc\xc4:K\x16v\xa9d\x82ZϋR\xa4\xe5\x98ٰ\xad7k\x99\v\u07b6Q\xaa9f+9\x94e\xae8\x91\xf6\x84\xc0\xe6\xe6kkߙ\xd4\x10\xfd\x8e\x11\xe5Sp\x04\x9b5V\x14\xac\x9f\xe9\x10\x8d\xee\xb5\xeb\x1d\x16\xa0\a\xe6\x02&\xb5\xab\xacRY\xe67{\x91\xfc\xbd9\x1e\x05\x17\xb7v x\xfbb\xce\n\x04U\x8e\xa7\x862ס\u007fÐ\xba\"6~\x85pn/\xedY\x8d\xc2\x0eg\x8fO2\xe29\x05\xe4L\viڛ5~\xa47\x1a\xb6\\i\xd3 \xbc\x00*\xd7\xf68\xf9ecLq\xa3\xd4\xc9!\xe6'\u05fb\xb5\xad\x98\xc9'\x9fe\xb2$\xb0\x0e\xc4\xcf\xd8\x1e\x81o\x81\x1b@\x91\xc8J\xd8\r/R\x174\xcc\x02\x88\x8e\x89ΘD\xda\xccVgQ\x15\xf1\x04YY\xe9\xe4bvw\xac\xdd\xe5\a\xc6\xe3v\xa7\xe04\xb6\x9a\xa9\x8c\x92\xa1\xd2M\x93\xf1\xa9%\xedt\xa2\x82}\xe5EU\x00+\x88-K\xe2ƭKJ\t\xb9G\x8e\xd7O\x8c\x1b\x9f\x8e\xe9\x0eV\x97i\xd3D\x16e\x8e\x06C\xbaI\"\x85\xe6)\xd6\xee\x83\xe7\xff`\xf2\xceXa\xb0e<\xaf\xd4\x02\x1d\xbd\x983K\xe36\xaf\x9e\x9e?\x18\x8bGde\x89\x19\xb9\xe9\xbe\xc0i\x9e\xb7\x1f\xa5Z\xe62\u007fV\xf8\xfc\xaei\xa98I\xa9\x9c\xf3NgaZ\xef\xb5\xeb\x9dz\xe1e\xe20\xe6\x9e\xceB\xb5\x98\xbc\xba\xa7uyuO_\xdd\xd3W\xf7\xb4W^\xdd\xd3W\xf7\xf4\xd5=\x1d.\xaf\xeei\xab\xbc\xba\xa7\xd1\xf6#\x06Õݹ\x9dh\x10\x85Ud\n\xc6\x1c\xda3c\xf9L\xa3\xeb\xbc\xd2\x06Ւ\f\xe9\xdb\xe1\x9e\x039\xf4\x89k\xb2\xb2\xd7\x1dǤ\xa6I]i\x8c^\x9d2MK2,&w\xb1%\xc2\v\x8fN\x83\x1e϶\x8fM\xa0\x9bK\x9b\xeb\xe6\x8e\xd7\xe9j\xee\xaf\x11\x82\x18\x19\x86\xf7\xdcs\x17\xa6\xda9W\xdd\xdc7\x1b\a\x04\x8c\u007f\x97y\xe5\x91im3\xc9lӉ\xf8c\x16>вw\xb8\xd0%\xa6\xea$~\xff\xaei\x19\x91m6\x9ec\xe6O-Ѱ\xfd\xdbu\xf7\x8b\x91>\xe3ldfO\xdcd\xee2\x17\x85\xaeb\xd7Nk\x0fr\xea/R\xf6i<\x02Q*\x10\x11\x90*E5{\xb8\xb2D4g\x85\xb2\x17\x13u\xc7\xef\xbd:\x10\xdeĢV탛1\xee\xc8\xfa\x16|\x02?r\x91:ސ\x10\xb6\xfc\v\xfbj\xa1\xbd'S;>\xe3b\xd4x\x9b\xbdC#\x8d%#5j\x83\"{\xb8\xad\xd7pÒ\xacn8\x02ю\x9c1\r[\xa9\nf\xe0\xbc>\x8d\xbb\f=\xa9\xe6|\r\xf0\x83\xac\x0fB[\xaf܌\xc0ռ(\xf3\x03E?p\xde\x05\xf4m\xa23*~ڿ\x04\xe7\x1fD\x8b\x88\x80\xef\xba=\x06\x8e}\xc3shI.\xab\xb4\x1ea\x82\xddL\x1c\xe0\xf3\x83\xf5\xa6\xec#PI\xf3X\x96\xf7\x95B$\xdc{Kk\x04\xe4\u0603\x82\x8bH6~8\xac\x8dTl\x87\x1f\xa4{k1\x86f\xdd\x1e\x9d\xe76\xbd\xae\n\xa9\"\xfe\xeeר$\xbb\xb9\xf5\x016\x19d~\xb55g\xe9\x84\xed\x98\x12\x9bY\xe7\xc6\xe4\x11\x93\xbb\xbf\xff\xe0&dx\x81\xeb\xf7\x95;\xa8_\x95Li$J\x87\x89\xbaN\x9bq;\x97\xc9'ȥ\xa7\xc3\xf7\xfdy(\xb4\x19k6'\xe0\xa4\xd9\xec;O\x1f\x06\xd2ň\xfc\xc3p\xcfV \xdbb\xe2\xd4Ѿ\u070e\xc2bZ˄[]d\xb7\x84l\xa2\xd8\xcb=\x157eQ&TF\xa5\xf1ӓ@\xf5%,T}+\x1c\xa7f^\xe2\xfc\xe9\xa8c`\xf0\x90\xfa 
\xfd\xd7k>d\xf7\x84'\x90v\xafT\x86\xbd-\xae\xeb\xb7I\x8fI7\xb3\xfe\xc7\xd7\xfe\xb0\xef\xba\x1a~\x0etU\xbfPz\x16AY\xf7\ng̣\xaf\xee\xb9΄\x95\xa6R\u07bc&\x95\xb2\xef\xe6\x11\x10t\xcfʝ\xf6\xeck\xf3\x04\xf6\f/\x9bG\xb1\x9bh\u007f\xf6\t\xee\x01\xfe\xd5\x0fȎ\xbe\xa8ꬫ{\"{E\xf0Oc\xe7\xe0:\xb0\xef\f\xce\xcc\xf43\xb5\xa9\x93|=\xa1m\xc7\xf0>\xe1\xdd\x18\xea\xc3Y\x9b+\xf8\x88O\x03\xb57\x82&q|\xa6\xe6R31\xb5{\x04CO^ONq_\xf7\xb2y\xb1\x03ڢ\xab\xe6z\xcd{\t7,\xcf[\x10]\x0e\xec\x10[\xff̷n\x03'\xa19\xfd\xe5\xa8Ũ\xe2\x9aTZc\nkpI\x1dUjT{L[B\xe2mx\xbb\xa6\xda4\xcfE¯\xbf\x9d5\xab\x92%\t\x96\xc6'v\xb5\xffk\x80\xf3s\xfb#\xbc\xfco\u007f&R8G[_\xc1\xbf\xfe}\x06\xde\x00?\x84\xe7\xfd\xa9\xf2\xbf\x01\x00\x00\xff\xff\x9dJq\x1dHa\x00\x00"), + []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xec=]s\x1c7l\xef\xfa\x15\x18\xf5\xc1\xed\x8c\xee\x14O\x1f\xdaћ++SM\\\xdbc)zi\xfb\xc0\xdb\xc5\xe9\x18\xed\x92\x1b\x92{\xf25\x93\xff\xde\x01\xc8\xfd\xbc\xfd\xe0\x9e\xa5i\x9a\x11_\x12\xed\x91 \b\x80\xf8 A\xf8l\xb5Z\x9d\x89B>\xa0\xb1R\xab+\x10\x85\xc4\xef\x0e\x15\xfde\xd7O\xffj\xd7R_\xeeߟ=I\x95^\xc1ui\x9dο\xa1եI\xf0#n\xa5\x92Nju\x96\xa3\x13\xa9p\xe2\xea\f@(\xa5\x9d\xa0ϖ\xfe\x04H\xb4rFg\x19\x9a\xd5#\xaa\xf5S\xb9\xc1M)\xb3\x14\r\x03\xaf\xa6\xde\xff\xb4\xfe\x97\xf5Og\x00\x89A\x1e~/s\xb4N\xe4\xc5\x15\xa82\xcb\xce\x00\x94\xc8\xf1\nl\xb2ô\xccЮ\xf7\x98\xa1\xd1k\xa9\xcfl\x81\t\xcd\xf6htY\\A\xf3\x83\x1f\x140\xf1\xab\xb8\v\xe3\xf9S&\xad\xfb\xa5\xf3\xf9\x93\xb4\x8e\u007f*\xb2҈\xac5\x1f\u007f\xb5R=\x96\x990\xcd\xf73\x00\x9b\xe8\x02\xaf\xe03MU\x88\x04\xd33\x80\xb00\x9ez\x05\"M\x99T\"\xfbj\xa4rh\xaeuV\xe6\x15\x89V\x90\xa2M\x8c,\x1c\x93\xe2\xce\tWZ\xd0[p;l\xcfC\xed7\xab\xd5W\xe1vW\xb0\xb6\xdco]섭~\xf5$\xf2\x00\xc2'w ܬ3R=\x0e\xcd\xf6\x01\xae\x8dV\x80\xdf\v\x83\x96P\x86\x949\xab\x1e\xe1y\x87\n\x9c\x06S*F\xe5\xdfD\xf2T\x16\x03\x88\x14\x98\xac{x\x06L\xba\x1f\xe7p\xb9\xdf!d\xc2:p2G\x10aBx\x16\x96q\xd8j\x03n'\xedh\x1ao`%2؋\xac\xc4\v\x10*\x85\\\x1c\xc0 \xcd\x02\xa5j\xc1\xe3.v\r\xff\xa1\r\x82T[}\x05;\xe7\n{uy\xf9(]\xa5b\x13\x9d祒\xeep\xc9\xdaRnJ\xa7\x8d\xbdLq\x8f٥\x95\x8f+a\x92\x9dt\x98\xb8\xd2\xe0\xa5(\xe4\x8aQW\xacf\xd7y\xfa\x0f\x15G\xed\xbb\x0e\xaeG\xfb\xcd7V\x84\x13\x1c \x8d\xe8\x05\xc6\x0f\xf5\xabh\bM\x9f\x88:\xdfn\xee\xee\xdb\xc2$m\x9f\xfaL\xf7\x96\x845, \x82I\xb5Ű\xa3\xb7F\xe7\f\x13UZh\xa9\x1c\xff\x91d\x12U\x9f\xfc\xb6\xdc\xe4\xd2\x11\xdf\u007f/\xd1:\xe2\xd5\x1a\xae\xd9\xee\x90\x1c\x96\x05\xed\xc0t\r\xb7\n\xaeE\x8eٵ\xb0\xf8\xea\f J\xdb\x15\x116\x8e\x05m\x93\xd9\xef\xec\xa9\xd6\xfa\xa12o#\xfc\xaa\xf6\xf8]\x81Ig\xcb\xd08\xb9\x95\to\f֞\xb5\n\xe8iP߆w-\xff\xc2j\xaa\xff\xb5\x87\x87\xd7eլh\xc9~\xb8\x1ds\xb81c$W\x1e\x1a\xe9\x14\xa5\xfb\xdc\x1d҂-J\x04(3\x98t\xb5^\xac};\x82\tAխGp<\xe2*\xff\x84yAjc\x06\xc5\xfbЍP$\xfa\xa4\xb5;U\x19\xfeJ\xcd\xea\xa0]\xe1H\xb9\xf1t;$\xbe\xede\x1a\xb4\xd7\x11Wa\x92\xb3\xd4\x12+\xef\x94(\xecN;\xb2q\xbatC\xbdz\v\xb8\xbe\xbb\xed\rjq\x9e\xb0b\x1bΌv\x1a\x9e\x85<\xe6\xb4o$\x97\xd7w\xb7\xf0@.\x11V0\xc1[rp\xa5Q\xac\x8e\xbf\xa1H\x0f\xf7\xfaW\x8b\x90\x96\xac\x95*\xbb|1\x02x\x83[\xda\xf4\x06\t\x06\r@ch\x0fXFM\x97n\xcd\x0eG\x8a[Qf.(9i\xe1\xfdO\x90KU:<\xe6;L\xf3\xde\x13\x89\xc1\xf9\xd5\xd8{\xfd\xb3\xf5\x8c\x8c 
\xe9Ǒ\xa1\x03[\xaa\xd0)\xec\xb9\xdf\x18Ue\x86`\x0f\xd6a\x0e\x9b\x00\xa5\xb6\xd5\xcc\x15\xd6\aY\x16\xc0X\xd8\x1c*܇\xd7M^\xb8\xd8dx\x05Δ\xc3\xd3Nm\xdd!\xda|C\xebd\x12A\x99\xf3>i\xfc\xc8\x01\xc2\x18\xfea\x84(=\n\x90\x91\x17O\xe4h\x06\n\x91\xb7\x90e-\xe2\xceS\x05\xe0\xbf\x14|$\x03\x97\x90ٹ\n\xe6Lb\xc6&TiȴzD\xe3g$W\xe1Yf\x19oi\xcc\xf5\xbe\xe3d\xb5\x1b\xd9\x16\x83\x19\x19Iؖdv\xd6@\xb2?*#RY\x87\"]\x9f\xbf\x16\xf3\xf0{\x92\x95)\xa6u\x983\xa8Kz\x8c\xbb9\x1a\xc4\x01\xa1\x90\x8a43\x85_DtU\xff:B\r\xf65\x85A2\x18 \x95\x87I\xa4!E\xb3\x19Q\xd2Ԥ\xc3|\x04\xcfٝ\xbc\x80j\xc2\x18q\x98\xa0Y\x154/!Y=&\xb8b\x99L\x90\x88U;\\L5&\xcd\xc8\xfa\xfe\x1f\x12l\xa7\xf5S\f\x91\xfe\x9d\xfa5\x8e%$|6\x01\x1b܉\xbd\xd4\xc6\xf6\xa3\x13\xfc\x8eI\xe9Fw\x9bp\x90\xca\xed\x16\r\xc1›\x8e\xbf\xa7\x885mV\xa9\x99i\xc6\x1f\xad\xaba:1\x8f\xa91\xb6\x14v_F\xa1\x02#NV\x8fuC*\xf72-E\xc6jB\xa8įO\xd4\xf8\x8d)\xb7\x19\x818\xc2\xdf+\xa3j\x15ĥ\x8eW\xaa\x15\x92ۗk3f\xb7|;\x063N\x86\x8d`gr̅k\x9a)3\xb4\x01\x15o\xfe\x1a\xbds\xd1p\xca\at\x99\xd8`\x06\x163L\x9c6\xe3\xe4\x89\x11\x02\xdfb\xf5\xe7\be\a4i\xd7ߚU\xa2M#\x87l'\x93\x9d7V$e\f\vR\x8d\x965\x86(\x8a\xec0\xb5h\x88\x91\x8c0ٜ\xd2hZ\x84\xfa\xe8\xc3\x1dS$M\x8b\xd4\xc1M\x9b\xd1\xc6]\xaa\xd7b\xf3F\xf4\x0e\x9aꇄ\xfd\xf6h\xf8\xcb\v;\x91[\xa2]\xc3\xed\x160/\xdc\xe1\x02\xa4\xab\xbe\xc6@%W\xb1\xc1\xe3oƸ\xd3v\xcbm\u007f\xf4\x8b\xef\x96\x17\xe1Z\x8d\xc6߄il\xac\ue0adZİO\xed\x91\x17 \xb75\xc3\xd2\v\x8a!\x1d\xb2/5\x87h\xcbљ\xe5\xdcK\x12(\xd6\xf6R˅Kv7\xf51PĈ\x1e\xad\xfa\x00\xbc_^\xc50̃\b\x90P;\x15|\x82)\r\xe6\xfed\xf4\x9e\xf7G\xf3\x85=\xc0\x0f\x9f?b:G2\x88\x97ԣE}\xe8y:m\x14x\x81Q [\x8bb7\xad\x8e\xf1\xfc\xf9\xf7\x05\bx\u0083\xf7\xac\x06\x83ˡF\xac\x155H\x83|\x18\xcfj\xe4\t\x0f\f*\x9c\xaeG\xc1[\"*\xbe=\xe1!\xb6k\x8f\xa8\x84_8\xd7\xf3ԥ\x0f\xbc\x8a\x98\xadԴ\x9a\xa8a\xef\x80\xd3q\x8b\x85eJ\xa9j\x15\xc5O\\vͰΕ\xd2\x13\x1e\xdeY\xcf>\xda5;Y,\xa0\x00)l\xb0\xc8;\xac\xbaKy\x10\x99L\xeb\xc9x\x9f,\x80x\xab.\xe0\xb3v\xf4\x9f\x9b\xef\xd2\x12\x8a*\x85\x8f\x1a\xedg\xed\xf8˫\x92\xd8/\xe2D\x02\xfb\xc1\xbc-\x957\vD\x97E\xf378\xb0\t%\x11\xad\xd9&-\xdc*\x8a\xcf<}\x96\xb0i\x87\x15r\x1e\xad\xbc\xb4|\x1b\xa3\xb4Z\xb1\x99\xaef[\x00\xb4\x8dW`\x956\x1dN],\x848\x88b@\uf7ac\x95\xff\xe5\xe8\x1ek\xaa\x19,2\x91`Z\x9dJ\xf3\xa5\x99p\xf8(\x13\xc8\xd1<\"\x14d7\xe2\x85j\x81&\xf7\xed\x04)\x8cw-\xaa\x16\xcc\xc2\xc0\x1d\xd0P[Ѯ\x8f\xecY\xb19\xaa\xfb\xc8\r\xd9t\xf7\xb8U\xb2yg\u007f(\x8a\xfa픎e\x96e!\xbf\x8e}\x10\x8f\xa4w?r\xc1\xc7\xd6\u007f\x90ye\xf1\xfe3\xce\x1a\ni\xec\x1a>pBK\x86\xed\xf1\xd5)ak\xaa(\x90\x84\x89\xb4@r\xb2\x17\x19\xb9\x0f\xa4\xbc\x15`\xe6\x9d\t\xbd=\xf2\xa0\xe2T\xcc\xf3N[o\xf3\xebc\xf5\xf3'<\x9c_\x1ci\xaf\xf3[u\x1e\a\x93t\xfe\x91Ҫ\xbd\x16\xad\xb2\x03\x9c\xf3o\xe7\xec\x98-\xd9\"'8o\v\xa4:\xba+\xa7\x8e,\t\x05(֮\xbc\x16\x1a\\'X\x90\v?\xb7\x8ah\x99.\xb4\x1d\xb9]\x1cA뫶\xce\x1f\x00v\xdc\xed\x81\x13\u0098\xe8/\x9c\x1a\x82\xd8:4`\x9d6U2\x03\xa9\xdd\xde\x019q\xde\xce\xf3\x9eX]\x9fFz\xc0\x14d\x9e7\x1a\xc2\xeb\xf4s\x9f\xe5@\xff?\x0f3ag\x89a\x17F'h\xed\xbc(EZ\x8e\x99\x03\xdb\xfa\xb0V\xf8\xe0m\x1b\xa5\x9ac\x8e\x92\xab\xb6\xcc\x15'Ҟ\x10\xd8\xdc|o\x9d;\x93\x1a\xa2\xbfcD\xf9\x14\x1c\x81\x13\x1d\xf3\\\xf4\x13k\xa2ѽ\xf6\xa3\xab\r\x18\x80\xf9\x80\xc9<\x96\xacT\x96\xf9\xcdA$\xffj\x8eG.\xd5-O\x04\xef_\xcdY\x81J\x95㩡\xccu5\xbeaH\xfd!6~\x85*=C\xf3]\x8d\xc1\x0eg\x8fo2\xe29\x05\xe4L+\xedڇ5a\xa6w\x16\xb6\xd2X\xd7 
\xbc\x00\xaa\xb4|M\xfd\xba1\xa6\xba1\xe6\xe4\x10\xf3\x8b\x1f\xdd:V\xdc\xe9\xe7\x90Դ$\xb0\xae\x88\xbf\x13{\x04\xb9\x05\xe9\x00U\xa2K\xc5\a^\xa4.h\x9a\x05\x10=\x13\xbd1\x89\xb4\x99\xad\xc1\xaa\xcc\xe3\t\xb2b\xe9\x94j\xf6t\xac=\xe4g!\xe3N\xa7\xe04\xb6\xba\xa9ġ\xa1\xd6͆\n\x19D\xed\xec\xb5\\|\x97y\x99\x83ȉ-K\xe2ƭ\xcf=\xaaR\xdd<\xaf\x9f\x85t!\x83\xd8_\xac.Ӧ\x89\u038b\f\x1dVYE\x89VV\xa6X\xbb\x0f\x81\xff\x839ZcM\xc0VȬ4\vt\xf4b\xce,\x8dۂzz\xf9`,\x1e\x91\x15\x133\xf2\xd0}\x81\xd3\x1f\xa8\xc5\xe7\xa5-\xcdF\xab\xf3\x87\xe6\xad\xca\x0f\xe4\xa0-{ 0\x9bo\x16\x834\xc4d\x99\r\xe7\x8f\xcd@]\x92[\x16\x1b\x83G\xe4\x91\xc5g\x8fő\a\xf8\xb5}l\xceX\xb4\xd7\x16\x9b\x1f\xf6:Ya\x91\xb9`\xad\f\xafY\x90'f\x80E\x13,.\xdb+:ǫ\x95\xb95O\xad\x89̮\xe1|\xadY\x90C\xf9\\1YZQ\xb8F\xe7f\xd5\x19W\xf3'\x89?\x94\x91\xf5\xf2\xb9\xdf/\xe9\xe7O\xe7WEeUE\xc5\x02\xf38G\xe5M-͖\x8a\xa2\xea\xd2̨:\xebib\xe2\xa8|\xa8\xe3\\\xa7\xa9\xa5\xccfA\x8dg8M\x81\x1d\xca}\x8a\xc8k\x9a\x00\xd9\xcexZ\xec\x06\xccJ\xd3L\x87\xe1\x8a\x18U\x9b\xb7\xb5\xd9\xff\x85\x04\xfe袵\xe9\xb8\xc01Qɗ\xde\x10\xe2}\xe5\xf5\r\xb9\xd5\xe31\x9ew\xb6Op\xabG@\xden!/3'\x8b\xacU\x92\xc2\xed\xf0P?y\xffM\xf3\xd3\xcb́\xa1}\xf9V\v\xf0\x18\xc8n\x80 ,\xe8\a]?\xfc\xcc%\xfa\x1a\u007f\xd1]R\xec\x15|\xdc5\xcf\xfc\xf3\x95\xc8g+\x91\x97@1\xd8G>OY\xfe,%\x92\xce'\x06[\x93SG>?Y\x14n\x9d\x18pMB\x9czn2\x1drM\x1f\xa7\xf5\x9f\x99\x9c\xe0NDH\xd8l\x97\x1f\xbe\x11\xd0&E3{\xb9\xb2D4g\x85\xb2\x17\x13u\xe7\xefU\x1d\xa8\xaa{Q\xaf\xf6\xc5\xcd\x18wt\xfd\n>\x81_\xa4J=oH\b[\xfe\x05\x17\xc9\xe4w2\xb5\xe33.F\x8d\xb7ٻ4\xb2X\bR\xa3\x1c\x14\xf1\xe5\xb6]ÍHvu\xc7\x11\x88<\xf3NX\xd8j\x93\v\a\xe7\xf5m\xdce5\x92\xbe\x9c\xaf\x01~\xd6\xf5Eh\xab\xca\xcd\b\\+\xf3\";P\xf4\x03\xe7]@?&:\xa3\xe2gC\xc1\xbfP\xd1,\"\x02\xbe\xeb\x8e\x18*8\x19\n\xbb%\x99.\xd3z\x86\tv\vu\x80\xaf\x0f\xecMq\x11\xa8\xa4)\x96\x15|\xa5*\x12\xee\xd5\xd2\x1a\x019V7r\x11\xc9\xc6/\x87\xad\xd3F<\xe2'\xedK{\xc6Ь;\xa2S\xdd5\xe8\xaa*U$\xbc\xfd\x1a\x95d\xbf\xb6>\xc0&\x83\xec\xa8\xda a;\xa6\xc4f\xf6\xb9sY\xc4\xe2\xee\xef?\xf9\x059\x99\xe3\xfac\xe9/\xeaW\x850\x16\x89\xd2\xd5B\xfd\xa0\u0378\x9d\xdb\xe9g.\xd7\u05ee\xbf٪p\x8c\x9c\xb1\xc69\x01'\xadfߩpY\x91.F\xe4\x1f\x86G\xb6\x02\xd9\x16\x13\xa7\xae\xf6\xf5v\x14\x96\xb0V'\x92u\x11\x1f\tq\xa2\xd8땊\x9b\xb2(\x13*\xa3\xb4\xf8\xe5Y\xa1\xf9VmT{\xab\xc6\nlvH\xf8\xeb\xd1\xc0\xd1\xe2\x9aN\xb3\xfe\xebu\x1f\xb2{*\x10\xc8\xfab\xa4\xd5ٖ\xb4u\t\xdac\xd2\xcd\xec\xff\xf1\xbd?컮\x86\xab\xbe\xae\xeaB\xb4g\x11\x94\xf5\xc5Vcj\f\xfb\xaa\xac\x89(\\i\x82yMJ\xc3u\xf3\b\b\xfa\xb2r\xa7U\x19n\xaa\xb6\xcf\xf0\xb2\xa9\xe3\xdeD\xfb\xb3U\xe3\a\xf8W\xd7\t\x1e-\x9c뭫\xaf\xea\xbe\"\xf8\xa7\xb1sp\x1fp\x9d\xc1\xb9\x9a\xcaԧN\xf2\r\x84\xe6\x81U}»1ԇ\xb36W\xf0\x19\x9f\a\xbe\xde(Z\xc4\xf1\x9d\x9aO\xcdĔ\xcf\b\x86*\xacO.q_\x8f\xe2\xbc\xd8\x01m\xd1Us\xbd\uef44\x1b\xaeV[w\xf19\xb0Cl\xfdG\xb9\xf5\a8\t\xad韎z\x8c*\xaeI\xa55\xa6\xb0\x06\xb7\xd4\xd1G\x8bf\xcf\xe5a+!\t6\xbc\xfd\xa5\xdc4\xe5\"\xe1\x8f?Ϛ])\x92\x04\v\x17\x12\xbb\xda\xff\x9aŹ/\xf7Z\xfdc\x15\xfcg\xa2\x95w\xb4\xed\x15\xfc\xe7\u007f\x9fA0\xc0\x0fտHA\x1f\xff7\x00\x00\xff\xffX\x13X\x17\xfbc\x00\x00"), []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4VO\x8f\xeb4\x10\xbf\xe7S\x8c\x1e\x87w!\xe9{\xe2\x00\xca\r\x15\x0e+`\xb5\xda>\xed\x05qp\x9di;\xacc\x9b\xf1\xb8K\xf9\xf4\xc8v\xb2m\x93\x94]\x90\xf0-\xf6\xfc\xf9\xcdo\xfed\xaa\xba\xae+\xe5\xe9\t9\x90\xb3-(O\xf8\xa7\xa0M_\xa1y\xfe.4\xe4V\xc7\xcf\xd53ٮ\x85u\f\xe2\xfaG\f.\xb2\xc6\x1fpG\x96\x84\x9c\xadz\x14\xd5)Qm\x05\xa0\xacu\xa2\xd2uH\x9f\x00\xdaYag\fr\xbdG\xdb<\xc7-n#\x99\x0e9\x1b\x1f]\x1f?5\xdf6\x9f*\x00͘տP\x8fAT\xef[\xb0ј\n\xc0\xaa\x1e[\b\xc8II\x94\xc4\xc0\xf8G\xc4 
\xa19\xa2Av\r\xb9*x\xd4\xc9\xf1\x9e]\xf4-\x9c\x1f\x8a\xfe\x00\xaa\x04\xb4ɦ6\xd9\xd4c1\x95_\r\x05\xf9\xe9\x96\xc4\xcf4Hy\x13Y\x99e@Y \x1c\x1c\xcb\xfd\xd9i\r!py!\xbb\x8fF\xf1\xa2r\x05\x10\xb4\xf3\xd8B\xd6\xf5JcW\x01\fLe[\xf5\xc0\xc5\xf1s1\xa7\x0fث\xe2\x04\xc0y\xb4\xdf?\xdc=}\xb3\xb9\xba\x06\xe80h&/\x99\xef\x85Ȁ\x02(\x18P\x808PZc\b\xa0#3Z\x81\x82\x12\xc8\xee\x1c\xf79G\xaf\xa6\x01\xd4\xd6E\x019 d\x05\xd9*\x03Ge\"~\r\xcavЫ\x130&/\x10텽,\x12\x1a\xf8\xc51f2[8\x88\xf8ЮV{\x92\xb1\xeb\xb4\xeb\xfbhIN\xab\xdc@\xb4\x8d\xe28\xac:<\xa2Y\x05\xda\u05ca\xf5\x81\x04\xb5Dƕ\xf2Tg\xe86w^\xd3w_\xf1Ч\xe1\xe3\x15V9\xa5\xca\n\xc2d\xf7\x17\x0f\xb9!\xfe!\x03\xa9\x1dJ}\x14\xd5\x12ř\xe8t\x95\xd8y\xfcq\xf3\x05F\xd79\x19S\xf63\xefg\xc5pNA\"\x8c\xec\x0e\xb9$qǮ\xcf6\xd1vޑ-ե\r\xa1\x9d\xd2\x1f\xe2\xb6'\tc\xed\xa6\\5\xb0Σ\b\xb6\b\xd1wJ\xb0k\xe0\xce\xc2Z\xf5h\xd6*\xe0\xff\x9e\x80\xc4t\xa8\x13\xb1\xefK\xc1\xe5\x14\x9d\n\x17\xd6.\x1e\xc61w#_\vݽ\xf1\xa8S\x06\x13\x89I\x9bv\xa4s{\xc0\xce1\xa8%\x95\xe6]H\xb2ƿ\xc42L\x92\x82f2_R\u007f\xbe\x8dfy\x9c䗃\n8\xbd\x9c`zH2S\xff\x86v\xa8O\xda`1Q\xa6\t\xbe\r%\x1d\xb4\xb1\x9f\xfb\xac\xe1\x1e_\x16n\x1fإɚ\xe7\xfa\xf5\xb9Q\x1bP\xfe7{\xb2\xb3p\xa7\x91\x15\xa9\xfc\x0f\xbb\x1c\xd5\x17\x03z0\x04\x1c\xadM};\x9b\x90\x19\xc8t\x92\xcfdH\xb0_@\xb3\x88\xe7\xce\xee\\\xde\x04Tr\xac\xa4\xf4\x13\x0e\xc9\x1e\xfc\x14\\\v\x06o纜\xf9\xf0z\x17\xa1\xe5\xe4?\xe9\u007fSN\xe3\x86\x18\x17}\xd7\x19\xd5\xe2C\xf2\xb8\xc4\xf8r\u007f\r(\xa31jk\xb0\x05\xe18\xd7.\xba\x8aY\x9d\xa6U3\x96\xday\x9fz\xa3\x80f\n\xa9O^\x0ehou\x03\xbc\xa8锿\xf2\f\xdb\xd3-\xd5\xf5\xebr8o\xa9R\xba-\xa4\xd9]\v-p\xf6.R\x16\xb3WJzq\xf3\x98\x11\xb2\xb9\x94\x1dg\xc6Uk\x8c\x8b\xc8<\x86\x9b\x10\x16\x93=\xbb\xcc滋\xf0\x828V\xfb1\xe0\xf3\xe8M\x9b\x9a\x17\xec\xee\xa7+\xee\x87\x0fW\xbbj\xfe\xd4\xcevT6t\xf8\xf5\xb7\xaaX\xc5\xeei\\0\xd3\xe5\xdf\x01\x00\x00\xff\xff\xfb\xb1p\x12\x1b\f\x00\x00"), - []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4VM\x8f\xdc6\x0f\xbe\xfbW\x10y\x0f\xb9\xbc\xf6$衅o\xc1\xb6\x87\xa0i\xb0Ȧ{)z\xd0H\xf4\f\xbb\xb2\xa4\x8aԴ\xdb__\xe8\xc3;\x1f\xeb\xd9&@\xeb\x9bi\xf2\xe1Ç\x1f3]\xdf\xf7\x9d\nt\x8f\x91ɻ\x11T \xfcS\xd0\xe57\x1e\x1e\xbe\xe3\x81\xfc\xe6\xf0\xb6{ gF\xb8I,~\xfe\x84\xecS\xd4\xf8=N\xe4HȻnFQF\x89\x1a;\x00\xe5\x9c\x17\x95͜_\x01\xb4w\x12\xbd\xb5\x18\xfb\x1d\xba\xe1!mq\x9b\xc8\x1a\x8c\x05|I}x3|;\xbc\xe9\x00t\xc4\x12\xfe\x99fdQs\x18\xc1%k;\x00\xa7f\x1c\xe1\xe0m\x9a\x91\x9d\n\xbc\xf7b\xbd\xaeɆ\x03Z\x8c~ \xdfq@\x9ds\xef\xa2Oa\x84\xe3\x87\n\xd1x՚\xee\v\xda]C\xfb\xd0Њ\x83%\x96\x1f_p\xfa@,\xc51\xd8\x14\x95\xbdʬ\xf8\xf0\xdeG\xf9x\xcc\xdeÁm\xfdBn\x97\xac\x8a\xd7\xe2;\x00\xd6>\xe0\b%<(\x8d\xa6\x03h\xc2\x15\xb8~\x91\xe6mE\xd4{\x9cU\xcd\x03\xe0\x03\xbaw\xb7\xef\ufff9;3\x03\x18d\x1d)H\x91\u007f\xbdD \x06\x05\v\x13\xf8c\x8f\x11\xe1\xbe\xe8\t,>\"7\xd2O\xa0\x00\v\u007f\x1e\x9e\x8c!\xfa\x80Qh)\xbe>'\x83wb\xbd\xe0\xf5:S\xaf^`\xf2\xc4!\x83\xecq)\x1fM\xab\x16\xfc\x04\xb2'\x86\x88!\"\xa3\x93c#\x8f\x8f\x9f@9\xf0\xdb\xdfP\xcb\x00w\x183L\xeeM\xb2&\x0f\xea\x01\xa3@D\xedw\x8e\xfez\xc2f\x10_\x92Z%\xd8z~|\xc8\tF\xa7,\x1c\x94M\xf8\u007fP\xce\xc0\xac\x1e!b\xce\x02ɝ\xe0\x15\x17\x1e\xe0'\x1f\x11\xc8M~\x84\xbdH\xe0q\xb3ّ,\v\xa7\xfd<'G\xf2\xb8)\xbbC\xdb$>\xf2\xc6\xe0\x01\xed\x86i\u05eb\xa8\xf7$\xa8%Eܨ@}\xa1\xee\xea\x1e\xcc\xe6\u007f\xb1\xad(\xbf>\xe3*\x8fy\x8aX\"\xb9\xddɇ\xb2\b/t \xef@\x1d\x84\x1aZ\xab8\n\x9dMY\x9dO?\xdc}\x86%uiƥ\xfaE\xf7c 
\x1f[\x90\x05#7a\xacM\x9c\xa2\x9f\v&:\x13<9)/\xda\x12\xbaK\xf99mg\x92\xdc\xf7\xdf\x13\xb2\xe4^\rpS\xae\x10l\x11R0J\xd0\f\xf0\xde\xc1\x8d\x9a\xd1\xde(\xc6\xff\xbc\x01Yi\uecf0_ւ\xd3\x03z\xe9\\U;]\xb0vޮ\xf4k}\x93\xef\x02\xea\xb3\x05\xca(4Q\xdb\xec\xc9\xc7\v]ղ\xe7\xebxÙ\xfb\xfa\x82C\xbd\xfe\x13\xed.\xad\x00ʘ\xf2ۡ\xec\xed\xd5\xd8\x17\x04[\xa9\xfb\xa6dʃ:\xf9\x98\x19\x1d\xc8`\xec\x97:\x1b\x93\x14[\xc1\x84\xd6\xf0\xf0\f\xf2\x8a\xe6\xad\xc8\x02\xf9\x9c\xe6\x19\x8f\xdb料d\xa1\x97\xb0z\xa1\xb0\x1d\xccr>\xd5\x0e\xaf1X\xa98O8E\xbc\xd8\xd5\xfe)\xc1\x17͎(I\xfc\xf5\xd3S\u009a\xe7\xb6M\x90N1\xa2\x93\x86\xb9ri\xff\x9d\t\n{\xc5\xf8\x0f\x9a\xafg\xb8͑K\x1b,M\xa8\x1f\xb5\xc5\n\b~Z\x99\xb6\xaf\xa2\x9c\x1fti~έ\x87w\aEVm-\xae|\xfb٩\xab_\xaf6\u007f\xb5\x9fό\x9cϩ\x19Ab\xaa\xd8mʚ\xe5\xd8}\xa55\x06A\xf3\xf1\xf2\xffЫWg\u007fiʫ\xf6\xae.+\x8f\xf0˯]EEs\xbf\xfc\x03\xc9ƿ\x03\x00\x00\xff\xffz{3\x1eK\n\x00\x00"), + []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4WO\x8f۶\x13\xbd\xfbS\f\x92\xc3^\"9\xc1\xef\xf0+t)\x82M\x0fA\xf3g\x11o\xf7R\xf4@\x93#\x8b]\x8aTgHm\xddO_\f)\xad\xbd\xb6\x9cl\x8aV\x17C\x149|\xf3\u07bc!\xbd\xaa\xaaj\xa5\x06{\x87\xc46\xf8\x06\xd4`\xf1ψ^\u07b8\xbe\xff\x81k\x1b\xd6\xe3\x9bս\xf5\xa6\x81\xeb\xc41\xf4_\x90C\"\x8dﰵ\xdeF\x1b\xfc\xaaǨ\x8c\x8a\xaaY\x01(\xefCT2\xcc\xf2\n\xa0\x83\x8f\x14\x9cC\xaav\xe8\xeb\xfb\xb4\xc5m\xb2\xce \xe5\xe0\xf3\xd6\xe3\xeb\xfa\xff\xf5\xeb\x15\x80&\xcc\xcbom\x8f\x1cU?4\xe0\x93s+\x00\xafzl`\f.\xf5\xc8^\r܅\xe8\x82.\x9b\xd5#:\xa4P۰\xe2\x01\xb5콣\x90\x86\x06\x0e\x1fJ\x88\tW\xc9\xe9.G\xdbL\xd1>L\xd1\xf2\x04g9\xfe\xfc\x95I\x1f,\xc7\xe8\xcdK\x9a,\xcaWO\xb0ƽT\x11G\xb2~w\xf4!\x1b\xe1+\n\x88\aJ!\x94\xa5%\x8b\x03\xd12$\xec|\xf9is\v\xf3\xd6Y\x8cS\xf63\uf1c5|\x90@\b\xb3\xbeE*\"\xb6\x14\xfa\x1c\x13\xbd\x19\x82\xf51\xbfhgџ\xd2\xcfi\xdb\xdb(\xba\xff\x91\x90\xa3hU\xc3u\xeeB\xb0EH\x83Q\x11M\r\xef=\\\xab\x1eݵb\xfc\xcf\x05\x10\xa6\xb9\x12b\x9f'\xc1q\x03=\x9d\\X;6\xd8\xd4\xde.\xe8\xb5\xec\xe4̀\xfa\x89\x81$\x8am\xed\xe4\xec6\xd0\t\xafj\xf6\xf9r\xbc\xfa\xc9\xf4e\x83C\xe9\xfe\xadݝ\x8e\x02(c\xf2١\xdc\xcdŵ_!l!\xef뼓\x14j\x1bH\x10\x8d\xd6 Us\x9e\x13\x92DS\xc2\x16\x9d\xe1\xfa,\xe4\x05\xces*\x84F4V\xee\x1c\xe8S$\x8f\x13\xf3᧬/\x94\x1f\x02\xe4ң~\xea\xb1>\xa27\xb9\xa9\x9f\xa1\t\xb9\x86\x19\r<\xd8\xd8\x15s\xb8\xe3C\xeay*\xc8s\x8f\xfb\xa5\xe1\x13\xec\xb7\x1d\xca\xcc\xd2N\x11\x185a\x14\x1c\x8cN\xcc+ά\x01>&\xce\xf6R\x8b\x11AZ\x845\xf3\xea{ܟ\x13\r\xdf\x12w:\xef\xbf\r\xf9J\xce\xc5\x190a\x8b\x84>.Z\\\xee\x1e\xe41bv\xb9\t\x9a\xc5\xe0\x1a\x87\xc8\xeb0\"\x8d\x16\x1f\xd6\x0f\x81\xee\xad\xdfUBxU\n\x81\xd7\xf9ް~\x99\u007f.\xa4|\xfb\xf9\xdd\xe7\x06\xde\x1a\x03!vH\xa2Z\x9b\xdc\\hG\xa7ݫ\xdcq_A\xb2\xe6ǫ\u007f\xc2K\x18\x8as\x9e\xc1\xcd&W\xff^N\xee\fJ(\xda\x14U\x02\x81\xf4M\x11\xbb\x9f\xd4,\xfda\xa9\x10gL\xdb\x10\x1c\xaa\xf3ғ\xeek\t\xcd9\xa4Jv\xf8\x1e\x9b\xcd\xce\xfd\x86\xc9n\xa6ibx\xc9j^6\x17B\xb9\x97\xe4[\x8a\xda\xe1%\xa3/p\xbc\x9cJ\xf5\xb8\xc1\xb3ZtT1\xf1\xf77\xe9\xbcl\x9a\xb9\x9d\x1a\xb5N$\x05=\xc5\\\xb8\xd0\xfc;\x8dz\xe8\x14/\xb8\xed\x19\xa8od\xe5,\x83\xb3-\xea\xbdvX\x02Bh\x17\xaa\xe9\xbb ˃>\xf5K\xa5\xf5vT֩\xadÅo\xbfxu\xf1\xebE\xf1\x17\xf5<\x1bd\xb9\xb5\x98\x06\"\xa5\x12{\xaa\xb2i䠾\xd2\xd2\\\xd0|:\xfd\xdb\xf1\xe2œ\u007f\x0e\xf9U\a_\xceDn\xe0\xd7\xdfV%*\x9a\xbb\xf9\xa2/\x83\u007f\a\x00\x00\xff\xff\xe4\xf3S\x85\xb2\r\x00\x00"), } var CRDs = crds() diff --git a/design/plugin-versioning.md b/design/plugin-versioning.md index c43cecea3c..92279b9754 100644 --- a/design/plugin-versioning.md +++ b/design/plugin-versioning.md @@ -135,8 +135,11 @@ type ObjectStore interface { The proto service definitions of the plugins will also be versioned 
and arranged by their plugin kind. Currently, all the proto definitions reside under `pkg/plugin/proto` in a file corresponding to their plugin kind. -These files will be rearranged to be grouped by kind and then versioned: `pkg/plugin/proto//`. -The scripts to compile the proto service definitions will need to be updated to place the generated Go code under a matching directory structure. +These files will be rearranged to be grouped by kind and then versioned: `pkg/plugin/proto//`, +except for the current v1 plugins. Those will remain in their current package/location for backwards compatibility. +This will allow plugin images built with earlier versions of Velero to work with the latest Velero (for v1 plugins +only). The go_package option will be added to all proto service definitions to allow the proto compilation script +to place the generated Go code for each plugin API version in the proper Go package directory. It is not possible to import an existing proto service into a new one, so any methods will need to be duplicated across versions if they are required by the new version. The message definitions can be shared however, so these could be extracted from the service definition files and placed in a file that can be shared across all versions of the service. diff --git a/design/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md index 5b4a897e26..7bef16df89 100644 --- a/design/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md +++ b/design/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md @@ -93,126 +93,140 @@ Velero by default uses the Unified Repository for all kinds of data movement, it ## The Unified Repository Interface Below are the definitions of the Unified Repository Interface. All the functions are synchronization functions. ``` -///BackupRepoService is used to initialize, open or maintain a backup repository +// BackupRepoService is used to initialize, open or maintain a backup repository type BackupRepoService interface { - ///Create a backup repository or connect to an existing backup repository - ///repoOption: option to the backup repository and the underlying backup storage - ///createNew: indicates whether to create a new or connect to an existing backup repository - ///result: the backup repository specific output that could be used to open the backup repository later - Init(ctx context.Context, repoOption RepoOptions, createNew bool) error - - ///Open an backup repository that has been created/connected - ///repoOption: options to open the backup repository and the underlying storage - Open(ctx context.Context, repoOption RepoOptions) (BackupRepo, error) - - ///Periodically called to maintain the backup repository to eliminate redundant data and improve performance - ///repoOption: options to maintain the backup repository - Maintain(ctx context.Context, repoOption RepoOptions) error + // Init creates a backup repository or connects to an existing backup repository. + // repoOption: option to the backup repository and the underlying backup storage. + // createNew: indicates whether to create a new or connect to an existing backup repository. + Init(ctx context.Context, repoOption RepoOptions, createNew bool) error + + // Open opens a backup repository that has been created/connected. + // repoOption: options to open the backup repository and the underlying storage.
+ Open(ctx context.Context, repoOption RepoOptions) (BackupRepo, error) + + // Maintain is periodically called to maintain the backup repository to eliminate redundant data. + // repoOption: options to maintain the backup repository. + Maintain(ctx context.Context, repoOption RepoOptions) error + + // DefaultMaintenanceFrequency returns the default frequency of maintenance, callers refer to this + // frequency to maintain the backup repository to get the best maintenance performance. + DefaultMaintenanceFrequency() time.Duration } -///BackupRepo provides the access to the backup repository +// BackupRepo provides the access to the backup repository type BackupRepo interface { - ///Open an existing object for read - ///id: the object's unified identifier - OpenObject(ctx context.Context, id ID) (ObjectReader, error) - - ///Get a manifest data - GetManifest(ctx context.Context, id ID, mani *RepoManifest) error - - ///Get one or more manifest data that match the given labels - FindManifests(ctx context.Context, filter ManifestFilter) ([]*ManifestEntryMetadata, error) - - ///Create a new object and return the object's writer interface - ///return: A unified identifier of the object on success - NewObjectWriter(ctx context.Context, opt ObjectWriteOptions) ObjectWriter - - ///Save a manifest object - PutManifest(ctx context.Context, mani RepoManifest) (ID, error) - - ///Delete a manifest object - DeleteManifest(ctx context.Context, id ID) error - - ///Flush all the backup repository data - Flush(ctx context.Context) error - - ///Get the local time of the backup repository. It may be different from the time of the caller - Time() time.Time - - ///Close the backup repository - Close(ctx context.Context) error -} + // OpenObject opens an existing object for read. + // id: the object's unified identifier. + OpenObject(ctx context.Context, id ID) (ObjectReader, error) + + // GetManifest gets a manifest data from the backup repository. + GetManifest(ctx context.Context, id ID, mani *RepoManifest) error + + // FindManifests gets one or more manifest data that match the given labels + FindManifests(ctx context.Context, filter ManifestFilter) ([]*ManifestEntryMetadata, error) + + // NewObjectWriter creates a new object and returns the object's writer interface. + // return: A unified identifier of the object on success. + NewObjectWriter(ctx context.Context, opt ObjectWriteOptions) ObjectWriter + + // PutManifest saves a manifest object into the backup repository. + PutManifest(ctx context.Context, mani RepoManifest) (ID, error) + + // DeleteManifest deletes a manifest object from the backup repository. + DeleteManifest(ctx context.Context, id ID) error + + // Flush flushes all the backup repository data + Flush(ctx context.Context) error + + // Time returns the local time of the backup repository. It may be different from the time of the caller + Time() time.Time + + // Close closes the backup repository + Close(ctx context.Context) error +} type ObjectReader interface { - io.ReadCloser - io.Seeker - - ///Length returns the logical size of the object - Length() int64 + io.ReadCloser + io.Seeker + + // Length returns the logical size of the object + Length() int64 } type ObjectWriter interface { - io.WriteCloser - - ///For some cases, i.e.
block incremental, the object is not written sequentially - io.Seeker - - // Periodically called to preserve the state of data written to the repo so far - // Return a unified identifier that represent the current state - // An empty ID could be returned on success if the backup repository doesn't support this - Checkpoint() (ID, error) - - ///Wait for the completion of the object write - ///Result returns the object's unified identifier after the write completes - Result() (ID, error) -} + io.WriteCloser + + // Seeker is used in cases where the object is not written sequentially + io.Seeker + + // Checkpoint is periodically called to preserve the state of data written to the repo so far. + // Checkpoint returns a unified identifier that represents the current state. + // An empty ID could be returned on success if the backup repository doesn't support this. + Checkpoint() (ID, error) + + // Result waits for the completion of the object write. + // Result returns the object's unified identifier after the write completes. + Result() (ID, error) +} ``` Some data structure & constants used by the interfaces: -``` +``` type RepoOptions struct { - ///A repository specific string to identify a backup storage, i.e., "s3", "filesystem" - StorageType string - ///Backup repository password, if any - RepoPassword string - ///A custom path to save the repository's configuration, if any - ConfigFilePath string - ///Other repository specific options - GeneralOptions map[string]string - ///Storage specific options - StorageOptions map[string]string + // StorageType is a repository specific string to identify a backup storage, i.e., "s3", "filesystem" + StorageType string + // RepoPassword is the backup repository's password, if any + RepoPassword string + // ConfigFilePath is a custom path to save the repository's configuration, if any + ConfigFilePath string + // GeneralOptions takes other repository specific options + GeneralOptions map[string]string + // StorageOptions takes storage specific options + StorageOptions map[string]string + // Description is a description of the backup repository/backup repository operation. + // It is for logging/debugging purposes only and doesn't control any behavior of the backup repository. + Description string } -///ObjectWriteOptions defines the options when creating an object for write +// ObjectWriteOptions defines the options when creating an object for write type ObjectWriteOptions struct { - FullPath string ///Full logical path of the object - Description string ///A description of the object, could be empty - Prefix ID ///A prefix of the name used to save the object - AccessMode int ///OBJECT_DATA_ACCESS_* - BackupMode int ///OBJECT_DATA_BACKUP_* + FullPath string // Full logical path of the object + DataType int // OBJECT_DATA_TYPE_* + Description string // A description of the object, could be empty + Prefix ID // A prefix of the name used to save the object + AccessMode int // OBJECT_DATA_ACCESS_* + BackupMode int // OBJECT_DATA_BACKUP_* } const ( - ///Below consts defines the access mode when creating an object for write - OBJECT_DATA_ACCESS_MODE_UNKNOWN int = 0 - OBJECT_DATA_ACCESS_MODE_FILE int = 1 - OBJECT_DATA_ACCESS_MODE_BLOCK int = 2 - - OBJECT_DATA_BACKUP_MODE_UNKNOWN int = 0 - OBJECT_DATA_BACKUP_MODE_FULL int = 1 - OBJECT_DATA_BACKUP_MODE_INC int = 2 + // Below consts describe the data type of one object. + // Metadata: This type describes how the data is organized. + // For a file system backup, the Metadata describes a Dir or File.
+ // For a block backup, the Metadata describes a Disk and its incremental link. + ObjectDataTypeUnknown int = 0 + ObjectDataTypeMetadata int = 1 + ObjectDataTypeData int = 2 + + // Below consts defines the access mode when creating an object for write + ObjectDataAccessModeUnknown int = 0 + ObjectDataAccessModeFile int = 1 + ObjectDataAccessModeBlock int = 2 + + ObjectDataBackupModeUnknown int = 0 + ObjectDataBackupModeFull int = 1 + ObjectDataBackupModeInc int = 2 ) -///ManifestEntryMetadata is the metadata describing one manifest data +// ManifestEntryMetadata is the metadata describing one manifest data type ManifestEntryMetadata struct { - ID ID ///The ID of the manifest data - Length int32 ///The data size of the manifest data - Labels map[string]string ///Labels saved together with the manifest data - ModTime time.Time ///Modified time of the manifest data + ID ID // The ID of the manifest data + Length int32 // The data size of the manifest data + Labels map[string]string // Labels saved together with the manifest data + ModTime time.Time // Modified time of the manifest data } type RepoManifest struct { - Payload interface{} ///The user data of manifest - Metadata *ManifestEntryMetadata ///The metadata data of manifest + Payload interface{} // The user data of manifest + Metadata *ManifestEntryMetadata // The metadata data of manifest } type ManifestFilter struct { diff --git a/go.mod b/go.mod index 158ce888ad..75512d71ac 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,6 @@ require ( github.com/Azure/go-autorest/autorest v0.11.21 github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 github.com/Azure/go-autorest/autorest/to v0.3.0 - github.com/apex/log v1.9.0 github.com/aws/aws-sdk-go v1.43.31 github.com/bombsimon/logrusr v1.1.0 github.com/evanphx/json-patch v4.11.0+incompatible @@ -42,12 +41,13 @@ require ( golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/api v0.74.0 google.golang.org/grpc v1.45.0 + google.golang.org/protobuf v1.28.0 k8s.io/api v0.22.2 k8s.io/apiextensions-apiserver v0.22.2 k8s.io/apimachinery v0.22.2 k8s.io/cli-runtime v0.22.2 k8s.io/client-go v0.22.2 - k8s.io/klog v1.0.0 + k8s.io/klog/v2 v2.9.0 k8s.io/kube-aggregator v0.19.12 sigs.k8s.io/controller-runtime v0.10.2 sigs.k8s.io/yaml v1.3.0 @@ -57,6 +57,9 @@ require ( cloud.google.com/go v0.100.2 // indirect cloud.google.com/go/compute v1.5.0 // indirect cloud.google.com/go/iam v0.1.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/adal v0.9.16 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect @@ -69,9 +72,11 @@ require ( github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-logr/logr v0.4.0 // indirect github.com/go-logr/zapr v0.4.0 // indirect + github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.4.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -93,6 +98,9 @@ require ( github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-isatty v0.0.14 // indirect 
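To make the Unified Repository interface in the design-document hunk above easier to follow, here is a small usage sketch. It is illustrative only and not part of this diff: it assumes the `BackupRepoService`/`BackupRepo` definitions above are available in a hypothetical `udmrepo` package (the import path and option values below are placeholders), and that the caller already holds a concrete `BackupRepoService` implementation such as the Kopia-based one this design describes.

```
// Illustrative sketch only (not part of this change). It assumes the interfaces
// defined in the design document above live in a hypothetical package "udmrepo";
// the import path, bucket, and password values are placeholders.
package example

import (
	"context"
	"log"

	"example.com/placeholder/udmrepo" // hypothetical home of the interfaces above
)

// backupOneObject creates (or connects to) a backup repository, writes a single
// object into it, and flushes the repository.
func backupOneObject(ctx context.Context, svc udmrepo.BackupRepoService, payload []byte) error {
	opt := udmrepo.RepoOptions{
		StorageType:    "s3",
		RepoPassword:   "static-passw0rd",                          // placeholder
		StorageOptions: map[string]string{"bucket": "velero-data"}, // placeholder
	}

	// createNew=true creates the repository; pass false to connect to an existing one.
	if err := svc.Init(ctx, opt, true); err != nil {
		return err
	}

	repo, err := svc.Open(ctx, opt)
	if err != nil {
		return err
	}
	defer repo.Close(ctx)

	w := repo.NewObjectWriter(ctx, udmrepo.ObjectWriteOptions{
		FullPath:   "/ns1/pvc-1/file1",
		DataType:   udmrepo.ObjectDataTypeData,
		AccessMode: udmrepo.ObjectDataAccessModeFile,
		BackupMode: udmrepo.ObjectDataBackupModeFull,
	})
	defer w.Close()

	if _, err := w.Write(payload); err != nil {
		return err
	}

	// Result waits for the write to complete; the returned ID is what a restore
	// would later pass to OpenObject.
	id, err := w.Result()
	if err != nil {
		return err
	}
	log.Printf("backed up object %v", id)

	return repo.Flush(ctx)
}
```

The sketch mirrors the intended call order implied by the interface: `Init` (create or connect), `Open`, then `NewObjectWriter`/`Result` per object, and finally `Flush` and `Close`.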
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/minio/md5-simd v1.1.2 // indirect + github.com/minio/minio-go/v7 v7.0.23 // indirect + github.com/minio/sha256-simd v1.0.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/moby/spdystream v0.2.0 // indirect @@ -106,6 +114,7 @@ require ( github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect + github.com/rs/xid v1.3.0 // indirect github.com/stretchr/objx v0.2.0 // indirect github.com/vladimirvivien/gexe v0.1.1 // indirect github.com/zeebo/blake3 v0.2.3 // indirect @@ -124,13 +133,12 @@ require ( gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect k8s.io/component-base v0.22.2 // indirect - k8s.io/klog/v2 v2.9.0 // indirect k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e // indirect k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect diff --git a/go.sum b/go.sum index 58f63b5f24..75dc52473f 100644 --- a/go.sum +++ b/go.sum @@ -63,8 +63,11 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v61.4.0+incompatible h1:BF2Pm3aQWIa6q9KmxyF1JYKYXtVw67vtvu2Wd54NGuY= github.com/Azure/azure-sdk-for-go v61.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 h1:qoVeMsc9/fh/yhxVaA0obYjVH/oI/ihrOoMwsLS9KSA= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 h1:E+m3SkZCN0Bf5q7YdTs5lSm2CYY3CK4spn5OmUIiQtk= github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= @@ -138,11 +141,6 @@ github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPp github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0= -github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA= -github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= -github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod 
h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= -github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -150,12 +148,10 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.43.31 h1:yJZIr8nMV1hXjAvvOLUFqZRJcHV7udPQBfhJqawDzI0= github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -231,10 +227,12 @@ github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -318,6 +316,7 @@ github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6Wezm github.com/gobwas/ws v1.1.0/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod 
h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= @@ -498,7 +497,6 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -547,6 +545,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= @@ -563,8 +562,6 @@ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= @@ -573,7 +570,6 @@ github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqf github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -583,12 +579,14 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp 
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= +github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= +github.com/minio/minio-go/v7 v7.0.23 h1:NleyGQvAn9VQMU+YHVrgV4CX+EPtxPt/78lHOOTncy4= github.com/minio/minio-go/v7 v7.0.23/go.mod h1:ei5JjmxwHaMrgsMrn4U/+Nmg+d8MKS1U2DAn1ou4+Do= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -649,7 +647,6 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -730,12 +727,12 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn github.com/robfig/cron v1.1.0 h1:jk4/Hud3TTdcrJgUOBgsqrZBarcxl6ADIjSC2iniwLY= github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -743,7 +740,6 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb 
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sanity-io/litter v1.5.4/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -754,10 +750,7 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= @@ -803,13 +796,6 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/studio-b12/gowebdav v0.0.0-20211106090535-29e74efa701f/go.mod h1:gCcfDlA1Y7GqOaeEKw5l9dOGx1VLdc/HuQSlQAaZ30s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tg123/go-htpasswd v1.2.0/go.mod h1:h7IzlfpvIWnVJhNZ0nQ9HaFxHb7pn5uFJYLlEUJa2sM= -github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= -github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= -github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= -github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= -github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= -github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= -github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -901,7 +887,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1439,6 +1424,7 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/kothar/go-backblaze.v0 v0.0.0-20210124194846-35409b867216/go.mod h1:zJ2QpyDCYo1KvLXlmdnFlQAyF/Qfth0fB8239Qg7BIE= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -1458,7 +1444,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1501,8 +1486,6 @@ k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllun k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= diff --git a/hack/build-image/Dockerfile b/hack/build-image/Dockerfile index 5f35646310..5baa4365e2 100644 --- a/hack/build-image/Dockerfile +++ b/hack/build-image/Dockerfile @@ -45,11 +45,11 @@ RUN go get golang.org/x/tools/cmd/goimports@11e9d9cc0042e6bd10337d4d2c3e5d929550 # get protoc compiler and golang plugin WORKDIR /root RUN apt-get update && apt-get install -y unzip -RUN wget --quiet https://github.com/protocolbuffers/protobuf/releases/download/v3.9.1/protoc-3.9.1-linux-x86_64.zip && \ - unzip protoc-3.9.1-linux-x86_64.zip && \ +RUN wget --quiet https://github.com/protocolbuffers/protobuf/releases/download/v3.14.0/protoc-3.14.0-linux-x86_64.zip && \ + unzip 
protoc-3.14.0-linux-x86_64.zip && \ mv bin/protoc /usr/bin/protoc && \ chmod +x /usr/bin/protoc -RUN go get github.com/golang/protobuf/protoc-gen-go@v1.0.0 +RUN go get github.com/golang/protobuf/protoc-gen-go@v1.4.3 # get goreleaser RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v0.120.8/goreleaser_Linux_x86_64.tar.gz && \ diff --git a/hack/update-fmt.sh b/hack/update-1fmt.sh similarity index 100% rename from hack/update-fmt.sh rename to hack/update-1fmt.sh diff --git a/hack/update-proto.sh b/hack/update-2proto.sh similarity index 86% rename from hack/update-proto.sh rename to hack/update-2proto.sh index 28ba964d17..43afa8d4ce 100755 --- a/hack/update-proto.sh +++ b/hack/update-2proto.sh @@ -18,6 +18,7 @@ HACK_DIR=$(dirname "${BASH_SOURCE}") echo "Updating plugin proto" -protoc pkg/plugin/proto/*.proto --go_out=plugins=grpc:pkg/plugin/generated/ -I pkg/plugin/proto/ +echo protoc --version +protoc pkg/plugin/proto/*.proto --go_out=plugins=grpc:pkg/plugin/generated/ --go_opt=module=github.com/vmware-tanzu/velero/pkg/plugin/generated -I pkg/plugin/proto/ echo "Updating plugin proto - done!" diff --git a/hack/update-generated-crd-code.sh b/hack/update-3generated-crd-code.sh similarity index 100% rename from hack/update-generated-crd-code.sh rename to hack/update-3generated-crd-code.sh diff --git a/hack/update-generated-issue-template.sh b/hack/update-4generated-issue-template.sh similarity index 100% rename from hack/update-generated-issue-template.sh rename to hack/update-4generated-issue-template.sh diff --git a/hack/verify-fmt.sh b/hack/verify-fmt.sh index e592ed0cfa..f3858cf24b 100755 --- a/hack/verify-fmt.sh +++ b/hack/verify-fmt.sh @@ -15,4 +15,4 @@ # limitations under the License. HACK_DIR=$(dirname "${BASH_SOURCE[0]}") -"${HACK_DIR}"/update-fmt.sh --verify +"${HACK_DIR}"/update-1fmt.sh --verify diff --git a/hack/verify-generated-crd-code.sh b/hack/verify-generated-crd-code.sh index d4c097c799..387acd74e7 100755 --- a/hack/verify-generated-crd-code.sh +++ b/hack/verify-generated-crd-code.sh @@ -16,7 +16,7 @@ HACK_DIR=$(dirname "${BASH_SOURCE}") -${HACK_DIR}/update-generated-crd-code.sh +${HACK_DIR}/update-3generated-crd-code.sh # ensure no changes to generated CRDs if ! 
git diff --exit-code config/crd/v1/crds/crds.go >/dev/null; then diff --git a/hack/verify-generated-issue-template.sh b/hack/verify-generated-issue-template.sh index 44e68e05a8..7d5cbf4efd 100755 --- a/hack/verify-generated-issue-template.sh +++ b/hack/verify-generated-issue-template.sh @@ -27,7 +27,7 @@ cleanup() { } echo "Verifying generated Github issue template" -${HACK_DIR}/update-generated-issue-template.sh ${OUT_TMP_FILE} > /dev/null +${HACK_DIR}/update-4generated-issue-template.sh ${OUT_TMP_FILE} > /dev/null output=$(echo "`diff ${ISSUE_TEMPLATE_FILE} ${OUT_TMP_FILE}`") if [[ -n "${output}" ]] ; then diff --git a/internal/delete/delete_item_action_handler.go b/internal/delete/delete_item_action_handler.go index b08c8e024e..bdcf511b24 100644 --- a/internal/delete/delete_item_action_handler.go +++ b/internal/delete/delete_item_action_handler.go @@ -48,7 +48,7 @@ type Context struct { func InvokeDeleteActions(ctx *Context) error { var err error resolver := framework.NewDeleteItemActionResolver(ctx.Actions) - ctx.resolvedActions, err = resolver.ResolveActions(ctx.DiscoveryHelper) + ctx.resolvedActions, err = resolver.ResolveActions(ctx.DiscoveryHelper, ctx.Log) // No actions installed and no error means we don't have to continue; // just do the backup deletion without worrying about plugins. if len(ctx.resolvedActions) == 0 && err == nil { diff --git a/internal/hook/item_hook_handler.go b/internal/hook/item_hook_handler.go index 83c756bd53..062922a056 100644 --- a/internal/hook/item_hook_handler.go +++ b/internal/hook/item_hook_handler.go @@ -36,7 +36,7 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/podexec" - "github.com/vmware-tanzu/velero/pkg/restic" + "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/util/collections" "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -126,7 +126,7 @@ func (i *InitContainerRestoreHookHandler) HandleRestoreHooks( // restored data to be consumed by the application container(s). // So if there is a "restic-wait" init container already on the pod at index 0, we'll preserve that and run // it before running any other init container. - if len(pod.Spec.InitContainers) > 0 && pod.Spec.InitContainers[0].Name == restic.InitContainer { + if len(pod.Spec.InitContainers) > 0 && pod.Spec.InitContainers[0].Name == podvolume.InitContainer { initContainers = append(initContainers, pod.Spec.InitContainers[0]) pod.Spec.InitContainers = pod.Spec.InitContainers[1:] } diff --git a/pkg/plugin/clientmgmt/restartable_delegate_test.go b/internal/restartabletest/restartable_delegate.go similarity index 58% rename from pkg/plugin/clientmgmt/restartable_delegate_test.go rename to internal/restartabletest/restartable_delegate.go index 3e3366c88e..f91a3eb37f 100644 --- a/pkg/plugin/clientmgmt/restartable_delegate_test.go +++ b/internal/restartabletest/restartable_delegate.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -package clientmgmt +package restartabletest import ( "reflect" @@ -24,55 +24,83 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" ) -type restartableDelegateTest struct { - function string - inputs []interface{} - expectedErrorOutputs []interface{} - expectedDelegateOutputs []interface{} +type MockRestartableProcess struct { + mock.Mock } -type mockable interface { +func (rp *MockRestartableProcess) AddReinitializer(key process.KindAndName, r process.Reinitializer) { + rp.Called(key, r) +} + +func (rp *MockRestartableProcess) Reset() error { + args := rp.Called() + return args.Error(0) +} + +func (rp *MockRestartableProcess) ResetIfNeeded() error { + args := rp.Called() + return args.Error(0) +} + +func (rp *MockRestartableProcess) GetByKindAndName(key process.KindAndName) (interface{}, error) { + args := rp.Called(key) + return args.Get(0), args.Error(1) +} + +func (rp *MockRestartableProcess) Stop() { + rp.Called() +} + +type RestartableDelegateTest struct { + Function string + Inputs []interface{} + ExpectedErrorOutputs []interface{} + ExpectedDelegateOutputs []interface{} +} + +type Mockable interface { Test(t mock.TestingT) On(method string, args ...interface{}) *mock.Call AssertExpectations(t mock.TestingT) bool } -func runRestartableDelegateTests( +func RunRestartableDelegateTests( t *testing.T, - kind framework.PluginKind, - newRestartable func(key kindAndName, p RestartableProcess) interface{}, - newMock func() mockable, - tests ...restartableDelegateTest, + kind common.PluginKind, + newRestartable func(key process.KindAndName, p process.RestartableProcess) interface{}, + newMock func() Mockable, + tests ...RestartableDelegateTest, ) { for _, tc := range tests { - t.Run(tc.function, func(t *testing.T) { - p := new(mockRestartableProcess) + t.Run(tc.Function, func(t *testing.T) { + p := new(MockRestartableProcess) p.Test(t) defer p.AssertExpectations(t) // getDelegate error - p.On("resetIfNeeded").Return(errors.Errorf("reset error")).Once() + p.On("ResetIfNeeded").Return(errors.Errorf("reset error")).Once() name := "delegateName" - key := kindAndName{kind: kind, name: name} + key := process.KindAndName{Kind: kind, Name: name} r := newRestartable(key, p) // Get the method we're going to call using reflection - method := reflect.ValueOf(r).MethodByName(tc.function) + method := reflect.ValueOf(r).MethodByName(tc.Function) require.NotEmpty(t, method) // Convert the test case inputs ([]interface{}) to []reflect.Value var inputValues []reflect.Value - for i := range tc.inputs { - inputValues = append(inputValues, reflect.ValueOf(tc.inputs[i])) + for i := range tc.Inputs { + inputValues = append(inputValues, reflect.ValueOf(tc.Inputs[i])) } // Invoke the method being tested actual := method.Call(inputValues) - // This function asserts that the actual outputs match the expected outputs + // This Function asserts that the actual outputs match the expected outputs checkOutputs := func(expected []interface{}, actual []reflect.Value) { require.Equal(t, len(expected), len(actual)) @@ -90,7 +118,7 @@ func runRestartableDelegateTests( continue } - // If function returns nil as struct return type, we cannot just + // If Function returns nil as struct return type, we cannot just // compare the interface to nil as its type will not be nil, // only the value will be if expected[i] 
== nil && reflect.ValueOf(a).Kind() == reflect.Ptr { @@ -104,25 +132,25 @@ func runRestartableDelegateTests( } // Make sure we get what we expected when getDelegate returned an error - checkOutputs(tc.expectedErrorOutputs, actual) + checkOutputs(tc.ExpectedErrorOutputs, actual) // Invoke delegate, make sure all returned values are passed through - p.On("resetIfNeeded").Return(nil) + p.On("ResetIfNeeded").Return(nil) delegate := newMock() delegate.Test(t) defer delegate.AssertExpectations(t) - p.On("getByKindAndName", key).Return(delegate, nil) + p.On("GetByKindAndName", key).Return(delegate, nil) // Set up the mocked method in the delegate - delegate.On(tc.function, tc.inputs...).Return(tc.expectedDelegateOutputs...) + delegate.On(tc.Function, tc.Inputs...).Return(tc.ExpectedDelegateOutputs...) // Invoke the method being tested actual = method.Call(inputValues) // Make sure we get what we expected when invoking the delegate - checkOutputs(tc.expectedDelegateOutputs, actual) + checkOutputs(tc.ExpectedDelegateOutputs, actual) }) } } diff --git a/internal/velero/serverstatusrequest.go b/internal/velero/serverstatusrequest.go index a220c5c19e..c9c8acc0a5 100644 --- a/internal/velero/serverstatusrequest.go +++ b/internal/velero/serverstatusrequest.go @@ -19,17 +19,18 @@ package velero import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" ) type PluginLister interface { // List returns all PluginIdentifiers for kind. - List(kind framework.PluginKind) []framework.PluginIdentifier + List(kind common.PluginKind) []framework.PluginIdentifier } // GetInstalledPluginInfo returns a list of installed plugins func GetInstalledPluginInfo(pluginLister PluginLister) []velerov1api.PluginInfo { var plugins []velerov1api.PluginInfo - for _, v := range framework.AllPluginKinds() { + for _, v := range common.AllPluginKinds() { list := pluginLister.List(v) for _, plugin := range list { pluginInfo := velerov1api.PluginInfo{ diff --git a/pkg/apis/velero/v1/backup.go b/pkg/apis/velero/v1/backup.go index 232955a35d..f3b2725425 100644 --- a/pkg/apis/velero/v1/backup.go +++ b/pkg/apis/velero/v1/backup.go @@ -100,10 +100,18 @@ type BackupSpec struct { // DefaultVolumesToRestic specifies whether restic should be used to take a // backup of all pod volumes by default. + // + // Deprecated: this field is no longer used and will be removed entirely in future. Use DefaultVolumesToFsBackup instead. // +optional - // + nullable + // +nullable DefaultVolumesToRestic *bool `json:"defaultVolumesToRestic,omitempty"` + // DefaultVolumesToFsBackup specifies whether pod volume file system backup should be used + // for all volumes by default. + // +optional + // +nullable + DefaultVolumesToFsBackup *bool `json:"defaultVolumesToFsBackup,omitempty"` + // OrderedResources specifies the backup order of resources of specific Kind. // The map key is the Kind name and value is a list of resource names separated by commas. // Each resource name has format "namespace/resourcename". For cluster resources, simply use "resourcename". 
diff --git a/pkg/apis/velero/v1/backup_repository_types.go b/pkg/apis/velero/v1/backup_repository_types.go index a64e3be689..6a062c4fee 100644 --- a/pkg/apis/velero/v1/backup_repository_types.go +++ b/pkg/apis/velero/v1/backup_repository_types.go @@ -52,8 +52,8 @@ const ( BackupRepositoryPhaseReady BackupRepositoryPhase = "Ready" BackupRepositoryPhaseNotReady BackupRepositoryPhase = "NotReady" - BackupRepositoryTypeRestic string = "restic" - BackupRepositoryTypeUnified string = "unified" + BackupRepositoryTypeRestic string = "restic" + BackupRepositoryTypeKopia string = "kopia" ) // BackupRepositoryStatus is the current status of a BackupRepository. diff --git a/pkg/apis/velero/v1/labels_annotations.go b/pkg/apis/velero/v1/labels_annotations.go index 172b436a83..64c83525a4 100644 --- a/pkg/apis/velero/v1/labels_annotations.go +++ b/pkg/apis/velero/v1/labels_annotations.go @@ -40,16 +40,19 @@ const ( // PodVolumeOperationTimeoutAnnotation is the annotation key used to apply // a backup/restore-specific timeout value for pod volume operations (i.e. - // restic backups/restores). + // pod volume backups/restores). PodVolumeOperationTimeoutAnnotation = "velero.io/pod-volume-timeout" // StorageLocationLabel is the label key used to identify the storage // location of a backup. StorageLocationLabel = "velero.io/storage-location" - // ResticVolumeNamespaceLabel is the label key used to identify which - // namespace a restic repository stores pod volume backups for. - ResticVolumeNamespaceLabel = "velero.io/volume-namespace" + // VolumeNamespaceLabel is the label key used to identify which + // namespace a repository stores backups for. + VolumeNamespaceLabel = "velero.io/volume-namespace" + + // RepositoryTypeLabel is the label key used to identify the type of a repository + RepositoryTypeLabel = "velero.io/repository-type" // SourceClusterK8sVersionAnnotation is the label key used to identify the k8s // git version of the backup , i.e. v1.16.4 diff --git a/pkg/apis/velero/v1/pod_volume_restore_type.go b/pkg/apis/velero/v1/pod_volume_restore_type.go index e0370da637..72c3b891c5 100644 --- a/pkg/apis/velero/v1/pod_volume_restore_type.go +++ b/pkg/apis/velero/v1/pod_volume_restore_type.go @@ -43,6 +43,9 @@ type PodVolumeRestoreSpec struct { // SnapshotID is the ID of the volume snapshot to be restored. SnapshotID string `json:"snapshotID"` + + // SourceNamespace is the original namespace for namespace mapping. + SourceNamespace string `json:"sourceNamespace"` } // PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore. 
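The SourceNamespace field added above records the original namespace so that a restore into a remapped namespace can still locate the data backed up under the old one. A rough sketch under that assumption, using simplified stand-in types rather than the real velerov1api structs:

```go
package main

import "fmt"

// podVolumeRestoreSpec is a simplified stand-in for velerov1api.PodVolumeRestoreSpec;
// only the fields relevant to namespace mapping are kept.
type podVolumeRestoreSpec struct {
	TargetNamespace string // namespace the pod is being restored into
	SourceNamespace string // original namespace, recorded for namespace mapping
	SnapshotID      string
}

// newPodVolumeRestoreSpec applies an optional namespace mapping while keeping the
// original namespace, so the restore can still find data backed up under it.
func newPodVolumeRestoreSpec(origNS, snapshotID string, nsMapping map[string]string) podVolumeRestoreSpec {
	target := origNS
	if mapped, ok := nsMapping[origNS]; ok {
		target = mapped
	}
	return podVolumeRestoreSpec{
		TargetNamespace: target,
		SourceNamespace: origNS,
		SnapshotID:      snapshotID,
	}
}

func main() {
	spec := newPodVolumeRestoreSpec("app-prod", "snap-123", map[string]string{"app-prod": "app-staging"})
	fmt.Printf("%+v\n", spec) // restored into app-staging, snapshot looked up via app-prod
}
```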
diff --git a/pkg/apis/velero/v1/schedule_types.go b/pkg/apis/velero/v1/schedule_types.go index 077a375ecf..6cb553b9a8 100644 --- a/pkg/apis/velero/v1/schedule_types.go +++ b/pkg/apis/velero/v1/schedule_types.go @@ -38,6 +38,10 @@ type ScheduleSpec struct { // +optional // +nullable UseOwnerReferencesInBackup *bool `json:"useOwnerReferencesInBackup,omitempty"` + + // Paused specifies whether the schedule is paused or not + // +optional + Paused bool `json:"paused,omitempty"` } // SchedulePhase is a string representation of the lifecycle phase @@ -87,6 +91,7 @@ type ScheduleStatus struct { // +kubebuilder:printcolumn:name="Schedule",type="string",JSONPath=".spec.schedule",description="A Cron expression defining when to run the Backup" // +kubebuilder:printcolumn:name="LastBackup",type="date",JSONPath=".status.lastBackup",description="The last time a Backup was run for this schedule" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".spec.paused" // Schedule is a Velero resource that represents a pre-scheduled or // periodic Backup that should be run. diff --git a/pkg/apis/velero/v1/volume_snapshot_location_type.go b/pkg/apis/velero/v1/volume_snapshot_location_type.go index 505e1d994a..836701b774 100644 --- a/pkg/apis/velero/v1/volume_snapshot_location_type.go +++ b/pkg/apis/velero/v1/volume_snapshot_location_type.go @@ -16,7 +16,10 @@ limitations under the License. package v1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -61,6 +64,10 @@ type VolumeSnapshotLocationSpec struct { // Config is for provider-specific configuration fields. // +optional Config map[string]string `json:"config,omitempty"` + + // Credential contains the credential information intended to be used with this location + // +optional + Credential *corev1api.SecretKeySelector `json:"credential,omitempty"` } // VolumeSnapshotLocationPhase is the lifecycle phase of a Velero VolumeSnapshotLocation. diff --git a/pkg/apis/velero/v1/zz_generated.deepcopy.go b/pkg/apis/velero/v1/zz_generated.deepcopy.go index 7cf271e8f8..8c488dd4f6 100644 --- a/pkg/apis/velero/v1/zz_generated.deepcopy.go +++ b/pkg/apis/velero/v1/zz_generated.deepcopy.go @@ -337,6 +337,11 @@ func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { *out = new(bool) **out = **in } + if in.DefaultVolumesToFsBackup != nil { + in, out := &in.DefaultVolumesToFsBackup, &out.DefaultVolumesToFsBackup + *out = new(bool) + **out = **in + } if in.OrderedResources != nil { in, out := &in.OrderedResources, &out.OrderedResources *out = make(map[string]string, len(*in)) @@ -1656,6 +1661,11 @@ func (in *VolumeSnapshotLocationSpec) DeepCopyInto(out *VolumeSnapshotLocationSp (*out)[key] = val } } + if in.Credential != nil { + in, out := &in.Credential, &out.Credential + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationSpec. 
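The Credential field added to VolumeSnapshotLocationSpec above is a plain corev1 SecretKeySelector, so a snapshot location can point at a specific key inside a Kubernetes Secret instead of relying on the server-wide credential. A small illustrative snippet of the kind of value a caller might construct; the secret and key names are placeholders:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Placeholder secret/key names; the selector simply identifies one key
	// inside a Kubernetes Secret.
	cred := &corev1.SecretKeySelector{
		LocalObjectReference: corev1.LocalObjectReference{Name: "cloud-credentials"},
		Key:                  "creds-profile-2",
	}
	fmt.Printf("snapshot location credential: secret %q, key %q\n", cred.Name, cred.Key)
}
```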
diff --git a/pkg/backup/backup.go b/pkg/backup/backup.go index d026b09cff..0b00417fdd 100644 --- a/pkg/backup/backup.go +++ b/pkg/backup/backup.go @@ -44,7 +44,8 @@ import ( velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/framework" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + biav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v1" + vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" "github.com/vmware-tanzu/velero/pkg/podexec" "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/util/boolptr" @@ -62,7 +63,7 @@ const BackupFormatVersion = "1.1.0" type Backupper interface { // Backup takes a backup using the specification in the velerov1api.Backup and writes backup and log data // to the given writers. - Backup(logger logrus.FieldLogger, backup *Request, backupFile io.Writer, actions []velero.BackupItemAction, volumeSnapshotterGetter VolumeSnapshotterGetter) error + Backup(logger logrus.FieldLogger, backup *Request, backupFile io.Writer, actions []biav1.BackupItemAction, volumeSnapshotterGetter VolumeSnapshotterGetter) error BackupWithResolvers(log logrus.FieldLogger, backupRequest *Request, backupFile io.Writer, backupItemActionResolver framework.BackupItemActionResolver, itemSnapshotterResolver framework.ItemSnapshotterResolver, volumeSnapshotterGetter VolumeSnapshotterGetter) error @@ -70,14 +71,15 @@ type Backupper interface { // kubernetesBackupper implements Backupper. type kubernetesBackupper struct { - backupClient velerov1client.BackupsGetter - dynamicFactory client.DynamicFactory - discoveryHelper discovery.Helper - podCommandExecutor podexec.PodCommandExecutor - resticBackupperFactory podvolume.BackupperFactory - resticTimeout time.Duration - defaultVolumesToRestic bool - clientPageSize int + backupClient velerov1client.BackupsGetter + dynamicFactory client.DynamicFactory + discoveryHelper discovery.Helper + podCommandExecutor podexec.PodCommandExecutor + resticBackupperFactory podvolume.BackupperFactory + resticTimeout time.Duration + defaultVolumesToFsBackup bool + clientPageSize int + uploaderType string } func (i *itemKey) String() string { @@ -102,18 +104,20 @@ func NewKubernetesBackupper( podCommandExecutor podexec.PodCommandExecutor, resticBackupperFactory podvolume.BackupperFactory, resticTimeout time.Duration, - defaultVolumesToRestic bool, + defaultVolumesToFsBackup bool, clientPageSize int, + uploaderType string, ) (Backupper, error) { return &kubernetesBackupper{ - backupClient: backupClient, - discoveryHelper: discoveryHelper, - dynamicFactory: dynamicFactory, - podCommandExecutor: podCommandExecutor, - resticBackupperFactory: resticBackupperFactory, - resticTimeout: resticTimeout, - defaultVolumesToRestic: defaultVolumesToRestic, - clientPageSize: clientPageSize, + backupClient: backupClient, + discoveryHelper: discoveryHelper, + dynamicFactory: dynamicFactory, + podCommandExecutor: podCommandExecutor, + resticBackupperFactory: resticBackupperFactory, + resticTimeout: resticTimeout, + defaultVolumesToFsBackup: defaultVolumesToFsBackup, + clientPageSize: clientPageSize, + uploaderType: uploaderType, }, nil } @@ -161,7 +165,7 @@ func getResourceHook(hookSpec velerov1api.BackupResourceHookSpec, discoveryHelpe } type VolumeSnapshotterGetter interface { - GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) + 
GetVolumeSnapshotter(name string) (vsv1.VolumeSnapshotter, error) } // Backup backs up the items specified in the Backup, placing them in a gzip-compressed tar file @@ -170,7 +174,7 @@ type VolumeSnapshotterGetter interface { // back up individual resources that don't prevent the backup from continuing to be processed) are logged // to the backup log. func (kb *kubernetesBackupper) Backup(log logrus.FieldLogger, backupRequest *Request, backupFile io.Writer, - actions []velero.BackupItemAction, volumeSnapshotterGetter VolumeSnapshotterGetter) error { + actions []biav1.BackupItemAction, volumeSnapshotterGetter VolumeSnapshotterGetter) error { backupItemActions := framework.NewBackupItemActionResolver(actions) itemSnapshotters := framework.NewItemSnapshotterResolver(nil) return kb.BackupWithResolvers(log, backupRequest, backupFile, backupItemActions, itemSnapshotters, @@ -201,21 +205,24 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger, backupRequest.ResourceIncludesExcludes = collections.GetResourceIncludesExcludes(kb.discoveryHelper, backupRequest.Spec.IncludedResources, backupRequest.Spec.ExcludedResources) log.Infof("Including resources: %s", backupRequest.ResourceIncludesExcludes.IncludesString()) log.Infof("Excluding resources: %s", backupRequest.ResourceIncludesExcludes.ExcludesString()) - log.Infof("Backing up all pod volumes using Restic: %t", boolptr.IsSetToTrue(backupRequest.Backup.Spec.DefaultVolumesToRestic)) + log.Infof("Backing up all volumes using pod volume backup: %t", boolptr.IsSetToTrue(backupRequest.Backup.Spec.DefaultVolumesToFsBackup)) var err error backupRequest.ResourceHooks, err = getResourceHooks(backupRequest.Spec.Hooks.Resources, kb.discoveryHelper) if err != nil { + log.WithError(errors.WithStack(err)).Debugf("Error from getResourceHooks") return err } - backupRequest.ResolvedActions, err = backupItemActionResolver.ResolveActions(kb.discoveryHelper) + backupRequest.ResolvedActions, err = backupItemActionResolver.ResolveActions(kb.discoveryHelper, log) if err != nil { + log.WithError(errors.WithStack(err)).Debugf("Error from backupItemActionResolver.ResolveActions") return err } - backupRequest.ResolvedItemSnapshotters, err = itemSnapshotterResolver.ResolveActions(kb.discoveryHelper) + backupRequest.ResolvedItemSnapshotters, err = itemSnapshotterResolver.ResolveActions(kb.discoveryHelper, log) if err != nil { + log.WithError(errors.WithStack(err)).Debugf("Error from itemSnapshotterResolver.ResolveActions") return err } @@ -236,8 +243,9 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger, var resticBackupper podvolume.Backupper if kb.resticBackupperFactory != nil { - resticBackupper, err = kb.resticBackupperFactory.NewBackupper(ctx, backupRequest.Backup) + resticBackupper, err = kb.resticBackupperFactory.NewBackupper(ctx, backupRequest.Backup, kb.uploaderType) if err != nil { + log.WithError(errors.WithStack(err)).Debugf("Error from NewBackupper") return errors.WithStack(err) } } diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index cf6a4269f8..387c91c4ac 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -47,6 +47,8 @@ import ( "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + biav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v1" + vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" 
"github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/test" testutil "github.com/vmware-tanzu/velero/pkg/test" @@ -1360,7 +1362,7 @@ func TestBackupActionsRunForCorrectItems(t *testing.T) { h.addItems(t, resource) } - actions := []velero.BackupItemAction{} + actions := []biav1.BackupItemAction{} for action := range tc.actions { actions = append(actions, action) } @@ -1386,7 +1388,7 @@ func TestBackupWithInvalidActions(t *testing.T) { name string backup *velerov1.Backup apiResources []*test.APIResource - actions []velero.BackupItemAction + actions []biav1.BackupItemAction }{ { name: "action with invalid label selector results in an error", @@ -1402,7 +1404,7 @@ func TestBackupWithInvalidActions(t *testing.T) { builder.ForPersistentVolume("baz").Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ new(recordResourcesAction).ForLabelSelector("=invalid-selector"), }, }, @@ -1420,7 +1422,7 @@ func TestBackupWithInvalidActions(t *testing.T) { builder.ForPersistentVolume("baz").Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ &appliesToErrorAction{}, }, }, @@ -1482,7 +1484,7 @@ func TestBackupActionModifications(t *testing.T) { name string backup *velerov1.Backup apiResources []*test.APIResource - actions []velero.BackupItemAction + actions []biav1.BackupItemAction want map[string]unstructuredObject }{ { @@ -1493,7 +1495,7 @@ func TestBackupActionModifications(t *testing.T) { builder.ForPod("ns-1", "pod-1").Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ modifyingActionGetter(func(item *unstructured.Unstructured) { item.SetLabels(map[string]string{"updated": "true"}) }), @@ -1510,7 +1512,7 @@ func TestBackupActionModifications(t *testing.T) { builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("should-be-removed", "true")).Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ modifyingActionGetter(func(item *unstructured.Unstructured) { item.SetLabels(nil) }), @@ -1527,7 +1529,7 @@ func TestBackupActionModifications(t *testing.T) { builder.ForPod("ns-1", "pod-1").Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ modifyingActionGetter(func(item *unstructured.Unstructured) { item.Object["spec"].(map[string]interface{})["nodeName"] = "foo" }), @@ -1545,7 +1547,7 @@ func TestBackupActionModifications(t *testing.T) { builder.ForPod("ns-1", "pod-1").Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ modifyingActionGetter(func(item *unstructured.Unstructured) { item.SetName(item.GetName() + "-updated") item.SetNamespace(item.GetNamespace() + "-updated") @@ -1586,7 +1588,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { name string backup *velerov1.Backup apiResources []*test.APIResource - actions []velero.BackupItemAction + actions []biav1.BackupItemAction want []string }{ { @@ -1599,7 +1601,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPod("ns-3", "pod-3").Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ &pluggableAction{ selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { @@ -1631,7 +1633,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPod("ns-3", "pod-3").Result(), ), }, - actions: 
[]velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ &pluggableAction{ executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { additionalItems := []velero.ResourceIdentifier{ @@ -1661,7 +1663,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPersistentVolume("pv-2").Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ &pluggableAction{ executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { additionalItems := []velero.ResourceIdentifier{ @@ -1694,7 +1696,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPersistentVolume("pv-2").Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ &pluggableAction{ executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { additionalItems := []velero.ResourceIdentifier{ @@ -1724,7 +1726,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPersistentVolume("pv-2").Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ &pluggableAction{ executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { additionalItems := []velero.ResourceIdentifier{ @@ -1755,7 +1757,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPersistentVolume("pv-2").Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ &pluggableAction{ executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { additionalItems := []velero.ResourceIdentifier{ @@ -1785,7 +1787,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPod("ns-3", "pod-3").Result(), ), }, - actions: []velero.BackupItemAction{ + actions: []biav1.BackupItemAction{ &pluggableAction{ selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { @@ -1830,10 +1832,10 @@ func TestBackupActionAdditionalItems(t *testing.T) { } // volumeSnapshotterGetter is a simple implementation of the VolumeSnapshotterGetter -// interface that returns velero.VolumeSnapshotters from a map if they exist. -type volumeSnapshotterGetter map[string]velero.VolumeSnapshotter +// interface that returns vsv1.VolumeSnapshotters from a map if they exist. +type volumeSnapshotterGetter map[string]vsv1.VolumeSnapshotter -func (vsg volumeSnapshotterGetter) GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) { +func (vsg volumeSnapshotterGetter) GetVolumeSnapshotter(name string) (vsv1.VolumeSnapshotter, error) { snapshotter, ok := vsg[name] if !ok { return nil, errors.New("volume snapshotter not found") @@ -1858,7 +1860,7 @@ type volumeInfo struct { snapshotErr bool } -// fakeVolumeSnapshotter is a test fake for the velero.VolumeSnapshotter interface. +// fakeVolumeSnapshotter is a test fake for the vsv1.VolumeSnapshotter interface. type fakeVolumeSnapshotter struct { // PVVolumeNames is a map from PV name to volume ID, used as the basis // for the GetVolumeID method. 
@@ -1981,7 +1983,7 @@ func TestBackupWithSnapshots(t *testing.T) { builder.ForPersistentVolume("pv-1").Result(), ), }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ + snapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), }, want: []*volume.Snapshot{ @@ -2014,7 +2016,7 @@ func TestBackupWithSnapshots(t *testing.T) { builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabels("failure-domain.beta.kubernetes.io/zone", "zone-1")).Result(), ), }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ + snapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "zone-1", "type-1", 100, false), }, want: []*volume.Snapshot{ @@ -2048,7 +2050,7 @@ func TestBackupWithSnapshots(t *testing.T) { builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabels("topology.kubernetes.io/zone", "zone-1")).Result(), ), }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ + snapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "zone-1", "type-1", 100, false), }, want: []*volume.Snapshot{ @@ -2082,7 +2084,7 @@ func TestBackupWithSnapshots(t *testing.T) { builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabelsMap(map[string]string{"failure-domain.beta.kubernetes.io/zone": "zone-1-deprecated", "topology.kubernetes.io/zone": "zone-1-ga"})).Result(), ), }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ + snapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "zone-1-ga", "type-1", 100, false), }, want: []*volume.Snapshot{ @@ -2116,7 +2118,7 @@ func TestBackupWithSnapshots(t *testing.T) { builder.ForPersistentVolume("pv-1").Result(), ), }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ + snapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, true), }, want: []*volume.Snapshot{ @@ -2148,7 +2150,7 @@ func TestBackupWithSnapshots(t *testing.T) { builder.ForPersistentVolume("pv-1").Result(), ), }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ + snapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), }, want: nil, @@ -2163,7 +2165,7 @@ func TestBackupWithSnapshots(t *testing.T) { builder.ForPersistentVolume("pv-1").Result(), ), }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ + snapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), }, want: nil, @@ -2181,7 +2183,7 @@ func TestBackupWithSnapshots(t *testing.T) { builder.ForPersistentVolume("pv-1").Result(), ), }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{}, + snapshotterGetter: map[string]vsv1.VolumeSnapshotter{}, want: nil, }, { @@ -2197,7 +2199,7 @@ func TestBackupWithSnapshots(t *testing.T) { builder.ForPersistentVolume("pv-1").Result(), ), }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ + snapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "default": new(fakeVolumeSnapshotter), }, want: nil, @@ -2217,7 +2219,7 @@ func TestBackupWithSnapshots(t *testing.T) { builder.ForPersistentVolume("pv-2").Result(), ), }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ + snapshotterGetter: 
map[string]vsv1.VolumeSnapshotter{ "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), "another": new(fakeVolumeSnapshotter).WithVolume("pv-2", "vol-2", "", "type-2", 100, false), }, @@ -2595,7 +2597,7 @@ func TestBackupWithHooks(t *testing.T) { type fakeResticBackupperFactory struct{} -func (f *fakeResticBackupperFactory) NewBackupper(context.Context, *velerov1.Backup) (podvolume.Backupper, error) { +func (f *fakeResticBackupperFactory) NewBackupper(context.Context, *velerov1.Backup, string) (podvolume.Backupper, error) { return &fakeResticBackupper{}, nil } @@ -2685,7 +2687,7 @@ func TestBackupWithRestic(t *testing.T) { ), }, vsl: newSnapshotLocation("velero", "default", "default"), - snapshotterGetter: map[string]velero.VolumeSnapshotter{ + snapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "default": new(fakeVolumeSnapshotter). WithVolume("pv-1", "vol-1", "", "type-1", 100, false). WithVolume("pv-2", "vol-2", "", "type-1", 100, false), @@ -2805,7 +2807,7 @@ func newSnapshotLocation(ns, name, provider string) *velerov1.VolumeSnapshotLoca } func defaultBackup() *builder.BackupBuilder { - return builder.ForBackup(velerov1.DefaultNamespace, "backup-1").DefaultVolumesToRestic(false) + return builder.ForBackup(velerov1.DefaultNamespace, "backup-1").DefaultVolumesToFsBackup(false) } func toUnstructuredOrFail(t *testing.T, obj interface{}) map[string]interface{} { diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index df5d48cd26..879f4d70c5 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -41,7 +41,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/features" "github.com/vmware-tanzu/velero/pkg/kuberesource" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/volume" @@ -58,7 +58,7 @@ type itemBackupper struct { volumeSnapshotterGetter VolumeSnapshotterGetter itemHookHandler hook.ItemHookHandler - snapshotLocationVolumeSnapshotters map[string]velero.VolumeSnapshotter + snapshotLocationVolumeSnapshotters map[string]vsv1.VolumeSnapshotter } const ( @@ -146,10 +146,10 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr // nil it on error since it's not valid pod = nil } else { - // Get the list of volumes to back up using restic from the pod's annotations. Remove from this list + // Get the list of volumes to back up using pod volume backup from the pod's annotations. Remove from this list // any volumes that use a PVC that we've already backed up (this would be in a read-write-many scenario, // where it's been backed up from another pod), since we don't need >1 backup per PVC. 
- for _, volume := range podvolume.GetPodVolumesUsingRestic(pod, boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToRestic)) { + for _, volume := range podvolume.GetVolumesByPod(pod, boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToFsBackup)) { if found, pvcName := ib.resticSnapshotTracker.HasPVCForPodVolume(pod, volume); found { log.WithFields(map[string]interface{}{ "podVolume": volume, @@ -357,7 +357,7 @@ func (ib *itemBackupper) executeActions( // volumeSnapshotter instantiates and initializes a VolumeSnapshotter given a VolumeSnapshotLocation, // or returns an existing one if one's already been initialized for the location. -func (ib *itemBackupper) volumeSnapshotter(snapshotLocation *velerov1api.VolumeSnapshotLocation) (velero.VolumeSnapshotter, error) { +func (ib *itemBackupper) volumeSnapshotter(snapshotLocation *velerov1api.VolumeSnapshotLocation) (vsv1.VolumeSnapshotter, error) { if bs, ok := ib.snapshotLocationVolumeSnapshotters[snapshotLocation.Name]; ok { return bs, nil } @@ -372,7 +372,7 @@ func (ib *itemBackupper) volumeSnapshotter(snapshotLocation *velerov1api.VolumeS } if ib.snapshotLocationVolumeSnapshotters == nil { - ib.snapshotLocationVolumeSnapshotters = make(map[string]velero.VolumeSnapshotter) + ib.snapshotLocationVolumeSnapshotters = make(map[string]vsv1.VolumeSnapshotter) } ib.snapshotLocationVolumeSnapshotters[snapshotLocation.Name] = bs @@ -447,7 +447,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie var ( volumeID, location string - volumeSnapshotter velero.VolumeSnapshotter + volumeSnapshotter vsv1.VolumeSnapshotter ) for _, snapshotLocation := range ib.backupRequest.SnapshotLocations { diff --git a/pkg/backup/item_collector.go b/pkg/backup/item_collector.go index 5cbc178361..474fbabee3 100644 --- a/pkg/backup/item_collector.go +++ b/pkg/backup/item_collector.go @@ -225,8 +225,11 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group namespacesToList := getNamespacesToList(r.backupRequest.NamespaceIncludesExcludes) - // Check if we're backing up namespaces, and only certain ones - if gr == kuberesource.Namespaces && namespacesToList[0] != "" { + // Check if we're backing up namespaces for a less-than-full backup. + // We enter this block if resource is Namespaces and the namespace list is either empty or contains + // an explicit namespace list. 
(We skip this block if the list contains "" since that indicates + // a full-cluster backup.) + if gr == kuberesource.Namespaces && (len(namespacesToList) == 0 || namespacesToList[0] != "") { resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, "") if err != nil { log.WithError(err).Error("Error getting dynamic client") diff --git a/pkg/backup/request.go b/pkg/backup/request.go index 38cd499177..69dbeca704 100644 --- a/pkg/backup/request.go +++ b/pkg/backup/request.go @@ -50,7 +50,7 @@ type Request struct { VolumeSnapshots []*volume.Snapshot PodVolumeBackups []*velerov1api.PodVolumeBackup BackedUpItems map[itemKey]struct{} - CSISnapshots []*snapshotv1api.VolumeSnapshot + CSISnapshots []snapshotv1api.VolumeSnapshot } // BackupResourceList returns the list of backed up resources grouped by the API diff --git a/pkg/builder/backup_builder.go b/pkg/builder/backup_builder.go index 73a994b465..f18ce49092 100644 --- a/pkg/builder/backup_builder.go +++ b/pkg/builder/backup_builder.go @@ -174,6 +174,12 @@ func (b *BackupBuilder) SnapshotVolumes(val bool) *BackupBuilder { return b } +// DefaultVolumesToFsBackup sets the Backup's "DefaultVolumesToFsBackup" flag. +func (b *BackupBuilder) DefaultVolumesToFsBackup(val bool) *BackupBuilder { + b.object.Spec.DefaultVolumesToFsBackup = &val + return b +} + // DefaultVolumesToRestic sets the Backup's "DefaultVolumesToRestic" flag. func (b *BackupBuilder) DefaultVolumesToRestic(val bool) *BackupBuilder { b.object.Spec.DefaultVolumesToRestic = &val diff --git a/pkg/builder/volume_snapshot_location_builder.go b/pkg/builder/volume_snapshot_location_builder.go index 1862045e0b..af94471e36 100644 --- a/pkg/builder/volume_snapshot_location_builder.go +++ b/pkg/builder/volume_snapshot_location_builder.go @@ -1,5 +1,5 @@ /* -Copyright 2019 the Velero contributors. +Copyright the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,6 +19,8 @@ package builder import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1api "k8s.io/api/core/v1" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" ) @@ -62,3 +64,9 @@ func (b *VolumeSnapshotLocationBuilder) Provider(name string) *VolumeSnapshotLoc b.object.Spec.Provider = name return b } + +// Credential sets the VolumeSnapshotLocation's credential selector. 
+func (b *VolumeSnapshotLocationBuilder) Credential(selector *corev1api.SecretKeySelector) *VolumeSnapshotLocationBuilder { + b.object.Spec.Credential = selector + return b +} diff --git a/pkg/cmd/cli/backup/create.go b/pkg/cmd/cli/backup/create.go index 3c2c23838d..52aeab3d52 100644 --- a/pkg/cmd/cli/backup/create.go +++ b/pkg/cmd/cli/backup/create.go @@ -82,23 +82,23 @@ func NewCreateCommand(f client.Factory, use string) *cobra.Command { } type CreateOptions struct { - Name string - TTL time.Duration - SnapshotVolumes flag.OptionalBool - DefaultVolumesToRestic flag.OptionalBool - IncludeNamespaces flag.StringArray - ExcludeNamespaces flag.StringArray - IncludeResources flag.StringArray - ExcludeResources flag.StringArray - Labels flag.Map - Selector flag.LabelSelector - IncludeClusterResources flag.OptionalBool - Wait bool - StorageLocation string - SnapshotLocations []string - FromSchedule string - OrderedResources string - CSISnapshotTimeout time.Duration + Name string + TTL time.Duration + SnapshotVolumes flag.OptionalBool + DefaultVolumesToFsBackup flag.OptionalBool + IncludeNamespaces flag.StringArray + ExcludeNamespaces flag.StringArray + IncludeResources flag.StringArray + ExcludeResources flag.StringArray + Labels flag.Map + Selector flag.LabelSelector + IncludeClusterResources flag.OptionalBool + Wait bool + StorageLocation string + SnapshotLocations []string + FromSchedule string + OrderedResources string + CSISnapshotTimeout time.Duration client veleroclient.Interface } @@ -124,7 +124,7 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { flags.VarP(&o.Selector, "selector", "l", "Only back up resources matching this label selector.") flags.StringVar(&o.OrderedResources, "ordered-resources", "", "Mapping Kinds to an ordered list of specific resources of that Kind. Resource names are separated by commas and their names are in format 'namespace/resourcename'. For cluster scope resource, simply use resource name. Key-value pairs in the mapping are separated by semi-colon. Example: 'pods=ns1/pod1,ns1/pod2;persistentvolumeclaims=ns1/pvc4,ns1/pvc8'. Optional.") flags.DurationVar(&o.CSISnapshotTimeout, "csi-snapshot-timeout", o.CSISnapshotTimeout, "How long to wait for CSI snapshot creation before timeout.") - f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "Take snapshots of PersistentVolumes as part of the backup.") + f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "Take snapshots of PersistentVolumes as part of the backup. 
If the parameter is not set, it is treated as setting to 'true'.") // this allows the user to just specify "--snapshot-volumes" as shorthand for "--snapshot-volumes=true" // like a normal bool flag f.NoOptDefVal = "true" @@ -132,7 +132,7 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "Include cluster-scoped resources in the backup") f.NoOptDefVal = "true" - f = flags.VarPF(&o.DefaultVolumesToRestic, "default-volumes-to-restic", "", "Use restic by default to backup all pod volumes") + f = flags.VarPF(&o.DefaultVolumesToFsBackup, "default-volumes-to-fs-backup", "", "Use pod volume file system backup by default for volumes") f.NoOptDefVal = "true" } @@ -350,8 +350,8 @@ func (o *CreateOptions) BuildBackup(namespace string) (*velerov1api.Backup, erro if o.IncludeClusterResources.Value != nil { backupBuilder.IncludeClusterResources(*o.IncludeClusterResources.Value) } - if o.DefaultVolumesToRestic.Value != nil { - backupBuilder.DefaultVolumesToRestic(*o.DefaultVolumesToRestic.Value) + if o.DefaultVolumesToFsBackup.Value != nil { + backupBuilder.DefaultVolumesToFsBackup(*o.DefaultVolumesToFsBackup.Value) } } diff --git a/pkg/cmd/cli/delete_options.go b/pkg/cmd/cli/delete_options.go index fbdcb0d0a9..80fd292228 100644 --- a/pkg/cmd/cli/delete_options.go +++ b/pkg/cmd/cli/delete_options.go @@ -27,24 +27,20 @@ import ( "github.com/spf13/pflag" "github.com/vmware-tanzu/velero/pkg/client" - "github.com/vmware-tanzu/velero/pkg/cmd/util/flag" clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" ) // DeleteOptions contains parameters used for deleting a restore. type DeleteOptions struct { - Names []string - all bool - Selector flag.LabelSelector - Confirm bool - Client clientset.Interface - Namespace string - singularTypeName string + *SelectOptions + Confirm bool + Client clientset.Interface + Namespace string } func NewDeleteOptions(singularTypeName string) *DeleteOptions { o := &DeleteOptions{} - o.singularTypeName = singularTypeName + o.SelectOptions = NewSelectOptions("delete", singularTypeName) return o } @@ -56,8 +52,7 @@ func (o *DeleteOptions) Complete(f client.Factory, args []string) error { return err } o.Client = client - o.Names = args - return nil + return o.SelectOptions.Complete(args) } // Validate validates the fields of the DeleteOptions struct. @@ -65,23 +60,14 @@ func (o *DeleteOptions) Validate(c *cobra.Command, f client.Factory, args []stri if o.Client == nil { return errors.New("Velero client is not set; unable to proceed") } - var ( - hasNames = len(o.Names) > 0 - hasAll = o.all - hasSelector = o.Selector.LabelSelector != nil - ) - if !xor(hasNames, hasAll, hasSelector) { - return errors.New("you must specify exactly one of: specific " + o.singularTypeName + " name(s), the --all flag, or the --selector flag") - } - return nil + return o.SelectOptions.Validate() } // BindFlags binds options for this command to flags. func (o *DeleteOptions) BindFlags(flags *pflag.FlagSet) { flags.BoolVar(&o.Confirm, "confirm", o.Confirm, "Confirm deletion") - flags.BoolVar(&o.all, "all", o.all, "Delete all "+o.singularTypeName+"s") - flags.VarP(&o.Selector, "selector", "l", "Delete all "+o.singularTypeName+"s matching this label selector.") + o.SelectOptions.BindFlags(flags) } // GetConfirmation ensures that the user confirms the action before proceeding. 
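The DeleteOptions changes above move the name/--all/--selector handling into a shared SelectOptions type whose definition is not part of this hunk. Below is a minimal sketch of the "exactly one of" validation it presumably keeps, mirroring the xor-style check that previously lived in DeleteOptions.Validate; the function names are illustrative, not the actual implementation.

```go
package main

import (
	"errors"
	"fmt"
)

// exactlyOne reports whether exactly one of the given conditions is true,
// mirroring the xor-style check the old DeleteOptions.Validate performed.
func exactlyOne(conds ...bool) bool {
	n := 0
	for _, c := range conds {
		if c {
			n++
		}
	}
	return n == 1
}

// validateSelection is a hypothetical stand-in for SelectOptions.Validate.
func validateSelection(names []string, all, hasSelector bool, singularTypeName string) error {
	if !exactlyOne(len(names) > 0, all, hasSelector) {
		return errors.New("you must specify exactly one of: specific " + singularTypeName + " name(s), the --all flag, or the --selector flag")
	}
	return nil
}

func main() {
	fmt.Println(validateSelection([]string{"backup-1"}, false, false, "backup")) // <nil>
	fmt.Println(validateSelection(nil, true, true, "backup"))                    // error: more than one selector supplied
}
```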
diff --git a/pkg/cmd/cli/install/install.go b/pkg/cmd/cli/install/install.go index a908ff23f6..fe4bebea30 100644 --- a/pkg/cmd/cli/install/install.go +++ b/pkg/cmd/cli/install/install.go @@ -43,40 +43,40 @@ import ( // InstallOptions collects all the options for installing Velero into a Kubernetes cluster. type InstallOptions struct { - Namespace string - Image string - BucketName string - Prefix string - ProviderName string - PodAnnotations flag.Map - PodLabels flag.Map - ServiceAccountAnnotations flag.Map - VeleroPodCPURequest string - VeleroPodMemRequest string - VeleroPodCPULimit string - VeleroPodMemLimit string - ResticPodCPURequest string - ResticPodMemRequest string - ResticPodCPULimit string - ResticPodMemLimit string - RestoreOnly bool - SecretFile string - NoSecret bool - DryRun bool - BackupStorageConfig flag.Map - VolumeSnapshotConfig flag.Map - UseRestic bool - Wait bool - UseVolumeSnapshots bool - DefaultResticMaintenanceFrequency time.Duration - GarbageCollectionFrequency time.Duration - Plugins flag.StringArray - NoDefaultBackupLocation bool - CRDsOnly bool - CACertFile string - Features string - DefaultVolumesToRestic bool - UploaderType string + Namespace string + Image string + BucketName string + Prefix string + ProviderName string + PodAnnotations flag.Map + PodLabels flag.Map + ServiceAccountAnnotations flag.Map + VeleroPodCPURequest string + VeleroPodMemRequest string + VeleroPodCPULimit string + VeleroPodMemLimit string + ResticPodCPURequest string + ResticPodMemRequest string + ResticPodCPULimit string + ResticPodMemLimit string + RestoreOnly bool + SecretFile string + NoSecret bool + DryRun bool + BackupStorageConfig flag.Map + VolumeSnapshotConfig flag.Map + UseRestic bool + Wait bool + UseVolumeSnapshots bool + DefaultRepoMaintenanceFrequency time.Duration + GarbageCollectionFrequency time.Duration + Plugins flag.StringArray + NoDefaultBackupLocation bool + CRDsOnly bool + CACertFile string + Features string + DefaultVolumesToFsBackup bool + UploaderType string } // BindFlags adds command line values to the options struct. @@ -106,13 +106,13 @@ func (o *InstallOptions) BindFlags(flags *pflag.FlagSet) { flags.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Generate resources, but don't send them to the cluster. Use with -o. Optional.") flags.BoolVar(&o.UseRestic, "use-restic", o.UseRestic, "Create restic daemonset. Optional.") flags.BoolVar(&o.Wait, "wait", o.Wait, "Wait for Velero deployment to be ready. Optional.") - flags.DurationVar(&o.DefaultResticMaintenanceFrequency, "default-restic-prune-frequency", o.DefaultResticMaintenanceFrequency, "How often 'restic prune' is run for restic repositories by default. Optional.") + flags.DurationVar(&o.DefaultRepoMaintenanceFrequency, "default-repo-maintain-frequency", o.DefaultRepoMaintenanceFrequency, "How often 'maintain' is run for backup repositories by default. Optional.") flags.DurationVar(&o.GarbageCollectionFrequency, "garbage-collection-frequency", o.GarbageCollectionFrequency, "How often the garbage collection runs for expired backups.(default 1h)") flags.Var(&o.Plugins, "plugins", "Plugin container images to install into the Velero Deployment") flags.BoolVar(&o.CRDsOnly, "crds-only", o.CRDsOnly, "Only generate CustomResourceDefinition resources. Useful for updating CRDs for an existing Velero install.") flags.StringVar(&o.CACertFile, "cacert", o.CACertFile, "File containing a certificate bundle to use when verifying TLS connections to the object store. 
Optional.") flags.StringVar(&o.Features, "features", o.Features, "Comma separated list of Velero feature flags to be set on the Velero deployment and the restic daemonset, if restic is enabled") - flags.BoolVar(&o.DefaultVolumesToRestic, "default-volumes-to-restic", o.DefaultVolumesToRestic, "Bool flag to configure Velero server to use restic by default to backup all pod volumes on all backups. Optional.") + flags.BoolVar(&o.DefaultVolumesToFsBackup, "default-volumes-to-fs-backup", o.DefaultVolumesToFsBackup, "Bool flag to configure Velero server to use pod volume file system backup by default for all volumes on all backups. Optional.") flags.StringVar(&o.UploaderType, "uploader-type", o.UploaderType, fmt.Sprintf("The type of uploader to transfer the data of pod volumes, the supported values are '%s', '%s'", uploader.ResticType, uploader.KopiaType)) } @@ -135,11 +135,11 @@ func NewInstallOptions() *InstallOptions { ResticPodCPULimit: install.DefaultResticPodCPULimit, ResticPodMemLimit: install.DefaultResticPodMemLimit, // Default to creating a VSL unless we're told otherwise - UseVolumeSnapshots: true, - NoDefaultBackupLocation: false, - CRDsOnly: false, - DefaultVolumesToRestic: false, - UploaderType: uploader.ResticType, + UseVolumeSnapshots: true, + NoDefaultBackupLocation: false, + CRDsOnly: false, + DefaultVolumesToFsBackup: false, + UploaderType: uploader.ResticType, } } @@ -177,30 +177,30 @@ func (o *InstallOptions) AsVeleroOptions() (*install.VeleroOptions, error) { } return &install.VeleroOptions{ - Namespace: o.Namespace, - Image: o.Image, - ProviderName: o.ProviderName, - Bucket: o.BucketName, - Prefix: o.Prefix, - PodAnnotations: o.PodAnnotations.Data(), - PodLabels: o.PodLabels.Data(), - ServiceAccountAnnotations: o.ServiceAccountAnnotations.Data(), - VeleroPodResources: veleroPodResources, - ResticPodResources: resticPodResources, - SecretData: secretData, - RestoreOnly: o.RestoreOnly, - UseRestic: o.UseRestic, - UseVolumeSnapshots: o.UseVolumeSnapshots, - BSLConfig: o.BackupStorageConfig.Data(), - VSLConfig: o.VolumeSnapshotConfig.Data(), - DefaultResticMaintenanceFrequency: o.DefaultResticMaintenanceFrequency, - GarbageCollectionFrequency: o.GarbageCollectionFrequency, - Plugins: o.Plugins, - NoDefaultBackupLocation: o.NoDefaultBackupLocation, - CACertData: caCertData, - Features: strings.Split(o.Features, ","), - DefaultVolumesToRestic: o.DefaultVolumesToRestic, - UploaderType: o.UploaderType, + Namespace: o.Namespace, + Image: o.Image, + ProviderName: o.ProviderName, + Bucket: o.BucketName, + Prefix: o.Prefix, + PodAnnotations: o.PodAnnotations.Data(), + PodLabels: o.PodLabels.Data(), + ServiceAccountAnnotations: o.ServiceAccountAnnotations.Data(), + VeleroPodResources: veleroPodResources, + ResticPodResources: resticPodResources, + SecretData: secretData, + RestoreOnly: o.RestoreOnly, + UseRestic: o.UseRestic, + UseVolumeSnapshots: o.UseVolumeSnapshots, + BSLConfig: o.BackupStorageConfig.Data(), + VSLConfig: o.VolumeSnapshotConfig.Data(), + DefaultRepoMaintenanceFrequency: o.DefaultRepoMaintenanceFrequency, + GarbageCollectionFrequency: o.GarbageCollectionFrequency, + Plugins: o.Plugins, + NoDefaultBackupLocation: o.NoDefaultBackupLocation, + CACertData: caCertData, + Features: strings.Split(o.Features, ","), + DefaultVolumesToFsBackup: o.DefaultVolumesToFsBackup, + UploaderType: o.UploaderType, }, nil } @@ -393,8 +393,8 @@ func (o *InstallOptions) Validate(c *cobra.Command, args []string, f client.Fact } } - if o.DefaultVolumesToRestic && !o.UseRestic { - return 
errors.New("--use-restic is required when using --default-volumes-to-restic") + if o.DefaultVolumesToFsBackup && !o.UseRestic { + return errors.New("--use-restic is required when using --default-volumes-to-fs-backup") } switch { @@ -404,8 +404,8 @@ func (o *InstallOptions) Validate(c *cobra.Command, args []string, f client.Fact return errors.New("Cannot use both --secret-file and --no-secret") } - if o.DefaultResticMaintenanceFrequency < 0 { - return errors.New("--default-restic-prune-frequency must be non-negative") + if o.DefaultRepoMaintenanceFrequency < 0 { + return errors.New("--default-repo-maintain-frequency must be non-negative") } if o.GarbageCollectionFrequency < 0 { diff --git a/pkg/cmd/cli/restic/server.go b/pkg/cmd/cli/restic/server.go index ae593306b0..e7b133ac69 100644 --- a/pkg/cmd/cli/restic/server.go +++ b/pkg/cmd/cli/restic/server.go @@ -24,7 +24,6 @@ import ( "strings" "time" - "github.com/apex/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" @@ -52,7 +51,6 @@ import ( "github.com/vmware-tanzu/velero/pkg/cmd/util/signals" "github.com/vmware-tanzu/velero/pkg/controller" "github.com/vmware-tanzu/velero/pkg/metrics" - "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/logging" ) @@ -197,22 +195,27 @@ func (s *resticServer) run() { s.logger.Fatalf("Failed to create credentials file store: %v", err) } + credSecretStore, err := credentials.NewNamespacedSecretStore(s.mgr.GetClient(), s.namespace) + if err != nil { + s.logger.Fatalf("Failed to create secret file store: %v", err) + } + + credentialGetter := &credentials.CredentialGetter{FromFile: credentialFileStore, FromSecret: credSecretStore} pvbReconciler := controller.PodVolumeBackupReconciler{ - Scheme: s.mgr.GetScheme(), - Client: s.mgr.GetClient(), - Clock: clock.RealClock{}, - Metrics: s.metrics, - CredsFileStore: credentialFileStore, - NodeName: s.nodeName, - FileSystem: filesystem.NewFileSystem(), - ResticExec: restic.BackupExec{}, - Log: s.logger, + Scheme: s.mgr.GetScheme(), + Client: s.mgr.GetClient(), + Clock: clock.RealClock{}, + Metrics: s.metrics, + CredentialGetter: credentialGetter, + NodeName: s.nodeName, + FileSystem: filesystem.NewFileSystem(), + Log: s.logger, } if err := pvbReconciler.SetupWithManager(s.mgr); err != nil { s.logger.Fatal(err, "unable to create controller", "controller", controller.PodVolumeBackup) } - if err = controller.NewPodVolumeRestoreReconciler(s.logger, s.mgr.GetClient(), credentialFileStore).SetupWithManager(s.mgr); err != nil { + if err = controller.NewPodVolumeRestoreReconciler(s.logger, s.mgr.GetClient(), credentialGetter).SetupWithManager(s.mgr); err != nil { s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller") } @@ -276,7 +279,7 @@ func (s *resticServer) markInProgressCRsFailed() { // the function is called before starting the controller manager, the embedded client isn't ready to use, so create a new one here client, err := ctrlclient.New(s.mgr.GetConfig(), ctrlclient.Options{Scheme: s.mgr.GetScheme()}) if err != nil { - log.WithError(errors.WithStack(err)).Error("failed to create client") + s.logger.WithError(errors.WithStack(err)).Error("failed to create client") return } @@ -288,16 +291,16 @@ func (s *resticServer) markInProgressCRsFailed() { func (s *resticServer) markInProgressPVBsFailed(client ctrlclient.Client) { pvbs := &velerov1api.PodVolumeBackupList{} if err := client.List(s.ctx, 
pvbs, &ctrlclient.MatchingFields{"metadata.namespace": s.namespace}); err != nil { - log.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups") + s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups") return } for _, pvb := range pvbs.Items { if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseInProgress { - log.Debugf("the status of podvolumebackup %q is %q, skip", pvb.GetName(), pvb.Status.Phase) + s.logger.Debugf("the status of podvolumebackup %q is %q, skip", pvb.GetName(), pvb.Status.Phase) continue } if pvb.Spec.Node != s.nodeName { - log.Debugf("the node of podvolumebackup %q is %q, not %q, skip", pvb.GetName(), pvb.Spec.Node, s.nodeName) + s.logger.Debugf("the node of podvolumebackup %q is %q, not %q, skip", pvb.GetName(), pvb.Spec.Node, s.nodeName) continue } original := pvb.DeepCopy() @@ -305,22 +308,22 @@ func (s *resticServer) markInProgressPVBsFailed(client ctrlclient.Client) { pvb.Status.Message = fmt.Sprintf("get a podvolumebackup with status %q during the server starting, mark it as %q", velerov1api.PodVolumeBackupPhaseInProgress, pvb.Status.Phase) pvb.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} if err := client.Patch(s.ctx, &pvb, ctrlclient.MergeFrom(original)); err != nil { - log.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumebackup %q", pvb.GetName()) + s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumebackup %q", pvb.GetName()) continue } - log.WithField("podvolumebackup", pvb.GetName()).Warn(pvb.Status.Message) + s.logger.WithField("podvolumebackup", pvb.GetName()).Warn(pvb.Status.Message) } } func (s *resticServer) markInProgressPVRsFailed(client ctrlclient.Client) { pvrs := &velerov1api.PodVolumeRestoreList{} if err := client.List(s.ctx, pvrs, &ctrlclient.MatchingFields{"metadata.namespace": s.namespace}); err != nil { - log.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores") + s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores") return } for _, pvr := range pvrs.Items { if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseInProgress { - log.Debugf("the status of podvolumerestore %q is %q, skip", pvr.GetName(), pvr.Status.Phase) + s.logger.Debugf("the status of podvolumerestore %q is %q, skip", pvr.GetName(), pvr.Status.Phase) continue } @@ -329,12 +332,12 @@ func (s *resticServer) markInProgressPVRsFailed(client ctrlclient.Client) { Namespace: pvr.Spec.Pod.Namespace, Name: pvr.Spec.Pod.Name, }, pod); err != nil { - log.WithError(errors.WithStack(err)).Errorf("failed to get pod \"%s/%s\" of podvolumerestore %q", + s.logger.WithError(errors.WithStack(err)).Errorf("failed to get pod \"%s/%s\" of podvolumerestore %q", pvr.Spec.Pod.Namespace, pvr.Spec.Pod.Name, pvr.GetName()) continue } if pod.Spec.NodeName != s.nodeName { - log.Debugf("the node of pod referenced by podvolumebackup %q is %q, not %q, skip", pvr.GetName(), pod.Spec.NodeName, s.nodeName) + s.logger.Debugf("the node of pod referenced by podvolumebackup %q is %q, not %q, skip", pvr.GetName(), pod.Spec.NodeName, s.nodeName) continue } @@ -343,9 +346,9 @@ func (s *resticServer) markInProgressPVRsFailed(client ctrlclient.Client) { pvr.Status.Message = fmt.Sprintf("get a podvolumerestore with status %q during the server starting, mark it as %q", velerov1api.PodVolumeRestorePhaseInProgress, pvr.Status.Phase) pvr.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} if err := client.Patch(s.ctx, &pvr, ctrlclient.MergeFrom(original)); err != nil 
{ - log.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName()) + s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName()) continue } - log.WithField("podvolumerestore", pvr.GetName()).Warn(pvr.Status.Message) + s.logger.WithField("podvolumerestore", pvr.GetName()).Warn(pvr.Status.Message) } } diff --git a/pkg/cmd/cli/restore/describe.go b/pkg/cmd/cli/restore/describe.go index 3a8c797a61..76ef3a43b0 100644 --- a/pkg/cmd/cli/restore/describe.go +++ b/pkg/cmd/cli/restore/describe.go @@ -28,7 +28,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/cmd" "github.com/vmware-tanzu/velero/pkg/cmd/util/output" - "github.com/vmware-tanzu/velero/pkg/restic" + "github.com/vmware-tanzu/velero/pkg/label" ) func NewDescribeCommand(f client.Factory, use string) *cobra.Command { @@ -69,7 +69,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { first := true for _, restore := range restores.Items { - opts := restic.NewPodVolumeRestoreListOptions(restore.Name) + opts := newPodVolumeRestoreListOptions(restore.Name) podvolumeRestoreList, err := veleroClient.VeleroV1().PodVolumeRestores(f.Namespace()).List(context.TODO(), opts) if err != nil { fmt.Fprintf(os.Stderr, "error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err) @@ -94,3 +94,11 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { return c } + +// newPodVolumeRestoreListOptions creates a ListOptions with a label selector configured to +// find PodVolumeRestores for the restore identified by name. +func newPodVolumeRestoreListOptions(name string) metav1.ListOptions { + return metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", velerov1api.RestoreNameLabel, label.GetValidName(name)), + } +} diff --git a/pkg/cmd/cli/schedule/create.go b/pkg/cmd/cli/schedule/create.go index e32a71c0cd..abd819d3bf 100644 --- a/pkg/cmd/cli/schedule/create.go +++ b/pkg/cmd/cli/schedule/create.go @@ -82,6 +82,7 @@ type CreateOptions struct { BackupOptions *backup.CreateOptions Schedule string UseOwnerReferencesInBackup bool + Paused bool labelSelector *metav1.LabelSelector } @@ -96,6 +97,7 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { o.BackupOptions.BindFlags(flags) flags.StringVar(&o.Schedule, "schedule", o.Schedule, "A cron expression specifying a recurring schedule for this backup to run") flags.BoolVar(&o.UseOwnerReferencesInBackup, "use-owner-references-in-backup", o.UseOwnerReferencesInBackup, "Specifies whether to use OwnerReferences on backups created by this Schedule. 
Notice: if set to true, when schedule is deleted, backups will be deleted too.") + flags.BoolVar(&o.Paused, "paused", o.Paused, "Specifies whether the newly created schedule is paused or not.") } func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Factory) error { @@ -133,22 +135,23 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { }, Spec: api.ScheduleSpec{ Template: api.BackupSpec{ - IncludedNamespaces: o.BackupOptions.IncludeNamespaces, - ExcludedNamespaces: o.BackupOptions.ExcludeNamespaces, - IncludedResources: o.BackupOptions.IncludeResources, - ExcludedResources: o.BackupOptions.ExcludeResources, - IncludeClusterResources: o.BackupOptions.IncludeClusterResources.Value, - LabelSelector: o.BackupOptions.Selector.LabelSelector, - SnapshotVolumes: o.BackupOptions.SnapshotVolumes.Value, - TTL: metav1.Duration{Duration: o.BackupOptions.TTL}, - StorageLocation: o.BackupOptions.StorageLocation, - VolumeSnapshotLocations: o.BackupOptions.SnapshotLocations, - DefaultVolumesToRestic: o.BackupOptions.DefaultVolumesToRestic.Value, - OrderedResources: orders, - CSISnapshotTimeout: metav1.Duration{Duration: o.BackupOptions.CSISnapshotTimeout}, + IncludedNamespaces: o.BackupOptions.IncludeNamespaces, + ExcludedNamespaces: o.BackupOptions.ExcludeNamespaces, + IncludedResources: o.BackupOptions.IncludeResources, + ExcludedResources: o.BackupOptions.ExcludeResources, + IncludeClusterResources: o.BackupOptions.IncludeClusterResources.Value, + LabelSelector: o.BackupOptions.Selector.LabelSelector, + SnapshotVolumes: o.BackupOptions.SnapshotVolumes.Value, + TTL: metav1.Duration{Duration: o.BackupOptions.TTL}, + StorageLocation: o.BackupOptions.StorageLocation, + VolumeSnapshotLocations: o.BackupOptions.SnapshotLocations, + DefaultVolumesToFsBackup: o.BackupOptions.DefaultVolumesToFsBackup.Value, + OrderedResources: orders, + CSISnapshotTimeout: metav1.Duration{Duration: o.BackupOptions.CSISnapshotTimeout}, }, Schedule: o.Schedule, UseOwnerReferencesInBackup: &o.UseOwnerReferencesInBackup, + Paused: o.Paused, }, } diff --git a/pkg/cmd/cli/schedule/pause.go b/pkg/cmd/cli/schedule/pause.go new file mode 100644 index 0000000000..e8aa3fe82e --- /dev/null +++ b/pkg/cmd/cli/schedule/pause.go @@ -0,0 +1,122 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package schedule + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + kubeerrs "k8s.io/apimachinery/pkg/util/errors" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/client" + "github.com/vmware-tanzu/velero/pkg/cmd" + "github.com/vmware-tanzu/velero/pkg/cmd/cli" +) + +// NewPauseCommand creates the command for pause +func NewPauseCommand(f client.Factory, use string) *cobra.Command { + o := cli.NewSelectOptions("pause", "schedule") + + c := &cobra.Command{ + Use: use, + Short: "Pause schedules", + Example: ` # Pause a schedule named "schedule-1". + velero schedule pause schedule-1 + + # Pause schedules named "schedule-1" and "schedule-2". + velero schedule pause schedule-1 schedule-2 + + # Pause all schedules labelled with "foo=bar". + velero schedule pause --selector foo=bar + + # Pause all schedules. + velero schedule pause --all`, + Run: func(c *cobra.Command, args []string) { + cmd.CheckError(o.Complete(args)) + cmd.CheckError(o.Validate()) + cmd.CheckError(runPause(f, o, true)) + }, + } + + o.BindFlags(c.Flags()) + + return c +} + +func runPause(f client.Factory, o *cli.SelectOptions, paused bool) error { + client, err := f.Client() + if err != nil { + return err + } + + var ( + schedules []*velerov1api.Schedule + errs []error + ) + switch { + case len(o.Names) > 0: + for _, name := range o.Names { + schedule, err := client.VeleroV1().Schedules(f.Namespace()).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + errs = append(errs, errors.WithStack(err)) + continue + } + schedules = append(schedules, schedule) + } + default: + selector := labels.Everything().String() + if o.Selector.LabelSelector != nil { + selector = o.Selector.String() + } + res, err := client.VeleroV1().Schedules(f.Namespace()).List(context.TODO(), metav1.ListOptions{ + LabelSelector: selector, + }) + if err != nil { + errs = append(errs, errors.WithStack(err)) + } + + for i := range res.Items { + schedules = append(schedules, &res.Items[i]) + } + } + if len(schedules) == 0 { + fmt.Println("No schedules found") + return nil + } + + msg := "paused" + if !paused { + msg = "unpaused" + } + for _, schedule := range schedules { + if schedule.Spec.Paused == paused { + fmt.Printf("Schedule %s is already %s, skip\n", schedule.Name, msg) + continue + } + schedule.Spec.Paused = paused + if _, err := client.VeleroV1().Schedules(schedule.Namespace).Update(context.TODO(), schedule, metav1.UpdateOptions{}); err != nil { + return errors.Wrapf(err, "failed to update schedule %s", schedule.Name) + } + fmt.Printf("Schedule %s %s successfully\n", schedule.Name, msg) + } + return kubeerrs.NewAggregate(errs) +} diff --git a/pkg/cmd/cli/schedule/schedule.go b/pkg/cmd/cli/schedule/schedule.go index 274a2ad799..85c51c3999 100644 --- a/pkg/cmd/cli/schedule/schedule.go +++ b/pkg/cmd/cli/schedule/schedule.go @@ -34,6 +34,8 @@ func NewCommand(f client.Factory) *cobra.Command { NewGetCommand(f, "get"), NewDescribeCommand(f, "describe"), NewDeleteCommand(f, "delete"), + NewPauseCommand(f, "pause"), + NewUnpauseCommand(f, "unpause"), ) return c diff --git a/pkg/cmd/cli/schedule/unpause.go b/pkg/cmd/cli/schedule/unpause.go new file mode 100644 index 0000000000..8a9e467e3d --- /dev/null +++ b/pkg/cmd/cli/schedule/unpause.go @@ -0,0 +1,55 @@ +/* +Copyright The Velero Contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schedule + +import ( + "github.com/spf13/cobra" + + "github.com/vmware-tanzu/velero/pkg/client" + "github.com/vmware-tanzu/velero/pkg/cmd" + "github.com/vmware-tanzu/velero/pkg/cmd/cli" +) + +// NewUnpauseCommand creates the command for unpause +func NewUnpauseCommand(f client.Factory, use string) *cobra.Command { + o := cli.NewSelectOptions("pause", "schedule") + + c := &cobra.Command{ + Use: use, + Short: "Unpause schedules", + Example: ` # Unpause a schedule named "schedule-1". + velero schedule unpause schedule-1 + + # Unpause schedules named "schedule-1" and "schedule-2". + velero schedule unpause schedule-1 schedule-2 + + # Unpause all schedules labelled with "foo=bar". + velero schedule unpause --selector foo=bar + + # Unpause all schedules. + velero schedule unpause --all`, + Run: func(c *cobra.Command, args []string) { + cmd.CheckError(o.Complete(args)) + cmd.CheckError(o.Validate()) + cmd.CheckError(runPause(f, o, false)) + }, + } + + o.BindFlags(c.Flags()) + + return c +} diff --git a/pkg/cmd/cli/select_option.go b/pkg/cmd/cli/select_option.go new file mode 100644 index 0000000000..45ed547168 --- /dev/null +++ b/pkg/cmd/cli/select_option.go @@ -0,0 +1,69 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "errors" + "strings" + + "github.com/spf13/pflag" + + "github.com/vmware-tanzu/velero/pkg/cmd/util/flag" +) + +// SelectOptions defines the options for selecting resources +type SelectOptions struct { + Names []string + All bool + Selector flag.LabelSelector + CMD string + SingularTypeName string +} + +// NewSelectOptions creates a new option for selector +func NewSelectOptions(cmd, singularTypeName string) *SelectOptions { + return &SelectOptions{ + CMD: cmd, + SingularTypeName: singularTypeName, + } +} + +// Complete fills in the correct values for all the options. +func (o *SelectOptions) Complete(args []string) error { + o.Names = args + return nil +} + +// Validate validates the fields of the SelectOptions struct. +func (o *SelectOptions) Validate() error { + var ( + hasNames = len(o.Names) > 0 + hasAll = o.All + hasSelector = o.Selector.LabelSelector != nil + ) + if !xor(hasNames, hasAll, hasSelector) { + return errors.New("you must specify exactly one of: specific " + o.SingularTypeName + " name(s), the --all flag, or the --selector flag") + } + + return nil +} + +// BindFlags binds options for this command to flags. 
+func (o *SelectOptions) BindFlags(flags *pflag.FlagSet) { + flags.BoolVar(&o.All, "all", o.All, strings.Title(o.CMD)+" all "+o.SingularTypeName+"s") + flags.VarP(&o.Selector, "selector", "l", strings.Title(o.CMD)+" all "+o.SingularTypeName+"s matching this label selector.") +} diff --git a/pkg/cmd/cli/select_option_test.go b/pkg/cmd/cli/select_option_test.go new file mode 100644 index 0000000000..b555f2bebb --- /dev/null +++ b/pkg/cmd/cli/select_option_test.go @@ -0,0 +1,45 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/vmware-tanzu/velero/pkg/cmd/util/flag" +) + +func TestCompleteOfSelectOption(t *testing.T) { + option := &SelectOptions{} + args := []string{"arg1", "arg2"} + require.Nil(t, option.Complete(args)) + assert.Equal(t, args, option.Names) +} + +func TestValidateOfSelectOption(t *testing.T) { + option := &SelectOptions{ + Names: nil, + Selector: flag.LabelSelector{}, + All: false, + } + assert.NotNil(t, option.Validate()) + + option.All = true + assert.Nil(t, option.Validate()) +} diff --git a/pkg/cmd/cli/snapshotlocation/create.go b/pkg/cmd/cli/snapshotlocation/create.go index 2de6b28277..b0e5e2f09e 100644 --- a/pkg/cmd/cli/snapshotlocation/create.go +++ b/pkg/cmd/cli/snapshotlocation/create.go @@ -1,5 +1,5 @@ /* -Copyright 2020 the Velero contributors. +Copyright the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/cmd" "github.com/vmware-tanzu/velero/pkg/cmd/util/flag" @@ -54,16 +55,18 @@ func NewCreateCommand(f client.Factory, use string) *cobra.Command { } type CreateOptions struct { - Name string - Provider string - Config flag.Map - Labels flag.Map + Name string + Provider string + Config flag.Map + Labels flag.Map + Credential flag.Map } func NewCreateOptions() *CreateOptions { return &CreateOptions{ - Config: flag.NewMap(), - Labels: flag.NewMap(), + Config: flag.NewMap(), + Labels: flag.NewMap(), + Credential: flag.NewMap(), } } @@ -71,6 +74,7 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { flags.StringVar(&o.Provider, "provider", o.Provider, "Name of the volume snapshot provider (e.g. aws, azure, gcp).") flags.Var(&o.Config, "config", "Configuration key-value pairs.") flags.Var(&o.Labels, "labels", "Labels to apply to the volume snapshot location.") + flags.Var(&o.Credential, "credential", "The credential to be used by this location as a key-value pair, where the key is the Kubernetes Secret name, and the value is the data key name within the Secret. 
Optional, one value only.") } func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Factory) error { @@ -82,6 +86,10 @@ func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Facto return errors.New("--provider is required") } + if len(o.Credential.Data()) > 1 { + return errors.New("--credential can only contain 1 key/value pair") + } + return nil } @@ -90,10 +98,10 @@ func (o *CreateOptions) Complete(args []string, f client.Factory) error { return nil } -func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { +func (o *CreateOptions) BuildVolumeSnapshotLocation(namespace string) *api.VolumeSnapshotLocation { volumeSnapshotLocation := &api.VolumeSnapshotLocation{ ObjectMeta: metav1.ObjectMeta{ - Namespace: f.Namespace(), + Namespace: namespace, Name: o.Name, Labels: o.Labels.Data(), }, @@ -102,6 +110,15 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { Config: o.Config.Data(), }, } + for secretName, secretKey := range o.Credential.Data() { + volumeSnapshotLocation.Spec.Credential = builder.ForSecretKeySelector(secretName, secretKey).Result() + break + } + return volumeSnapshotLocation +} + +func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { + volumeSnapshotLocation := o.BuildVolumeSnapshotLocation(f.Namespace()) if printed, err := output.PrintWithFormat(c, volumeSnapshotLocation); printed || err != nil { return err diff --git a/pkg/cmd/cli/snapshotlocation/set.go b/pkg/cmd/cli/snapshotlocation/set.go index 8515426610..f6b8ac3688 100644 --- a/pkg/cmd/cli/snapshotlocation/set.go +++ b/pkg/cmd/cli/snapshotlocation/set.go @@ -27,8 +27,10 @@ import ( kbclient "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/cmd" + "github.com/vmware-tanzu/velero/pkg/cmd/util/flag" "github.com/vmware-tanzu/velero/pkg/cmd/util/output" ) @@ -39,9 +41,6 @@ func NewSetCommand(f client.Factory, use string) *cobra.Command { Use: use + " NAME", Short: "Set specific features for a snapshot location", Args: cobra.ExactArgs(1), - // Mark this command as hidden until more functionality is added - // as part of https://github.com/vmware-tanzu/velero/issues/2426 - Hidden: true, Run: func(c *cobra.Command, args []string) { cmd.CheckError(o.Complete(args, f)) cmd.CheckError(o.Validate(c, args, f)) @@ -54,14 +53,18 @@ func NewSetCommand(f client.Factory, use string) *cobra.Command { } type SetOptions struct { - Name string + Name string + Credential flag.Map } func NewSetOptions() *SetOptions { - return &SetOptions{} + return &SetOptions{ + Credential: flag.NewMap(), + } } -func (o *SetOptions) BindFlags(*pflag.FlagSet) { +func (o *SetOptions) BindFlags(flags *pflag.FlagSet) { + flags.Var(&o.Credential, "credential", "Sets the credential to be used by this location as a key-value pair, where the key is the Kubernetes Secret name, and the value is the data key name within the Secret. 
Optional, one value only.") } func (o *SetOptions) Validate(c *cobra.Command, args []string, f client.Factory) error { @@ -69,6 +72,10 @@ func (o *SetOptions) Validate(c *cobra.Command, args []string, f client.Factory) return err } + if len(o.Credential.Data()) > 1 { + return errors.New("--credential can only contain 1 key/value pair") + } + return nil } @@ -92,6 +99,11 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error { return errors.WithStack(err) } + for name, key := range o.Credential.Data() { + location.Spec.Credential = builder.ForSecretKeySelector(name, key).Result() + break + } + if err := kbClient.Update(context.Background(), location, &kbclient.UpdateOptions{}); err != nil { return errors.WithStack(err) } diff --git a/pkg/cmd/server/plugin/plugin.go b/pkg/cmd/server/plugin/plugin.go index 5dec03856b..64dbe4572a 100644 --- a/pkg/cmd/server/plugin/plugin.go +++ b/pkg/cmd/server/plugin/plugin.go @@ -28,6 +28,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/client" velerodiscovery "github.com/vmware-tanzu/velero/pkg/discovery" veleroplugin "github.com/vmware-tanzu/velero/pkg/plugin/framework" + plugincommon "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/restore" ) @@ -76,7 +77,7 @@ func newPodBackupItemAction(logger logrus.FieldLogger) (interface{}, error) { return backup.NewPodAction(logger), nil } -func newServiceAccountBackupItemAction(f client.Factory) veleroplugin.HandlerInitializer { +func newServiceAccountBackupItemAction(f client.Factory) plugincommon.HandlerInitializer { return func(logger logrus.FieldLogger) (interface{}, error) { // TODO(ncdc): consider a k8s style WantsKubernetesClientSet initialization approach clientset, err := f.KubeClient() @@ -101,7 +102,7 @@ func newServiceAccountBackupItemAction(f client.Factory) veleroplugin.HandlerIni } } -func newRemapCRDVersionAction(f client.Factory) veleroplugin.HandlerInitializer { +func newRemapCRDVersionAction(f client.Factory) plugincommon.HandlerInitializer { return func(logger logrus.FieldLogger) (interface{}, error) { config, err := f.ClientConfig() if err != nil { @@ -138,7 +139,7 @@ func newInitRestoreHookPodAction(logger logrus.FieldLogger) (interface{}, error) return restore.NewInitRestoreHookPodAction(logger), nil } -func newResticRestoreItemAction(f client.Factory) veleroplugin.HandlerInitializer { +func newResticRestoreItemAction(f client.Factory) plugincommon.HandlerInitializer { return func(logger logrus.FieldLogger) (interface{}, error) { client, err := f.KubeClient() if err != nil { @@ -174,7 +175,7 @@ func newCRDV1PreserveUnknownFieldsItemAction(logger logrus.FieldLogger) (interfa return restore.NewCRDV1PreserveUnknownFieldsAction(logger), nil } -func newChangeStorageClassRestoreItemAction(f client.Factory) veleroplugin.HandlerInitializer { +func newChangeStorageClassRestoreItemAction(f client.Factory) plugincommon.HandlerInitializer { return func(logger logrus.FieldLogger) (interface{}, error) { client, err := f.KubeClient() if err != nil { @@ -197,7 +198,7 @@ func newClusterRoleBindingItemAction(logger logrus.FieldLogger) (interface{}, er return restore.NewClusterRoleBindingAction(logger), nil } -func newChangePVCNodeSelectorItemAction(f client.Factory) veleroplugin.HandlerInitializer { +func newChangePVCNodeSelectorItemAction(f client.Factory) plugincommon.HandlerInitializer { return func(logger logrus.FieldLogger) (interface{}, error) { client, err := f.KubeClient() if err != nil { diff --git a/pkg/cmd/server/server.go 
b/pkg/cmd/server/server.go index 96dc264e8f..ccf9f1edd1 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -35,7 +35,6 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/cobra" corev1api "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/clock" @@ -50,9 +49,9 @@ import ( snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" snapshotv1client "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" snapshotv1informers "github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions" - snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" "github.com/vmware-tanzu/velero/internal/credentials" + "github.com/vmware-tanzu/velero/internal/storage" "github.com/vmware-tanzu/velero/pkg/backup" "github.com/vmware-tanzu/velero/pkg/buildinfo" "github.com/vmware-tanzu/velero/pkg/client" @@ -68,8 +67,8 @@ import ( "github.com/vmware-tanzu/velero/pkg/metrics" "github.com/vmware-tanzu/velero/pkg/persistence" "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" "github.com/vmware-tanzu/velero/pkg/podexec" - "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/restore" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/logging" @@ -79,12 +78,13 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" - "github.com/vmware-tanzu/velero/internal/storage" "github.com/vmware-tanzu/velero/internal/util/managercontroller" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/repository" repokey "github.com/vmware-tanzu/velero/pkg/repository/keys" + + "github.com/vmware-tanzu/velero/pkg/nodeagent" ) const ( @@ -128,9 +128,9 @@ type serverConfig struct { clientPageSize int profilerAddress string formatFlag *logging.FormatFlag - defaultResticMaintenanceFrequency time.Duration + repoMaintenanceFrequency time.Duration garbageCollectionFrequency time.Duration - defaultVolumesToRestic bool + defaultVolumesToFsBackup bool uploaderType string } @@ -144,25 +144,24 @@ func NewCommand(f client.Factory) *cobra.Command { volumeSnapshotLocations = flag.NewMap().WithKeyValueDelimiter(':') logLevelFlag = logging.LogLevelFlag(logrus.InfoLevel) config = serverConfig{ - pluginDir: "/plugins", - metricsAddress: defaultMetricsAddress, - defaultBackupLocation: "default", - defaultVolumeSnapshotLocations: make(map[string]string), - backupSyncPeriod: defaultBackupSyncPeriod, - defaultBackupTTL: defaultBackupTTL, - defaultCSISnapshotTimeout: defaultCSISnapshotTimeout, - storeValidationFrequency: defaultStoreValidationFrequency, - podVolumeOperationTimeout: defaultPodVolumeOperationTimeout, - restoreResourcePriorities: defaultRestorePriorities, - clientQPS: defaultClientQPS, - clientBurst: defaultClientBurst, - clientPageSize: defaultClientPageSize, - profilerAddress: defaultProfilerAddress, - resourceTerminatingTimeout: defaultResourceTerminatingTimeout, - formatFlag: logging.NewFormatFlag(), - defaultResticMaintenanceFrequency: restic.DefaultMaintenanceFrequency, - defaultVolumesToRestic: restic.DefaultVolumesToRestic, - uploaderType: uploader.ResticType, + pluginDir: 
"/plugins", + metricsAddress: defaultMetricsAddress, + defaultBackupLocation: "default", + defaultVolumeSnapshotLocations: make(map[string]string), + backupSyncPeriod: defaultBackupSyncPeriod, + defaultBackupTTL: defaultBackupTTL, + defaultCSISnapshotTimeout: defaultCSISnapshotTimeout, + storeValidationFrequency: defaultStoreValidationFrequency, + podVolumeOperationTimeout: defaultPodVolumeOperationTimeout, + restoreResourcePriorities: defaultRestorePriorities, + clientQPS: defaultClientQPS, + clientBurst: defaultClientBurst, + clientPageSize: defaultClientPageSize, + profilerAddress: defaultProfilerAddress, + resourceTerminatingTimeout: defaultResourceTerminatingTimeout, + formatFlag: logging.NewFormatFlag(), + defaultVolumesToFsBackup: podvolume.DefaultVolumesToFsBackup, + uploaderType: uploader.ResticType, } ) @@ -212,7 +211,7 @@ func NewCommand(f client.Factory) *cobra.Command { command.Flags().StringVar(&config.pluginDir, "plugin-dir", config.pluginDir, "Directory containing Velero plugins") command.Flags().StringVar(&config.metricsAddress, "metrics-address", config.metricsAddress, "The address to expose prometheus metrics") command.Flags().DurationVar(&config.backupSyncPeriod, "backup-sync-period", config.backupSyncPeriod, "How often to ensure all Velero backups in object storage exist as Backup API objects in the cluster. This is the default sync period if none is explicitly specified for a backup storage location.") - command.Flags().DurationVar(&config.podVolumeOperationTimeout, "restic-timeout", config.podVolumeOperationTimeout, "How long backups/restores of pod volumes should be allowed to run before timing out.") + command.Flags().DurationVar(&config.podVolumeOperationTimeout, "fs-backup-timeout", config.podVolumeOperationTimeout, "How long pod volume file system backups/restores should be allowed to run before timing out.") command.Flags().BoolVar(&config.restoreOnly, "restore-only", config.restoreOnly, "Run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled. DEPRECATED: this flag will be removed in v2.0. Use read-only backup storage locations instead.") command.Flags().StringSliceVar(&config.disabledControllers, "disable-controllers", config.disabledControllers, fmt.Sprintf("List of controllers to disable on startup. 
Valid values are %s", strings.Join(controller.DisableableControllers, ","))) command.Flags().StringSliceVar(&config.restoreResourcePriorities, "restore-resource-priorities", config.restoreResourcePriorities, "Desired order of resource restores; any resource not in the list will be restored alphabetically after the prioritized resources.") @@ -225,9 +224,9 @@ func NewCommand(f client.Factory) *cobra.Command { command.Flags().StringVar(&config.profilerAddress, "profiler-address", config.profilerAddress, "The address to expose the pprof profiler.") command.Flags().DurationVar(&config.resourceTerminatingTimeout, "terminating-resource-timeout", config.resourceTerminatingTimeout, "How long to wait on persistent volumes and namespaces to terminate during a restore before timing out.") command.Flags().DurationVar(&config.defaultBackupTTL, "default-backup-ttl", config.defaultBackupTTL, "How long to wait by default before backups can be garbage collected.") - command.Flags().DurationVar(&config.defaultResticMaintenanceFrequency, "default-restic-prune-frequency", config.defaultResticMaintenanceFrequency, "How often 'restic prune' is run for restic repositories by default.") + command.Flags().DurationVar(&config.repoMaintenanceFrequency, "default-repo-maintain-frequency", config.repoMaintenanceFrequency, "How often 'maintain' is run for backup repositories by default.") command.Flags().DurationVar(&config.garbageCollectionFrequency, "garbage-collection-frequency", config.garbageCollectionFrequency, "How often garbage collection is run for expired backups.") - command.Flags().BoolVar(&config.defaultVolumesToRestic, "default-volumes-to-restic", config.defaultVolumesToRestic, "Backup all volumes with restic by default.") + command.Flags().BoolVar(&config.defaultVolumesToFsBackup, "default-volumes-to-fs-backup", config.defaultVolumesToFsBackup, "Backup all volumes with pod volume file system backup by default.") command.Flags().StringVar(&config.uploaderType, "uploader-type", config.uploaderType, "Type of uploader to handle the transfer of data of pod volumes") return command @@ -249,7 +248,7 @@ type server struct { cancelFunc context.CancelFunc logger logrus.FieldLogger logLevel logrus.Level - pluginRegistry clientmgmt.Registry + pluginRegistry process.Registry repoManager repository.Manager repoLocker *repository.RepoLocker repoEnsurer *repository.RepositoryEnsurer @@ -257,6 +256,7 @@ type server struct { config serverConfig mgr manager.Manager credentialFileStore credentials.FileStore + credentialSecretStore credentials.SecretStore } func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*server, error) { @@ -293,7 +293,7 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s return nil, err } - pluginRegistry := clientmgmt.NewRegistry(config.pluginDir, logger, logger.Level) + pluginRegistry := process.NewRegistry(config.pluginDir, logger, logger.Level) if err := pluginRegistry.DiscoverPlugins(); err != nil { return nil, err } @@ -346,6 +346,8 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s return nil, err } + credentialSecretStore, err := credentials.NewNamespacedSecretStore(mgr.GetClient(), f.Namespace()) + s := &server{ namespace: f.Namespace(), metricsAddress: config.metricsAddress, @@ -365,6 +367,7 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s config: config, mgr: mgr, credentialFileStore: credentialFileStore, + credentialSecretStore: credentialSecretStore, } return s, nil 
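The server wiring above adds a Secret-backed credential store alongside the existing file-backed one; the node-agent (restic) server earlier in this patch combines the two in a CredentialGetter. A hedged sketch of that composition follows; only the CredentialGetter fields and the two store types come from the diff, while the constructor and package name are illustrative:

package server // illustrative package name, not part of the patch

import "github.com/vmware-tanzu/velero/internal/credentials"

// newCredentialGetter bundles the two credential sources the server wires up:
// credentials mounted as files and credentials read from a namespaced Secret.
func newCredentialGetter(fileStore credentials.FileStore, secretStore credentials.SecretStore) *credentials.CredentialGetter {
	return &credentials.CredentialGetter{
		FromFile:   fileStore,
		FromSecret: secretStore,
	}
}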
@@ -528,11 +531,11 @@ var defaultRestorePriorities = []string{ } func (s *server) initRestic() error { - // warn if restic daemonset does not exist - if _, err := s.kubeClient.AppsV1().DaemonSets(s.namespace).Get(s.ctx, restic.DaemonSet, metav1.GetOptions{}); apierrors.IsNotFound(err) { - s.logger.Warn("Velero restic daemonset not found; restic backups/restores will not work until it's created") + // warn if node agent does not exist + if err := nodeagent.IsRunning(s.ctx, s.kubeClient, s.namespace); err == nodeagent.DaemonsetNotFound { + s.logger.Warn("Velero node agent not found; pod volume backups/restores will not work until it's created") } else if err != nil { - s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of velero restic daemonset") + s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of velero node agent") } // ensure the repo key secret is set up @@ -541,43 +544,13 @@ func (s *server) initRestic() error { } s.repoLocker = repository.NewRepoLocker() - s.repoEnsurer = repository.NewRepositoryEnsurer(s.sharedInformerFactory.Velero().V1().BackupRepositories(), s.veleroClient.VeleroV1(), s.logger) + s.repoEnsurer = repository.NewRepositoryEnsurer(s.mgr.GetClient(), s.logger) - s.repoManager = repository.NewManager(s.namespace, s.mgr.GetClient(), s.repoLocker, s.repoEnsurer, s.credentialFileStore, s.logger) + s.repoManager = repository.NewManager(s.namespace, s.mgr.GetClient(), s.repoLocker, s.repoEnsurer, s.credentialFileStore, s.credentialSecretStore, s.logger) return nil } -func (s *server) getCSISnapshotListers() (snapshotv1listers.VolumeSnapshotLister, snapshotv1listers.VolumeSnapshotContentLister, snapshotv1listers.VolumeSnapshotClassLister) { - // Make empty listers that will only be populated if CSI is properly enabled. - var vsLister snapshotv1listers.VolumeSnapshotLister - var vscLister snapshotv1listers.VolumeSnapshotContentLister - var vsClassLister snapshotv1listers.VolumeSnapshotClassLister - var err error - - // If CSI is enabled, check for the CSI groups and generate the listers - // If CSI isn't enabled, return empty listers. - if features.IsEnabled(velerov1api.CSIFeatureFlag) { - _, err = s.discoveryClient.ServerResourcesForGroupVersion(snapshotv1api.SchemeGroupVersion.String()) - switch { - case apierrors.IsNotFound(err): - // CSI is enabled, but the required CRDs aren't installed, so halt. - s.logger.Fatalf("The '%s' feature flag was specified, but CSI API group [%s] was not found.", velerov1api.CSIFeatureFlag, snapshotv1api.SchemeGroupVersion.String()) - case err == nil: - // CSI is enabled, and the resources were found. - // Instantiate the listers fully - s.logger.Debug("Creating CSI listers") - // Access the wrapped factory directly here since we've already done the feature flag check above to know it's safe. 
- vsLister = s.csiSnapshotterSharedInformerFactory.factory.Snapshot().V1().VolumeSnapshots().Lister() - vscLister = s.csiSnapshotterSharedInformerFactory.factory.Snapshot().V1().VolumeSnapshotContents().Lister() - vsClassLister = s.csiSnapshotterSharedInformerFactory.factory.Snapshot().V1().VolumeSnapshotClasses().Lister() - case err != nil: - cmd.CheckError(err) - } - } - return vsLister, vscLister, vsClassLister -} - func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string) error { s.logger.Info("Starting controllers") @@ -602,31 +575,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string backupStoreGetter := persistence.NewObjectBackupStoreGetter(s.credentialFileStore) - csiVSLister, csiVSCLister, csiVSClassLister := s.getCSISnapshotListers() - - backupSyncControllerRunInfo := func() controllerRunInfo { - backupSyncContoller := controller.NewBackupSyncController( - s.veleroClient.VeleroV1(), - s.mgr.GetClient(), - s.veleroClient.VeleroV1(), - s.sharedInformerFactory.Velero().V1().Backups().Lister(), - csiVSLister, - s.config.backupSyncPeriod, - s.namespace, - s.csiSnapshotClient, - s.kubeClient, - s.config.defaultBackupLocation, - newPluginManager, - backupStoreGetter, - s.logger, - ) - - return controllerRunInfo{ - controller: backupSyncContoller, - numWorkers: defaultControllerWorkers, - } - } - backupTracker := controller.NewBackupTracker() backupControllerRunInfo := func() controllerRunInfo { @@ -636,10 +584,12 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string client.NewDynamicFactory(s.dynamicClient), podexec.NewPodCommandExecutor(s.kubeClientConfig, s.kubeClient.CoreV1().RESTClient()), podvolume.NewBackupperFactory(s.repoLocker, s.repoEnsurer, s.veleroClient, s.kubeClient.CoreV1(), - s.kubeClient.CoreV1(), s.sharedInformerFactory.Velero().V1().BackupRepositories().Informer().HasSynced, s.logger), + s.kubeClient.CoreV1(), s.kubeClient.CoreV1(), + s.sharedInformerFactory.Velero().V1().BackupRepositories().Informer().HasSynced, s.logger), s.config.podVolumeOperationTimeout, - s.config.defaultVolumesToRestic, + s.config.defaultVolumesToFsBackup, s.config.clientPageSize, + s.config.uploaderType, ) cmd.CheckError(err) @@ -654,18 +604,15 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string backupTracker, s.mgr.GetClient(), s.config.defaultBackupLocation, - s.config.defaultVolumesToRestic, + s.config.defaultVolumesToFsBackup, s.config.defaultBackupTTL, s.config.defaultCSISnapshotTimeout, s.sharedInformerFactory.Velero().V1().VolumeSnapshotLocations().Lister(), defaultVolumeSnapshotLocations, s.metrics, s.config.formatFlag.Parse(), - csiVSLister, - s.csiSnapshotClient, - csiVSCLister, - csiVSClassLister, backupStoreGetter, + s.credentialFileStore, ) return controllerRunInfo{ @@ -674,22 +621,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string } } - gcControllerRunInfo := func() controllerRunInfo { - gcController := controller.NewGCController( - s.logger, - s.sharedInformerFactory.Velero().V1().Backups(), - s.sharedInformerFactory.Velero().V1().DeleteBackupRequests().Lister(), - s.veleroClient.VeleroV1(), - s.mgr.GetClient(), - s.config.garbageCollectionFrequency, - ) - - return controllerRunInfo{ - controller: gcController, - numWorkers: defaultControllerWorkers, - } - } - restoreControllerRunInfo := func() controllerRunInfo { restorer, err := restore.NewKubernetesRestorer( s.veleroClient.VeleroV1(), @@ -704,6 +635,7 @@ func (s *server) 
runControllers(defaultVolumeSnapshotLocations map[string]string s.logger, podexec.NewPodCommandExecutor(s.kubeClientConfig, s.kubeClient.CoreV1().RESTClient()), s.kubeClient.CoreV1().RESTClient(), + s.credentialFileStore, ) cmd.CheckError(err) @@ -730,16 +662,24 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string } } + // So far, the PodVolumeBackup, PodVolumeRestore, and BackupStorageLocation controllers + // are not included in the --disable-controllers list. + // This is because PVB and PVR are used by the Restic DaemonSet, + // and the BSL controller is mandatory for Velero to work. enabledControllers := map[string]func() controllerRunInfo{ - controller.BackupSync: backupSyncControllerRunInfo, - controller.Backup: backupControllerRunInfo, - controller.GarbageCollection: gcControllerRunInfo, - controller.Restore: restoreControllerRunInfo, + controller.Backup: backupControllerRunInfo, + controller.Restore: restoreControllerRunInfo, } // Note: all runtime type controllers that can be disabled are grouped separately, below: - enabledRuntimeControllers := make(map[string]struct{}) - enabledRuntimeControllers[controller.ServerStatusRequest] = struct{}{} - enabledRuntimeControllers[controller.DownloadRequest] = struct{}{} + enabledRuntimeControllers := map[string]struct{}{ + controller.ServerStatusRequest: {}, + controller.DownloadRequest: {}, + controller.Schedule: {}, + controller.ResticRepo: {}, + controller.BackupDeletion: {}, + controller.GarbageCollection: {}, + controller.BackupSync: {}, + } if s.config.restoreOnly { s.logger.Info("Restore only mode - not starting the backup, schedule, delete-backup, or GC controllers") @@ -752,7 +692,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string } // Remove disabled controllers so they are not initialized. If a match is not found we want - // to hault the system so the user knows this operation was not possible. + // to halt the system so the user knows this operation was not possible.
if err := removeControllers(s.config.disabledControllers, enabledControllers, enabledRuntimeControllers, s.logger); err != nil { log.Fatal(err, "unable to disable a controller") } @@ -786,71 +726,100 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.logger.WithField("informer", informer).Info("Informer cache synced") } - bslr := controller.BackupStorageLocationReconciler{ - Ctx: s.ctx, - Client: s.mgr.GetClient(), - Scheme: s.mgr.GetScheme(), - DefaultBackupLocationInfo: storage.DefaultBackupLocationInfo{ + bslr := controller.NewBackupStorageLocationReconciler( + s.ctx, + s.mgr.GetClient(), + s.mgr.GetScheme(), + storage.DefaultBackupLocationInfo{ StorageLocation: s.config.defaultBackupLocation, ServerValidationFrequency: s.config.storeValidationFrequency, }, - NewPluginManager: newPluginManager, - BackupStoreGetter: backupStoreGetter, - Log: s.logger, - } + newPluginManager, + backupStoreGetter, + s.logger, + ) if err := bslr.SetupWithManager(s.mgr); err != nil { s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupStorageLocation) } - if err := controller.NewScheduleReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.metrics).SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.Schedule) + if _, ok := enabledRuntimeControllers[controller.Schedule]; ok { + if err := controller.NewScheduleReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.metrics).SetupWithManager(s.mgr); err != nil { + s.logger.Fatal(err, "unable to create controller", "controller", controller.Schedule) + } } - if err := controller.NewResticRepoReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.config.defaultResticMaintenanceFrequency, s.repoManager).SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.ResticRepo) + if _, ok := enabledRuntimeControllers[controller.ResticRepo]; ok { + if err := controller.NewResticRepoReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.config.repoMaintenanceFrequency, s.repoManager).SetupWithManager(s.mgr); err != nil { + s.logger.Fatal(err, "unable to create controller", "controller", controller.ResticRepo) + } } - if err := controller.NewBackupDeletionReconciler( - s.logger, - s.mgr.GetClient(), - backupTracker, - s.repoManager, - s.metrics, - s.discoveryHelper, - newPluginManager, - backupStoreGetter, - ).SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupDeletion) + if _, ok := enabledRuntimeControllers[controller.BackupDeletion]; ok { + if err := controller.NewBackupDeletionReconciler( + s.logger, + s.mgr.GetClient(), + backupTracker, + s.repoManager, + s.metrics, + s.discoveryHelper, + newPluginManager, + backupStoreGetter, + ).SetupWithManager(s.mgr); err != nil { + s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupDeletion) + } } if _, ok := enabledRuntimeControllers[controller.ServerStatusRequest]; ok { - r := controller.ServerStatusRequestReconciler{ - Scheme: s.mgr.GetScheme(), - Client: s.mgr.GetClient(), - Ctx: s.ctx, - PluginRegistry: s.pluginRegistry, - Clock: clock.RealClock{}, - Log: s.logger, - } - if err := r.SetupWithManager(s.mgr); err != nil { + if err := controller.NewServerStatusRequestReconciler( + s.mgr.GetClient(), + s.ctx, + s.pluginRegistry, + clock.RealClock{}, + s.logger, + ).SetupWithManager(s.mgr); err != nil { s.logger.Fatal(err, "unable to create 
controller", "controller", controller.ServerStatusRequest) } } if _, ok := enabledRuntimeControllers[controller.DownloadRequest]; ok { - r := controller.DownloadRequestReconciler{ - Scheme: s.mgr.GetScheme(), - Client: s.mgr.GetClient(), - Clock: clock.RealClock{}, - NewPluginManager: newPluginManager, - BackupStoreGetter: backupStoreGetter, - Log: s.logger, - } + r := controller.NewDownloadRequestReconciler( + s.mgr.GetClient(), + clock.RealClock{}, + newPluginManager, + backupStoreGetter, + s.logger, + ) if err := r.SetupWithManager(s.mgr); err != nil { s.logger.Fatal(err, "unable to create controller", "controller", controller.DownloadRequest) } } + if _, ok := enabledRuntimeControllers[controller.BackupSync]; ok { + syncPeriod := s.config.backupSyncPeriod + if syncPeriod <= 0 { + syncPeriod = time.Minute + } + + backupSyncReconciler := controller.NewBackupSyncReconciler( + s.mgr.GetClient(), + s.namespace, + syncPeriod, + newPluginManager, + backupStoreGetter, + s.logger, + ) + if err := backupSyncReconciler.SetupWithManager(s.mgr); err != nil { + s.logger.Fatal(err, " unable to create controller ", "controller ", controller.BackupSync) + } + } + + if _, ok := enabledRuntimeControllers[controller.GarbageCollection]; ok { + r := controller.NewGCReconciler(s.logger, s.mgr.GetClient(), s.config.garbageCollectionFrequency) + if err := r.SetupWithManager(s.mgr); err != nil { + s.logger.Fatal(err, "unable to create controller", "controller", controller.GarbageCollection) + } + } + // TODO(2.0): presuming all controllers and resources are converted to runtime-controller // by v2.0, the block from this line and including the `s.mgr.Start() will be // deprecated, since the manager auto-starts all the caches. Until then, we need to start the diff --git a/pkg/cmd/server/server_test.go b/pkg/cmd/server/server_test.go index df726e6bf9..e8110793c7 100644 --- a/pkg/cmd/server/server_test.go +++ b/pkg/cmd/server/server_test.go @@ -80,14 +80,14 @@ func TestRemoveControllers(t *testing.T) { errorExpected bool }{ { - name: "Remove one disabable controller", + name: "Remove one disable controller", disabledControllers: []string{ controller.Backup, }, errorExpected: false, }, { - name: "Remove all disabable controllers", + name: "Remove all disable controllers", disabledControllers: []string{ controller.Backup, controller.BackupDeletion, @@ -102,7 +102,7 @@ func TestRemoveControllers(t *testing.T) { errorExpected: false, }, { - name: "Remove with a non-disabable controller included", + name: "Remove with a non-disable controller included", disabledControllers: []string{ controller.Backup, controller.BackupStorageLocation, @@ -110,7 +110,7 @@ func TestRemoveControllers(t *testing.T) { errorExpected: true, }, { - name: "Remove with a misspelled/inexisting controller name", + name: "Remove with a misspelled/non-existing controller name", disabledControllers: []string{ "go", }, @@ -122,16 +122,16 @@ func TestRemoveControllers(t *testing.T) { enabledControllers := map[string]func() controllerRunInfo{ controller.BackupSync: func() controllerRunInfo { return controllerRunInfo{} }, controller.Backup: func() controllerRunInfo { return controllerRunInfo{} }, - controller.Schedule: func() controllerRunInfo { return controllerRunInfo{} }, controller.GarbageCollection: func() controllerRunInfo { return controllerRunInfo{} }, - controller.BackupDeletion: func() controllerRunInfo { return controllerRunInfo{} }, controller.Restore: func() controllerRunInfo { return controllerRunInfo{} }, - controller.ResticRepo: func() 
controllerRunInfo { return controllerRunInfo{} }, - controller.DownloadRequest: func() controllerRunInfo { return controllerRunInfo{} }, } enabledRuntimeControllers := map[string]struct{}{ controller.ServerStatusRequest: {}, + controller.Schedule: {}, + controller.BackupDeletion: {}, + controller.ResticRepo: {}, + controller.DownloadRequest: {}, } totalNumOriginalControllers := len(enabledControllers) + len(enabledRuntimeControllers) diff --git a/pkg/cmd/util/downloadrequest/downloadrequest.go b/pkg/cmd/util/downloadrequest/downloadrequest.go index 22861245f3..f131b32e0c 100644 --- a/pkg/cmd/util/downloadrequest/downloadrequest.go +++ b/pkg/cmd/util/downloadrequest/downloadrequest.go @@ -40,6 +40,7 @@ import ( // ErrNotFound is exported for external packages to check for when a file is // not found var ErrNotFound = errors.New("file not found") +var ErrDownloadRequestDownloadURLTimeout = errors.New("download request download url timeout, check velero server logs for errors. backup storage location may not be available") func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name string, kind velerov1api.DownloadTargetKind, w io.Writer, timeout time.Duration, insecureSkipTLSVerify bool, caCertFile string) error { uuid, err := uuid.NewRandom() @@ -58,7 +59,14 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin defer cancel() key := kbclient.ObjectKey{Name: created.Name, Namespace: namespace} + timeStreamFirstCheck := time.Now() + downloadUrlTimeout := false checkFunc := func() { + // if timeout has been reached, cancel request + if time.Now().After(timeStreamFirstCheck.Add(timeout)) { + downloadUrlTimeout = true + cancel() + } updated := &velerov1api.DownloadRequest{} if err := kbClient.Get(ctx, key, updated); err != nil { return @@ -77,9 +85,8 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin } wait.Until(checkFunc, 25*time.Millisecond, ctx.Done()) - - if created.Status.DownloadURL == "" { - return ErrNotFound + if downloadUrlTimeout { + return ErrDownloadRequestDownloadURLTimeout } var caPool *x509.CertPool diff --git a/pkg/cmd/util/output/backup_describer.go b/pkg/cmd/util/output/backup_describer.go index 1ec0928316..a6a2f6f996 100644 --- a/pkg/cmd/util/output/backup_describer.go +++ b/pkg/cmd/util/output/backup_describer.go @@ -110,7 +110,6 @@ func DescribeBackup( d.Println() DescribePodVolumeBackups(d, podVolumeBackups, details) } - }) } @@ -165,6 +164,9 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) { d.Println() d.Printf("TTL:\t%s\n", spec.TTL.Duration) + d.Println() + d.Printf("CSISnapshotTimeout:\t%s\n", &spec.CSISnapshotTimeout.Duration) + d.Println() if len(spec.Hooks.Resources) == 0 { d.Printf("Hooks:\t\n") @@ -241,7 +243,6 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) { d.Printf("\t%s: %s\n", key, value) } } - } // DescribeBackupStatus describes a backup status in human-readable format. @@ -402,10 +403,19 @@ func failedDeletionCount(requests []velerov1api.DeleteBackupRequest) int { // DescribePodVolumeBackups describes pod volume backups in human-readable format. func DescribePodVolumeBackups(d *Describer, backups []velerov1api.PodVolumeBackup, details bool) { + // Get the type of pod volume uploader. Since the uploader only comes from a single source, we can + // take the uploader type from the first element of the array. 
+ var uploaderType string + if len(backups) > 0 { + uploaderType = backups[0].Spec.UploaderType + } else { + return + } + if details { - d.Printf("Restic Backups:\n") + d.Printf("%s Backups:\n", uploaderType) } else { - d.Printf("Restic Backups (specify --details for more information):\n") + d.Printf("%s Backups (specify --details for more information):\n", uploaderType) } // separate backups by phase (combining and New into a single group) @@ -486,7 +496,7 @@ func (v *volumesByPod) Add(namespace, name, volume, phase string, progress veler key := fmt.Sprintf("%s/%s", namespace, name) // append backup progress percentage if backup is in progress - if phase == "In Progress" && progress != (velerov1api.PodVolumeOperationProgress{}) { + if phase == "In Progress" && progress.TotalBytes != 0 { volume = fmt.Sprintf("%s (%.2f%%)", volume, float64(progress.BytesDone)/float64(progress.TotalBytes)*100) } diff --git a/pkg/cmd/util/output/output.go b/pkg/cmd/util/output/output.go index ab3f7a95d6..1b0a992198 100644 --- a/pkg/cmd/util/output/output.go +++ b/pkg/cmd/util/output/output.go @@ -41,7 +41,7 @@ const downloadRequestTimeout = 30 * time.Second func BindFlags(flags *pflag.FlagSet) { flags.StringP("output", "o", "table", "Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. 'table' is not valid for the install command.") labelColumns := flag.NewStringArray() - flags.Var(&labelColumns, "label-columns", "A comma-separated list of labels to be displayed as columns") + flags.VarP(&labelColumns, "label-columns", "L", "Accepts a comma separated list of labels that are going to be presented as columns. Names are case-sensitive. You can also use multiple flag options like -L label1 -L label2...") flags.Bool("show-labels", false, "Show labels in the last column") } diff --git a/pkg/cmd/util/output/restore_describer.go b/pkg/cmd/util/output/restore_describer.go index 286467d038..f6b5f2b378 100644 --- a/pkg/cmd/util/output/restore_describer.go +++ b/pkg/cmd/util/output/restore_describer.go @@ -205,10 +205,19 @@ func describeRestoreResult(d *Describer, name string, result pkgrestore.Result) // describePodVolumeRestores describes pod volume restores in human-readable format. func describePodVolumeRestores(d *Describer, restores []velerov1api.PodVolumeRestore, details bool) { + // Get the type of pod volume uploader. Since the uploader only comes from a single source, we can + // take the uploader type from the first element of the array. 
+ var uploaderType string + if len(restores) > 0 { + uploaderType = restores[0].Spec.UploaderType + } else { + return + } + if details { - d.Printf("Restic Restores:\n") + d.Printf("%s Restores:\n", uploaderType) } else { - d.Printf("Restic Restores (specify --details for more information):\n") + d.Printf("%s Restores (specify --details for more information):\n", uploaderType) } // separate restores by phase (combining and New into a single group) diff --git a/pkg/cmd/util/output/schedule_describer.go b/pkg/cmd/util/output/schedule_describer.go index 46a65ba182..48f3669dbd 100644 --- a/pkg/cmd/util/output/schedule_describer.go +++ b/pkg/cmd/util/output/schedule_describer.go @@ -52,6 +52,9 @@ func DescribeSchedule(schedule *v1.Schedule) string { } } + d.Println() + d.Printf("Paused:\t%t\n", schedule.Spec.Paused) + d.Println() DescribeScheduleSpec(d, schedule.Spec) diff --git a/pkg/cmd/util/output/schedule_printer.go b/pkg/cmd/util/output/schedule_printer.go index 4c7f7821e4..e39ee90692 100644 --- a/pkg/cmd/util/output/schedule_printer.go +++ b/pkg/cmd/util/output/schedule_printer.go @@ -36,6 +36,7 @@ var ( {Name: "Backup TTL"}, {Name: "Last Backup"}, {Name: "Selector"}, + {Name: "Paused"}, } ) @@ -71,6 +72,7 @@ func printSchedule(schedule *v1.Schedule) []metav1.TableRow { schedule.Spec.Template.TTL.Duration, humanReadableTimeFromNow(lastBackupTime), metav1.FormatLabelSelector(schedule.Spec.Template.LabelSelector), + schedule.Spec.Paused, ) return []metav1.TableRow{row} diff --git a/pkg/cmd/velero/velero.go b/pkg/cmd/velero/velero.go index 8775f69cd0..fe712adc02 100644 --- a/pkg/cmd/velero/velero.go +++ b/pkg/cmd/velero/velero.go @@ -23,7 +23,7 @@ import ( "github.com/fatih/color" "github.com/spf13/cobra" - "k8s.io/klog" + "k8s.io/klog/v2" "github.com/vmware-tanzu/velero/pkg/cmd/cli/debug" diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index 59080abf37..39444459e4 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -28,7 +28,6 @@ import ( "sync" "time" - "github.com/apex/log" jsonpatch "github.com/evanphx/json-patch" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -47,9 +46,8 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/csi" snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" - snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" + "github.com/vmware-tanzu/velero/internal/credentials" "github.com/vmware-tanzu/velero/internal/storage" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" pkgbackup "github.com/vmware-tanzu/velero/pkg/backup" @@ -76,28 +74,25 @@ import ( type backupController struct { *genericController - discoveryHelper discovery.Helper - backupper pkgbackup.Backupper - lister velerov1listers.BackupLister - client velerov1client.BackupsGetter - kbClient kbclient.Client - clock clock.Clock - backupLogLevel logrus.Level - newPluginManager func(logrus.FieldLogger) clientmgmt.Manager - backupTracker BackupTracker - defaultBackupLocation string - defaultVolumesToRestic bool - defaultBackupTTL time.Duration - defaultCSISnapshotTimeout time.Duration - snapshotLocationLister velerov1listers.VolumeSnapshotLocationLister - defaultSnapshotLocations map[string]string - metrics *metrics.ServerMetrics - backupStoreGetter persistence.ObjectBackupStoreGetter - formatFlag logging.Format - 
volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister - volumeSnapshotClient *snapshotterClientSet.Clientset - volumeSnapshotContentLister snapshotv1listers.VolumeSnapshotContentLister - volumeSnapshotClassLister snapshotv1listers.VolumeSnapshotClassLister + discoveryHelper discovery.Helper + backupper pkgbackup.Backupper + lister velerov1listers.BackupLister + client velerov1client.BackupsGetter + kbClient kbclient.Client + clock clock.Clock + backupLogLevel logrus.Level + newPluginManager func(logrus.FieldLogger) clientmgmt.Manager + backupTracker BackupTracker + defaultBackupLocation string + defaultVolumesToFsBackup bool + defaultBackupTTL time.Duration + defaultCSISnapshotTimeout time.Duration + snapshotLocationLister velerov1listers.VolumeSnapshotLocationLister + defaultSnapshotLocations map[string]string + metrics *metrics.ServerMetrics + backupStoreGetter persistence.ObjectBackupStoreGetter + formatFlag logging.Format + credentialFileStore credentials.FileStore } func NewBackupController( @@ -111,43 +106,37 @@ func NewBackupController( backupTracker BackupTracker, kbClient kbclient.Client, defaultBackupLocation string, - defaultVolumesToRestic bool, + defaultVolumesToFsBackup bool, defaultBackupTTL time.Duration, defaultCSISnapshotTimeout time.Duration, volumeSnapshotLocationLister velerov1listers.VolumeSnapshotLocationLister, defaultSnapshotLocations map[string]string, metrics *metrics.ServerMetrics, formatFlag logging.Format, - volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, - volumeSnapshotClient *snapshotterClientSet.Clientset, - volumeSnapshotContentLister snapshotv1listers.VolumeSnapshotContentLister, - volumesnapshotClassLister snapshotv1listers.VolumeSnapshotClassLister, backupStoreGetter persistence.ObjectBackupStoreGetter, + credentialStore credentials.FileStore, ) Interface { c := &backupController{ - genericController: newGenericController(Backup, logger), - discoveryHelper: discoveryHelper, - backupper: backupper, - lister: backupInformer.Lister(), - client: client, - clock: &clock.RealClock{}, - backupLogLevel: backupLogLevel, - newPluginManager: newPluginManager, - backupTracker: backupTracker, - kbClient: kbClient, - defaultBackupLocation: defaultBackupLocation, - defaultVolumesToRestic: defaultVolumesToRestic, - defaultBackupTTL: defaultBackupTTL, - defaultCSISnapshotTimeout: defaultCSISnapshotTimeout, - snapshotLocationLister: volumeSnapshotLocationLister, - defaultSnapshotLocations: defaultSnapshotLocations, - metrics: metrics, - formatFlag: formatFlag, - volumeSnapshotLister: volumeSnapshotLister, - volumeSnapshotClient: volumeSnapshotClient, - volumeSnapshotContentLister: volumeSnapshotContentLister, - volumeSnapshotClassLister: volumesnapshotClassLister, - backupStoreGetter: backupStoreGetter, + genericController: newGenericController(Backup, logger), + discoveryHelper: discoveryHelper, + backupper: backupper, + lister: backupInformer.Lister(), + client: client, + clock: &clock.RealClock{}, + backupLogLevel: backupLogLevel, + newPluginManager: newPluginManager, + backupTracker: backupTracker, + kbClient: kbClient, + defaultBackupLocation: defaultBackupLocation, + defaultVolumesToFsBackup: defaultVolumesToFsBackup, + defaultBackupTTL: defaultBackupTTL, + defaultCSISnapshotTimeout: defaultCSISnapshotTimeout, + snapshotLocationLister: volumeSnapshotLocationLister, + defaultSnapshotLocations: defaultSnapshotLocations, + metrics: metrics, + formatFlag: formatFlag, + backupStoreGetter: backupStoreGetter, + credentialFileStore: credentialStore, } 
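Reviewer note, not part of the patch: with DefaultVolumesToRestic deprecated in favor of DefaultVolumesToFsBackup, the prepareBackupRequest hunk a little further below has to keep honoring CRs written by older clients. A minimal standalone sketch of the resulting precedence; applyFsBackupDefault is a hypothetical name used only to condense the two if-blocks from that hunk.

```go
package main

import "fmt"

// applyFsBackupDefault condenses the precedence applied in prepareBackupRequest:
// a deprecated DefaultVolumesToRestic=true is remapped onto DefaultVolumesToFsBackup,
// an explicit DefaultVolumesToFsBackup is otherwise kept, and only when both are
// unset does the server-wide default apply.
func applyFsBackupDefault(volumesToRestic, volumesToFsBackup *bool, serverDefault bool) *bool {
	if volumesToRestic != nil && *volumesToRestic {
		return volumesToRestic // remap: the old field wins when explicitly true
	}
	if volumesToFsBackup != nil {
		return volumesToFsBackup // the new field is already set on the CR
	}
	return &serverDefault // fall back to the server-wide default
}

func main() {
	trueVal := true
	fmt.Println(*applyFsBackupDefault(&trueVal, nil, false)) // true: deprecated field remapped
	fmt.Println(*applyFsBackupDefault(nil, nil, true))       // true: server default applied
}
```

The table-driven TestDefaultVolumesToResticDeprecation later in this diff exercises exactly these combinations.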
c.syncHandler = c.processBackup @@ -260,7 +249,7 @@ func (c *backupController) processBackup(key string) error { } log.Debug("Preparing backup request") - request := c.prepareBackupRequest(original) + request := c.prepareBackupRequest(original, log) if len(request.Status.ValidationErrors) > 0 { request.Status.Phase = velerov1api.BackupPhaseFailedValidation } else { @@ -346,7 +335,7 @@ func patchBackup(original, updated *velerov1api.Backup, client velerov1client.Ba return res, nil } -func (c *backupController) prepareBackupRequest(backup *velerov1api.Backup) *pkgbackup.Request { +func (c *backupController) prepareBackupRequest(backup *velerov1api.Backup, logger logrus.FieldLogger) *pkgbackup.Request { request := &pkgbackup.Request{ Backup: backup.DeepCopy(), // don't modify items in the cache } @@ -370,8 +359,15 @@ func (c *backupController) prepareBackupRequest(backup *velerov1api.Backup) *pkg // calculate expiration request.Status.Expiration = &metav1.Time{Time: c.clock.Now().Add(request.Spec.TTL.Duration)} - if request.Spec.DefaultVolumesToRestic == nil { - request.Spec.DefaultVolumesToRestic = &c.defaultVolumesToRestic + // TODO: post v1.10. Remove this code block after DefaultVolumesToRestic is removed from CRD + // For now, for CRs created by old versions, we need to respect the DefaultVolumesToRestic value if it is set true + if boolptr.IsSetToTrue(request.Spec.DefaultVolumesToRestic) { + logger.Warn("DefaultVolumesToRestic field will be deprecated, use DefaultVolumesToFsBackup instead. Automatically remap it to DefaultVolumesToFsBackup") + request.Spec.DefaultVolumesToFsBackup = request.Spec.DefaultVolumesToRestic + } + + if request.Spec.DefaultVolumesToFsBackup == nil { + request.Spec.DefaultVolumesToFsBackup = &c.defaultVolumesToFsBackup } // find which storage location to use @@ -566,6 +562,15 @@ func (c *backupController) validateAndGetSnapshotLocations(backup *velerov1api.B return nil, errors } + // add credential to config for each location + for _, location := range providerLocations { + err = volume.UpdateVolumeSnapshotLocationWithCredentialConfig(location, c.credentialFileStore, c.logger) + if err != nil { + errors = append(errors, fmt.Sprintf("error adding credentials to volume snapshot location named %s: %v", location.Name, err)) + continue + } + } + return providerLocations, nil } @@ -643,47 +648,51 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error { // Empty slices here so that they can be passed in to the persistBackup call later, regardless of whether or not CSI's enabled. // This way, we only make the Lister call if the feature flag's on. - var volumeSnapshots []*snapshotv1api.VolumeSnapshot - var volumeSnapshotContents []*snapshotv1api.VolumeSnapshotContent - var volumeSnapshotClasses []*snapshotv1api.VolumeSnapshotClass + var volumeSnapshots []snapshotv1api.VolumeSnapshot + var volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent + var volumeSnapshotClasses []snapshotv1api.VolumeSnapshotClass if features.IsEnabled(velerov1api.CSIFeatureFlag) { selector := label.NewSelectorForBackup(backup.Name) // Listers are wrapped in a nil check out of caution, since they may not be populated based on the // EnableCSI feature flag. This is more to guard against programmer error, as they shouldn't be nil // when EnableCSI is on. 
- if c.volumeSnapshotLister != nil { - volumeSnapshots, err = c.volumeSnapshotLister.List(selector) - if err != nil { - backupLog.Error(err) - } + vsList := &snapshotv1api.VolumeSnapshotList{} + vscList := &snapshotv1api.VolumeSnapshotContentList{} - err = c.checkVolumeSnapshotReadyToUse(context.Background(), volumeSnapshots, backup.Spec.CSISnapshotTimeout.Duration) - if err != nil { - backupLog.Errorf("fail to wait VolumeSnapshot change to Ready: %s", err.Error()) - } - - backup.CSISnapshots = volumeSnapshots + err = c.kbClient.List(context.Background(), vsList, &kbclient.ListOptions{LabelSelector: selector}) + if err != nil { + backupLog.Error(err) } + if len(vsList.Items) >= 0 { + volumeSnapshots = vsList.Items + } + err = c.checkVolumeSnapshotReadyToUse(context.Background(), volumeSnapshots, backup.Spec.CSISnapshotTimeout.Duration) + if err != nil { + backupLog.Errorf("fail to wait VolumeSnapshot change to Ready: %s", err.Error()) + } + backup.CSISnapshots = volumeSnapshots - if c.volumeSnapshotContentLister != nil { - volumeSnapshotContents, err = c.volumeSnapshotContentLister.List(selector) - if err != nil { - backupLog.Error(err) - } + err = c.kbClient.List(context.Background(), vscList, &kbclient.ListOptions{LabelSelector: selector}) + if err != nil { + backupLog.Error(err) } + if len(vscList.Items) >= 0 { + volumeSnapshotContents = vscList.Items + } + vsClassSet := sets.NewString() for _, vsc := range volumeSnapshotContents { // persist the volumesnapshotclasses referenced by vsc - if c.volumeSnapshotClassLister != nil && - vsc.Spec.VolumeSnapshotClassName != nil && - !vsClassSet.Has(*vsc.Spec.VolumeSnapshotClassName) { - if vsClass, err := c.volumeSnapshotClassLister.Get(*vsc.Spec.VolumeSnapshotClassName); err != nil { + if vsc.Spec.VolumeSnapshotClassName != nil && !vsClassSet.Has(*vsc.Spec.VolumeSnapshotClassName) { + vsClass := &snapshotv1api.VolumeSnapshotClass{} + if err := c.kbClient.Get(context.TODO(), kbclient.ObjectKey{Name: *vsc.Spec.VolumeSnapshotClassName}, vsClass); err != nil { backupLog.Error(err) } else { vsClassSet.Insert(*vsc.Spec.VolumeSnapshotClassName) - volumeSnapshotClasses = append(volumeSnapshotClasses, vsClass) + volumeSnapshotClasses = append(volumeSnapshotClasses, *vsClass) } } + if err := csi.ResetVolumeSnapshotContent(vsc); err != nil { backupLog.Error(err) } @@ -787,9 +796,9 @@ func persistBackup(backup *pkgbackup.Request, backupContents, backupLog *os.File, backupStore persistence.BackupStore, log logrus.FieldLogger, - csiVolumeSnapshots []*snapshotv1api.VolumeSnapshot, - csiVolumeSnapshotContents []*snapshotv1api.VolumeSnapshotContent, - csiVolumesnapshotClasses []*snapshotv1api.VolumeSnapshotClass, + csiVolumeSnapshots []snapshotv1api.VolumeSnapshot, + csiVolumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, + csiVolumesnapshotClasses []snapshotv1api.VolumeSnapshotClass, ) []error { persistErrs := []error{} backupJSON := new(bytes.Buffer) @@ -898,7 +907,7 @@ func encodeToJSONGzip(data interface{}, desc string) (*bytes.Buffer, []error) { // using goroutine here instead of waiting in CSI plugin, because it's not easy to make BackupItemAction // parallel by now. 
After BackupItemAction parallel is implemented, this logic should be moved to CSI plugin // as https://github.com/vmware-tanzu/velero-plugin-for-csi/pull/100 -func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, volumesnapshots []*snapshotv1api.VolumeSnapshot, +func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, volumesnapshots []snapshotv1api.VolumeSnapshot, csiSnapshotTimeout time.Duration) error { eg, _ := errgroup.WithContext(ctx) timeout := csiSnapshotTimeout @@ -908,19 +917,20 @@ func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, vo volumeSnapshot := vs eg.Go(func() error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { - tmpVS, err := c.volumeSnapshotClient.SnapshotV1().VolumeSnapshots(volumeSnapshot.Namespace).Get(ctx, volumeSnapshot.Name, metav1.GetOptions{}) + tmpVS := &snapshotv1api.VolumeSnapshot{} + err := c.kbClient.Get(ctx, kbclient.ObjectKey{Name: volumeSnapshot.Name, Namespace: volumeSnapshot.Namespace}, tmpVS) if err != nil { return false, errors.Wrapf(err, fmt.Sprintf("failed to get volumesnapshot %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Name)) } if tmpVS.Status == nil || tmpVS.Status.BoundVolumeSnapshotContentName == nil || !boolptr.IsSetToTrue(tmpVS.Status.ReadyToUse) { - log.Infof("Waiting for CSI driver to reconcile volumesnapshot %s/%s. Retrying in %ds", volumeSnapshot.Namespace, volumeSnapshot.Name, interval/time.Second) + c.logger.Infof("Waiting for CSI driver to reconcile volumesnapshot %s/%s. Retrying in %ds", volumeSnapshot.Namespace, volumeSnapshot.Name, interval/time.Second) return false, nil } return true, nil }) if err == wait.ErrWaitTimeout { - log.Errorf("Timed out awaiting reconciliation of volumesnapshot %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Name) + c.logger.Errorf("Timed out awaiting reconciliation of volumesnapshot %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Name) } return err }) @@ -933,24 +943,29 @@ func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, vo // which will cause snapshot deletion on cloud provider, then backup cannot restore the PV. // If DeletionPolicy is Retain, just delete it. If DeletionPolicy is Delete, need to // change DeletionPolicy to Retain before deleting VS, then change DeletionPolicy back to Delete. 
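Reviewer note, not part of the patch: the comment above describes the trickiest part of the CSI cleanup, namely that a VolumeSnapshotContent with DeletionPolicy Delete would drop the provider snapshot as soon as its VolumeSnapshot is removed. A condensed sketch of the sequence under that reading; retainThenDelete is a hypothetical name, and the real implementation, including the recreate step, follows in the hunks below.

```go
package example

import (
	"context"

	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// retainThenDelete condenses the order of operations described above: keep the
// provider snapshot alive while the in-cluster VolumeSnapshot object is removed.
func retainThenDelete(ctx context.Context, c kbclient.Client,
	vs *snapshotv1api.VolumeSnapshot, vsc *snapshotv1api.VolumeSnapshotContent) error {
	if vsc.Spec.DeletionPolicy == snapshotv1api.VolumeSnapshotContentDelete {
		// 1. Patch the bound VSC to Retain so deleting the VS cannot delete the cloud snapshot.
		orig := vsc.DeepCopy()
		vsc.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentRetain
		if err := c.Patch(ctx, vsc, kbclient.MergeFrom(orig)); err != nil {
			return err
		}
	}
	// 2. Delete the VolumeSnapshot; the (now Retain) VSC and the provider snapshot survive.
	// 3. deleteVolumeSnapshot then recreates the VSC with the policy set back to Delete,
	//    which is what recreateVolumeSnapshotContent in the hunks below handles.
	return c.Delete(ctx, vs)
}
```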
-func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []*snapshotv1api.VolumeSnapshot, - volumeSnapshotContents []*snapshotv1api.VolumeSnapshotContent, +func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []snapshotv1api.VolumeSnapshot, + volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, backup pkgbackup.Request, logger logrus.FieldLogger) { var wg sync.WaitGroup - vscMap := make(map[string]*snapshotv1api.VolumeSnapshotContent) + vscMap := make(map[string]snapshotv1api.VolumeSnapshotContent) for _, vsc := range volumeSnapshotContents { vscMap[vsc.Name] = vsc } for _, vs := range volumeSnapshots { wg.Add(1) - go func(vs *snapshotv1api.VolumeSnapshot) { + go func(vs snapshotv1api.VolumeSnapshot) { defer wg.Done() - var vsc *snapshotv1api.VolumeSnapshotContent + var vsc snapshotv1api.VolumeSnapshotContent modifyVSCFlag := false if vs.Status.BoundVolumeSnapshotContentName != nil && len(*vs.Status.BoundVolumeSnapshotContentName) > 0 { - vsc = vscMap[*vs.Status.BoundVolumeSnapshotContentName] + var found bool + if vsc, found = vscMap[*vs.Status.BoundVolumeSnapshotContentName]; !found { + logger.Errorf("Not find %s from the vscMap", vs.Status.BoundVolumeSnapshotContentName) + return + } + if vsc.Spec.DeletionPolicy == snapshotv1api.VolumeSnapshotContentDelete { modifyVSCFlag = true } @@ -964,7 +979,7 @@ func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []*snapshotv1api logger.Debugf("Patching VolumeSnapshotContent %s", vsc.Name) original := vsc.DeepCopy() vsc.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentRetain - if err := c.kbClient.Patch(context.Background(), vsc, kbclient.MergeFrom(original)); err != nil { + if err := c.kbClient.Patch(context.Background(), &vsc, kbclient.MergeFrom(original)); err != nil { logger.Errorf("fail to modify VolumeSnapshotContent %s DeletionPolicy to Retain: %s", vsc.Name, err.Error()) return } @@ -980,7 +995,7 @@ func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []*snapshotv1api // Delete VolumeSnapshot from cluster logger.Debugf("Deleting VolumeSnapshotContent %s", vsc.Name) - err := c.volumeSnapshotClient.SnapshotV1().VolumeSnapshots(vs.Namespace).Delete(context.TODO(), vs.Name, metav1.DeleteOptions{}) + err := c.kbClient.Delete(context.TODO(), &vs) if err != nil { logger.Errorf("fail to delete VolumeSnapshot %s/%s: %s", vs.Namespace, vs.Name, err.Error()) } @@ -995,18 +1010,19 @@ func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []*snapshotv1api // and Source. Source is updated to let csi-controller thinks the VSC is statically provsisioned with VS. // Set VolumeSnapshotRef's UID to nil will let the csi-controller finds out the related VS is gone, then // VSC can be deleted. -func (c *backupController) recreateVolumeSnapshotContent(vsc *snapshotv1api.VolumeSnapshotContent) error { +func (c *backupController) recreateVolumeSnapshotContent(vsc snapshotv1api.VolumeSnapshotContent) error { timeout := 1 * time.Minute interval := 1 * time.Second - err := c.volumeSnapshotClient.SnapshotV1().VolumeSnapshotContents().Delete(context.TODO(), vsc.Name, metav1.DeleteOptions{}) + err := c.kbClient.Delete(context.TODO(), &vsc) if err != nil { return errors.Wrapf(err, "fail to delete VolumeSnapshotContent: %s", vsc.Name) } // Check VolumeSnapshotContents is already deleted, before re-creating it. 
err = wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := c.volumeSnapshotClient.SnapshotV1().VolumeSnapshotContents().Get(context.TODO(), vsc.Name, metav1.GetOptions{}) + tmpVSC := &snapshotv1api.VolumeSnapshotContent{} + err := c.kbClient.Get(context.TODO(), kbclient.ObjectKey{Name: vsc.Name}, tmpVSC) if err != nil { if apierrors.IsNotFound(err) { return true, nil @@ -1035,7 +1051,7 @@ func (c *backupController) recreateVolumeSnapshotContent(vsc *snapshotv1api.Volu } // ResourceVersion shouldn't exist for new creation. vsc.ResourceVersion = "" - _, err = c.volumeSnapshotClient.SnapshotV1().VolumeSnapshotContents().Create(context.TODO(), vsc, metav1.CreateOptions{}) + err = c.kbClient.Create(context.TODO(), &vsc) if err != nil { return errors.Wrapf(err, "fail to create VolumeSnapshotContent %s", vsc.Name) } diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index 29e7ecc9c5..4617975681 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -33,6 +33,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/version" kbclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -49,7 +50,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" "github.com/vmware-tanzu/velero/pkg/plugin/framework" pluginmocks "github.com/vmware-tanzu/velero/pkg/plugin/mocks" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + biav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/logging" @@ -59,7 +60,7 @@ type fakeBackupper struct { mock.Mock } -func (b *fakeBackupper) Backup(logger logrus.FieldLogger, backup *pkgbackup.Request, backupFile io.Writer, actions []velero.BackupItemAction, volumeSnapshotterGetter pkgbackup.VolumeSnapshotterGetter) error { +func (b *fakeBackupper) Backup(logger logrus.FieldLogger, backup *pkgbackup.Request, backupFile io.Writer, actions []biav1.BackupItemAction, volumeSnapshotterGetter pkgbackup.VolumeSnapshotterGetter) error { args := b.Called(logger, backup, backupFile, actions, volumeSnapshotterGetter) return args.Error(0) } @@ -280,13 +281,124 @@ func TestBackupLocationLabel(t *testing.T) { formatFlag: formatFlag, } - res := c.prepareBackupRequest(test.backup) + res := c.prepareBackupRequest(test.backup, logger) assert.NotNil(t, res) assert.Equal(t, test.expectedBackupLocation, res.Labels[velerov1api.StorageLocationLabel]) }) } } +func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) { + var ( + defaultBackupTTL = metav1.Duration{Duration: 24 * 30 * time.Hour} + defaultBackupLocation = "default-location" + ) + + now, err := time.Parse(time.RFC1123Z, time.RFC1123Z) + require.NoError(t, err) + + tests := []struct { + name string + backup *velerov1api.Backup + backupLocationNameInBackup string + backupLocationInApiServer *velerov1api.BackupStorageLocation + defaultBackupLocationInApiServer *velerov1api.BackupStorageLocation + expectedBackupLocation string + expectedSuccess bool + expectedValidationError string + }{ + { + name: "BackupLocation is specified in backup CR'spec and it can be found in ApiServer", + backup: builder.ForBackup("velero", "backup-1").Result(), + backupLocationNameInBackup: 
"test-backup-location", + backupLocationInApiServer: builder.ForBackupStorageLocation("velero", "test-backup-location").Result(), + defaultBackupLocationInApiServer: builder.ForBackupStorageLocation("velero", "default-location").Result(), + expectedBackupLocation: "test-backup-location", + expectedSuccess: true, + }, + { + name: "BackupLocation is specified in backup CR'spec and it can't be found in ApiServer", + backup: builder.ForBackup("velero", "backup-1").Result(), + backupLocationNameInBackup: "test-backup-location", + backupLocationInApiServer: nil, + defaultBackupLocationInApiServer: nil, + expectedSuccess: false, + expectedValidationError: "an existing backup storage location wasn't specified at backup creation time and the default 'test-backup-location' wasn't found. Please address this issue (see `velero backup-location -h` for options) and create a new backup. Error: backupstoragelocations.velero.io \"test-backup-location\" not found", + }, + { + name: "Using default BackupLocation and it can be found in ApiServer", + backup: builder.ForBackup("velero", "backup-1").Result(), + backupLocationNameInBackup: "", + backupLocationInApiServer: builder.ForBackupStorageLocation("velero", "test-backup-location").Result(), + defaultBackupLocationInApiServer: builder.ForBackupStorageLocation("velero", "default-location").Result(), + expectedBackupLocation: defaultBackupLocation, + expectedSuccess: true, + }, + { + name: "Using default BackupLocation and it can't be found in ApiServer", + backup: builder.ForBackup("velero", "backup-1").Result(), + backupLocationNameInBackup: "", + backupLocationInApiServer: nil, + defaultBackupLocationInApiServer: nil, + expectedSuccess: false, + expectedValidationError: fmt.Sprintf("an existing backup storage location wasn't specified at backup creation time and the server default '%s' doesn't exist. Please address this issue (see `velero backup-location -h` for options) and create a new backup. Error: backupstoragelocations.velero.io \"%s\" not found", defaultBackupLocation, defaultBackupLocation), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Arrange + var ( + formatFlag = logging.FormatText + logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) + apiServer = velerotest.NewAPIServer(t) + sharedInformers = informers.NewSharedInformerFactory(apiServer.VeleroClient, 0) + ) + + // objects that should init with client + objects := make([]runtime.Object, 0) + if test.backupLocationInApiServer != nil { + objects = append(objects, test.backupLocationInApiServer) + } + if test.defaultBackupLocationInApiServer != nil { + objects = append(objects, test.defaultBackupLocationInApiServer) + } + fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objects...) 
+ + discoveryHelper, err := discovery.NewHelper(apiServer.DiscoveryClient, logger) + require.NoError(t, err) + + c := &backupController{ + genericController: newGenericController("backup-test", logger), + discoveryHelper: discoveryHelper, + defaultBackupLocation: defaultBackupLocation, + kbClient: fakeClient, + snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), + defaultBackupTTL: defaultBackupTTL.Duration, + clock: clock.NewFakeClock(now), + formatFlag: formatFlag, + } + + test.backup.Spec.StorageLocation = test.backupLocationNameInBackup + + // Run + res := c.prepareBackupRequest(test.backup, logger) + + // Assert + if test.expectedSuccess { + assert.Equal(t, test.expectedBackupLocation, res.Spec.StorageLocation) + assert.NotNil(t, res) + } else { + // in every test case, we only trigger one error at once + if len(res.Status.ValidationErrors) > 1 { + assert.Fail(t, "multi error found in request") + } + assert.Equal(t, test.expectedValidationError, res.Status.ValidationErrors[0]) + } + }) + } +} + func TestDefaultBackupTTL(t *testing.T) { var ( defaultBackupTTL = metav1.Duration{Duration: 24 * 30 * time.Hour} @@ -342,7 +454,7 @@ func TestDefaultBackupTTL(t *testing.T) { formatFlag: formatFlag, } - res := c.prepareBackupRequest(test.backup) + res := c.prepareBackupRequest(test.backup, logger) assert.NotNil(t, res) assert.Equal(t, test.expectedTTL, res.Spec.TTL) assert.Equal(t, test.expectedExpiration, *res.Status.Expiration) @@ -350,6 +462,121 @@ func TestDefaultBackupTTL(t *testing.T) { } } +func TestDefaultVolumesToResticDeprecation(t *testing.T) { + tests := []struct { + name string + backup *velerov1api.Backup + globalVal bool + expectGlobal bool + expectRemap bool + expectVal bool + }{ + { + name: "DefaultVolumesToRestic is not set, DefaultVolumesToFsBackup is not set", + backup: defaultBackup().Result(), + globalVal: true, + expectGlobal: true, + expectVal: true, + }, + { + name: "DefaultVolumesToRestic is not set, DefaultVolumesToFsBackup is set to false", + backup: defaultBackup().DefaultVolumesToFsBackup(false).Result(), + globalVal: true, + expectVal: false, + }, + { + name: "DefaultVolumesToRestic is not set, DefaultVolumesToFsBackup is set to true", + backup: defaultBackup().DefaultVolumesToFsBackup(true).Result(), + globalVal: false, + expectVal: true, + }, + { + name: "DefaultVolumesToRestic is set to false, DefaultVolumesToFsBackup is not set", + backup: defaultBackup().DefaultVolumesToRestic(false).Result(), + globalVal: false, + expectGlobal: true, + expectVal: false, + }, + { + name: "DefaultVolumesToRestic is set to false, DefaultVolumesToFsBackup is set to true", + backup: defaultBackup().DefaultVolumesToRestic(false).DefaultVolumesToFsBackup(true).Result(), + globalVal: false, + expectVal: true, + }, + { + name: "DefaultVolumesToRestic is set to false, DefaultVolumesToFsBackup is set to false", + backup: defaultBackup().DefaultVolumesToRestic(false).DefaultVolumesToFsBackup(false).Result(), + globalVal: true, + expectVal: false, + }, + { + name: "DefaultVolumesToRestic is set to true, DefaultVolumesToFsBackup is not set", + backup: defaultBackup().DefaultVolumesToRestic(true).Result(), + globalVal: false, + expectRemap: true, + expectVal: true, + }, + { + name: "DefaultVolumesToRestic is set to true, DefaultVolumesToFsBackup is set to false", + backup: defaultBackup().DefaultVolumesToRestic(true).DefaultVolumesToFsBackup(false).Result(), + globalVal: false, + expectRemap: true, + expectVal: true, + }, + { + name: 
"DefaultVolumesToRestic is set to true, DefaultVolumesToFsBackup is set to true", + backup: defaultBackup().DefaultVolumesToRestic(true).DefaultVolumesToFsBackup(true).Result(), + globalVal: false, + expectRemap: true, + expectVal: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + formatFlag := logging.FormatText + + var ( + clientset = fake.NewSimpleClientset(test.backup) + sharedInformers = informers.NewSharedInformerFactory(clientset, 0) + logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) + fakeClient = velerotest.NewFakeControllerRuntimeClient(t) + ) + + apiServer := velerotest.NewAPIServer(t) + discoveryHelper, err := discovery.NewHelper(apiServer.DiscoveryClient, logger) + require.NoError(t, err) + + c := &backupController{ + genericController: newGenericController("backup-test", logger), + discoveryHelper: discoveryHelper, + client: clientset.VeleroV1(), + lister: sharedInformers.Velero().V1().Backups().Lister(), + kbClient: fakeClient, + snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), + clock: &clock.RealClock{}, + formatFlag: formatFlag, + defaultVolumesToFsBackup: test.globalVal, + } + + res := c.prepareBackupRequest(test.backup, logger) + assert.NotNil(t, res) + assert.NotNil(t, res.Spec.DefaultVolumesToFsBackup) + if test.expectRemap { + assert.Equal(t, res.Spec.DefaultVolumesToRestic, res.Spec.DefaultVolumesToFsBackup) + } else if test.expectGlobal { + assert.False(t, res.Spec.DefaultVolumesToRestic == res.Spec.DefaultVolumesToFsBackup) + assert.Equal(t, &c.defaultVolumesToFsBackup, res.Spec.DefaultVolumesToFsBackup) + } else { + assert.False(t, res.Spec.DefaultVolumesToRestic == res.Spec.DefaultVolumesToFsBackup) + assert.False(t, &c.defaultVolumesToFsBackup == res.Spec.DefaultVolumesToFsBackup) + } + + assert.Equal(t, test.expectVal, *res.Spec.DefaultVolumesToFsBackup) + }) + } +} + func TestProcessBackupCompletions(t *testing.T) { defaultBackupLocation := builder.ForBackupStorageLocation("velero", "loc-1").Default(true).Bucket("store-1").Result() @@ -359,20 +586,20 @@ func TestProcessBackupCompletions(t *testing.T) { timestamp := metav1.NewTime(now) tests := []struct { - name string - backup *velerov1api.Backup - backupLocation *velerov1api.BackupStorageLocation - defaultVolumesToRestic bool - expectedResult *velerov1api.Backup - backupExists bool - existenceCheckError error + name string + backup *velerov1api.Backup + backupLocation *velerov1api.BackupStorageLocation + defaultVolumesToFsBackup bool + expectedResult *velerov1api.Backup + backupExists bool + existenceCheckError error }{ // Completed { - name: "backup with no backup location gets the default", - backup: defaultBackup().Result(), - backupLocation: defaultBackupLocation, - defaultVolumesToRestic: true, + name: "backup with no backup location gets the default", + backup: defaultBackup().Result(), + backupLocation: defaultBackupLocation, + defaultVolumesToFsBackup: true, expectedResult: &velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ Kind: "Backup", @@ -391,8 +618,8 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, Spec: velerov1api.BackupSpec{ - StorageLocation: defaultBackupLocation.Name, - DefaultVolumesToRestic: boolptr.True(), + StorageLocation: defaultBackupLocation.Name, + DefaultVolumesToFsBackup: boolptr.True(), }, Status: velerov1api.BackupStatus{ Phase: velerov1api.BackupPhaseCompleted, @@ -405,10 +632,10 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, { - name: "backup with a specific 
backup location keeps it", - backup: defaultBackup().StorageLocation("alt-loc").Result(), - backupLocation: builder.ForBackupStorageLocation("velero", "alt-loc").Bucket("store-1").Result(), - defaultVolumesToRestic: false, + name: "backup with a specific backup location keeps it", + backup: defaultBackup().StorageLocation("alt-loc").Result(), + backupLocation: builder.ForBackupStorageLocation("velero", "alt-loc").Bucket("store-1").Result(), + defaultVolumesToFsBackup: false, expectedResult: &velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ Kind: "Backup", @@ -427,8 +654,8 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, Spec: velerov1api.BackupSpec{ - StorageLocation: "alt-loc", - DefaultVolumesToRestic: boolptr.False(), + StorageLocation: "alt-loc", + DefaultVolumesToFsBackup: boolptr.False(), }, Status: velerov1api.BackupStatus{ Phase: velerov1api.BackupPhaseCompleted, @@ -447,7 +674,7 @@ func TestProcessBackupCompletions(t *testing.T) { Bucket("store-1"). AccessMode(velerov1api.BackupStorageLocationAccessModeReadWrite). Result(), - defaultVolumesToRestic: true, + defaultVolumesToFsBackup: true, expectedResult: &velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ Kind: "Backup", @@ -466,8 +693,8 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, Spec: velerov1api.BackupSpec{ - StorageLocation: "read-write", - DefaultVolumesToRestic: boolptr.True(), + StorageLocation: "read-write", + DefaultVolumesToFsBackup: boolptr.True(), }, Status: velerov1api.BackupStatus{ Phase: velerov1api.BackupPhaseCompleted, @@ -480,10 +707,10 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, { - name: "backup with a TTL has expiration set", - backup: defaultBackup().TTL(10 * time.Minute).Result(), - backupLocation: defaultBackupLocation, - defaultVolumesToRestic: false, + name: "backup with a TTL has expiration set", + backup: defaultBackup().TTL(10 * time.Minute).Result(), + backupLocation: defaultBackupLocation, + defaultVolumesToFsBackup: false, expectedResult: &velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ Kind: "Backup", @@ -502,9 +729,9 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, Spec: velerov1api.BackupSpec{ - TTL: metav1.Duration{Duration: 10 * time.Minute}, - StorageLocation: defaultBackupLocation.Name, - DefaultVolumesToRestic: boolptr.False(), + TTL: metav1.Duration{Duration: 10 * time.Minute}, + StorageLocation: defaultBackupLocation.Name, + DefaultVolumesToFsBackup: boolptr.False(), }, Status: velerov1api.BackupStatus{ Phase: velerov1api.BackupPhaseCompleted, @@ -517,11 +744,11 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, { - name: "backup without an existing backup will succeed", - backupExists: false, - backup: defaultBackup().Result(), - backupLocation: defaultBackupLocation, - defaultVolumesToRestic: true, + name: "backup without an existing backup will succeed", + backupExists: false, + backup: defaultBackup().Result(), + backupLocation: defaultBackupLocation, + defaultVolumesToFsBackup: true, expectedResult: &velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ Kind: "Backup", @@ -540,8 +767,8 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, Spec: velerov1api.BackupSpec{ - StorageLocation: defaultBackupLocation.Name, - DefaultVolumesToRestic: boolptr.True(), + StorageLocation: defaultBackupLocation.Name, + DefaultVolumesToFsBackup: boolptr.True(), }, Status: velerov1api.BackupStatus{ Phase: velerov1api.BackupPhaseCompleted, @@ -554,12 +781,12 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, { - name: "backup specifying a false 
value for 'DefaultVolumesToRestic' keeps it", + name: "backup specifying a false value for 'DefaultVolumesToFsBackup' keeps it", backupExists: false, - backup: defaultBackup().DefaultVolumesToRestic(false).Result(), + backup: defaultBackup().DefaultVolumesToFsBackup(false).Result(), backupLocation: defaultBackupLocation, // value set in the controller is different from that specified in the backup - defaultVolumesToRestic: true, + defaultVolumesToFsBackup: true, expectedResult: &velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ Kind: "Backup", @@ -578,8 +805,8 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, Spec: velerov1api.BackupSpec{ - StorageLocation: defaultBackupLocation.Name, - DefaultVolumesToRestic: boolptr.False(), + StorageLocation: defaultBackupLocation.Name, + DefaultVolumesToFsBackup: boolptr.False(), }, Status: velerov1api.BackupStatus{ Phase: velerov1api.BackupPhaseCompleted, @@ -592,12 +819,12 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, { - name: "backup specifying a true value for 'DefaultVolumesToRestic' keeps it", + name: "backup specifying a true value for 'DefaultVolumesToFsBackup' keeps it", backupExists: false, - backup: defaultBackup().DefaultVolumesToRestic(true).Result(), + backup: defaultBackup().DefaultVolumesToFsBackup(true).Result(), backupLocation: defaultBackupLocation, // value set in the controller is different from that specified in the backup - defaultVolumesToRestic: false, + defaultVolumesToFsBackup: false, expectedResult: &velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ Kind: "Backup", @@ -616,8 +843,8 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, Spec: velerov1api.BackupSpec{ - StorageLocation: defaultBackupLocation.Name, - DefaultVolumesToRestic: boolptr.True(), + StorageLocation: defaultBackupLocation.Name, + DefaultVolumesToFsBackup: boolptr.True(), }, Status: velerov1api.BackupStatus{ Phase: velerov1api.BackupPhaseCompleted, @@ -630,12 +857,12 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, { - name: "backup specifying no value for 'DefaultVolumesToRestic' gets the default true value", + name: "backup specifying no value for 'DefaultVolumesToFsBackup' gets the default true value", backupExists: false, backup: defaultBackup().Result(), backupLocation: defaultBackupLocation, // value set in the controller is different from that specified in the backup - defaultVolumesToRestic: true, + defaultVolumesToFsBackup: true, expectedResult: &velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ Kind: "Backup", @@ -654,8 +881,8 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, Spec: velerov1api.BackupSpec{ - StorageLocation: defaultBackupLocation.Name, - DefaultVolumesToRestic: boolptr.True(), + StorageLocation: defaultBackupLocation.Name, + DefaultVolumesToFsBackup: boolptr.True(), }, Status: velerov1api.BackupStatus{ Phase: velerov1api.BackupPhaseCompleted, @@ -668,12 +895,12 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, { - name: "backup specifying no value for 'DefaultVolumesToRestic' gets the default false value", + name: "backup specifying no value for 'DefaultVolumesToFsBackup' gets the default false value", backupExists: false, backup: defaultBackup().Result(), backupLocation: defaultBackupLocation, // value set in the controller is different from that specified in the backup - defaultVolumesToRestic: false, + defaultVolumesToFsBackup: false, expectedResult: &velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ Kind: "Backup", @@ -692,8 +919,8 @@ func TestProcessBackupCompletions(t *testing.T) { 
}, }, Spec: velerov1api.BackupSpec{ - StorageLocation: defaultBackupLocation.Name, - DefaultVolumesToRestic: boolptr.False(), + StorageLocation: defaultBackupLocation.Name, + DefaultVolumesToFsBackup: boolptr.False(), }, Status: velerov1api.BackupStatus{ Phase: velerov1api.BackupPhaseCompleted, @@ -708,11 +935,11 @@ func TestProcessBackupCompletions(t *testing.T) { // Failed { - name: "backup with existing backup will fail", - backupExists: true, - backup: defaultBackup().Result(), - backupLocation: defaultBackupLocation, - defaultVolumesToRestic: true, + name: "backup with existing backup will fail", + backupExists: true, + backup: defaultBackup().Result(), + backupLocation: defaultBackupLocation, + defaultVolumesToFsBackup: true, expectedResult: &velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ Kind: "Backup", @@ -731,8 +958,8 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, Spec: velerov1api.BackupSpec{ - StorageLocation: defaultBackupLocation.Name, - DefaultVolumesToRestic: boolptr.True(), + StorageLocation: defaultBackupLocation.Name, + DefaultVolumesToFsBackup: boolptr.True(), }, Status: velerov1api.BackupStatus{ Phase: velerov1api.BackupPhaseFailed, @@ -746,11 +973,11 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, { - name: "error when checking if backup exists will cause backup to fail", - backup: defaultBackup().Result(), - existenceCheckError: errors.New("Backup already exists in object storage"), - backupLocation: defaultBackupLocation, - defaultVolumesToRestic: true, + name: "error when checking if backup exists will cause backup to fail", + backup: defaultBackup().Result(), + existenceCheckError: errors.New("Backup already exists in object storage"), + backupLocation: defaultBackupLocation, + defaultVolumesToFsBackup: true, expectedResult: &velerov1api.Backup{ TypeMeta: metav1.TypeMeta{ Kind: "Backup", @@ -769,8 +996,8 @@ func TestProcessBackupCompletions(t *testing.T) { }, }, Spec: velerov1api.BackupSpec{ - StorageLocation: defaultBackupLocation.Name, - DefaultVolumesToRestic: boolptr.True(), + StorageLocation: defaultBackupLocation.Name, + DefaultVolumesToFsBackup: boolptr.True(), }, Status: velerov1api.BackupStatus{ Phase: velerov1api.BackupPhaseFailed, @@ -823,27 +1050,27 @@ func TestProcessBackupCompletions(t *testing.T) { require.NoError(t, err) c := &backupController{ - genericController: newGenericController("backup-test", logger), - discoveryHelper: discoveryHelper, - client: clientset.VeleroV1(), - lister: sharedInformers.Velero().V1().Backups().Lister(), - kbClient: fakeClient, - snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), - defaultBackupLocation: defaultBackupLocation.Name, - defaultVolumesToRestic: test.defaultVolumesToRestic, - backupTracker: NewBackupTracker(), - metrics: metrics.NewServerMetrics(), - clock: clock.NewFakeClock(now), - newPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, - backupStoreGetter: NewFakeSingleObjectBackupStoreGetter(backupStore), - backupper: backupper, - formatFlag: formatFlag, + genericController: newGenericController("backup-test", logger), + discoveryHelper: discoveryHelper, + client: clientset.VeleroV1(), + lister: sharedInformers.Velero().V1().Backups().Lister(), + kbClient: fakeClient, + snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), + defaultBackupLocation: defaultBackupLocation.Name, + defaultVolumesToFsBackup: test.defaultVolumesToFsBackup, + backupTracker: NewBackupTracker(), + 
metrics: metrics.NewServerMetrics(), + clock: clock.NewFakeClock(now), + newPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, + backupStoreGetter: NewFakeSingleObjectBackupStoreGetter(backupStore), + backupper: backupper, + formatFlag: formatFlag, } pluginManager.On("GetBackupItemActions").Return(nil, nil) pluginManager.On("CleanupClients").Return(nil) pluginManager.On("GetItemSnapshotters").Return(nil, nil) - backupper.On("Backup", mock.Anything, mock.Anything, mock.Anything, []velero.BackupItemAction(nil), pluginManager).Return(nil) + backupper.On("Backup", mock.Anything, mock.Anything, mock.Anything, []biav1.BackupItemAction(nil), pluginManager).Return(nil) backupper.On("BackupWithResolvers", mock.Anything, mock.Anything, mock.Anything, framework.BackupItemActionResolver{}, framework.ItemSnapshotterResolver{}, pluginManager).Return(nil) backupStore.On("BackupExists", test.backupLocation.Spec.StorageType.ObjectStorage.Bucket, test.backup.Name).Return(test.backupExists, test.existenceCheckError) @@ -1007,12 +1234,15 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + formatFlag := logging.FormatText var ( client = fake.NewSimpleClientset() sharedInformers = informers.NewSharedInformerFactory(client, 0) + logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) ) c := &backupController{ + genericController: newGenericController("backup-test", logger), snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), defaultSnapshotLocations: test.defaultLocations, } diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go index a616dcb711..826470d703 100644 --- a/pkg/controller/backup_deletion_controller.go +++ b/pkg/controller/backup_deletion_controller.go @@ -39,13 +39,14 @@ import ( "github.com/vmware-tanzu/velero/pkg/metrics" "github.com/vmware-tanzu/velero/pkg/persistence" "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" "github.com/vmware-tanzu/velero/pkg/repository" - "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/kube" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vmware-tanzu/velero/pkg/podvolume" ) const ( @@ -91,7 +92,7 @@ func NewBackupDeletionReconciler( func (r *backupDeletionReconciler) SetupWithManager(mgr ctrl.Manager) error { // Make sure the expired requests can be deleted eventually - s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.DeleteBackupRequestList{}, time.Hour) + s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.DeleteBackupRequestList{}, time.Hour, kube.PeriodicalEnqueueSourceOption{}) return ctrl.NewControllerManagedBy(mgr). For(&velerov1api.DeleteBackupRequest{}). Watches(s, nil). 
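Reviewer note, not part of the patch: the hunk above adds the new options argument to NewPeriodicalEnqueueSource; the hourly source is what guarantees expired DeleteBackupRequests are reconciled eventually even when no watch event fires. For readability, here is how the whole method reads once the builder chain is finished, assuming it ends with Complete(r) as the truncated context suggests.

```go
// Sketch only: wire the periodic enqueue source so every DeleteBackupRequest is
// revisited hourly, then register the reconciler with the manager.
func (r *backupDeletionReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// Make sure the expired requests can be deleted eventually
	s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(),
		&velerov1api.DeleteBackupRequestList{}, time.Hour, kube.PeriodicalEnqueueSourceOption{})
	return ctrl.NewControllerManagedBy(mgr).
		For(&velerov1api.DeleteBackupRequest{}).
		Watches(s, nil).
		Complete(r) // assumed: the hunk is cut off before the final builder call
}
```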
@@ -281,7 +282,7 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque if snapshots, err := backupStore.GetBackupVolumeSnapshots(backup.Name); err != nil { errs = append(errs, errors.Wrap(err, "error getting backup's volume snapshots").Error()) } else { - volumeSnapshotters := make(map[string]velero.VolumeSnapshotter) + volumeSnapshotters := make(map[string]vsv1.VolumeSnapshotter) for _, snapshot := range snapshots { log.WithField("providerSnapshotID", snapshot.Status.ProviderSnapshotID).Info("Removing snapshot associated with backup") @@ -391,7 +392,7 @@ func volumeSnapshottersForVSL( namespace, vslName string, client client.Client, pluginManager clientmgmt.Manager, -) (velero.VolumeSnapshotter, error) { +) (vsv1.VolumeSnapshotter, error) { vsl := &velerov1api.VolumeSnapshotLocation{} if err := client.Get(ctx, types.NamespacedName{ Namespace: namespace, @@ -440,7 +441,7 @@ func (r *backupDeletionReconciler) deleteResticSnapshots(ctx context.Context, ba return nil } - snapshots, err := restic.GetSnapshotsInBackup(ctx, backup, r.Client) + snapshots, err := getSnapshotsInBackup(ctx, backup, r.Client) if err != nil { return []error{err} } @@ -491,3 +492,21 @@ func (r *backupDeletionReconciler) patchBackup(ctx context.Context, backup *vele } return backup, nil } + +// getSnapshotsInBackup returns a list of all restic snapshot ids associated with +// a given Velero backup. +func getSnapshotsInBackup(ctx context.Context, backup *velerov1api.Backup, kbClient client.Client) ([]repository.SnapshotIdentifier, error) { + podVolumeBackups := &velerov1api.PodVolumeBackupList{} + options := &client.ListOptions{ + LabelSelector: labels.Set(map[string]string{ + velerov1api.BackupNameLabel: label.GetValidName(backup.Name), + }).AsSelector(), + } + + err := kbClient.List(ctx, podVolumeBackups, options) + if err != nil { + return nil, errors.WithStack(err) + } + + return podvolume.GetSnapshotIdentifier(podVolumeBackups), nil +} diff --git a/pkg/controller/backup_deletion_controller_test.go b/pkg/controller/backup_deletion_controller_test.go index d2de589b24..8604c90b83 100644 --- a/pkg/controller/backup_deletion_controller_test.go +++ b/pkg/controller/backup_deletion_controller_test.go @@ -19,6 +19,7 @@ package controller import ( "bytes" "fmt" + "sort" "time" "context" @@ -32,6 +33,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + corev1api "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -52,6 +54,7 @@ import ( persistencemocks "github.com/vmware-tanzu/velero/pkg/persistence/mocks" "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" pluginmocks "github.com/vmware-tanzu/velero/pkg/plugin/mocks" + "github.com/vmware-tanzu/velero/pkg/repository" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -692,3 +695,175 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { }) } + +func TestGetSnapshotsInBackup(t *testing.T) { + tests := []struct { + name string + podVolumeBackups []velerov1api.PodVolumeBackup + expected []repository.SnapshotIdentifier + longBackupNameEnabled bool + }{ + { + name: "no pod volume backups", + podVolumeBackups: nil, + expected: nil, + }, + { + name: "no pod volume backups with matching label", + podVolumeBackups: []velerov1api.PodVolumeBackup{ + { + ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}}, + Spec: 
velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}}, + Spec: velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"}, + }, + }, + expected: nil, + }, + { + name: "some pod volume backups with matching label", + podVolumeBackups: []velerov1api.PodVolumeBackup{ + { + ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}}, + Spec: velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-3"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb-2", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-4"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "incomplete-or-failed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-2"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: ""}, + }, + }, + expected: []repository.SnapshotIdentifier{ + { + VolumeNamespace: "ns-1", + SnapshotID: "snap-3", + RepositoryType: "restic", + }, + { + VolumeNamespace: "ns-1", + SnapshotID: "snap-4", + RepositoryType: "restic", + }, + }, + }, + { + name: "some pod volume backups with matching label and backup name greater than 63 chars", + longBackupNameEnabled: true, + podVolumeBackups: []velerov1api.PodVolumeBackup{ + { + ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}}, + Spec: velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "the-really-long-backup-name-that-is-much-more-than-63-cha6ca4bc"}}, + Spec: velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: 
"ns-1"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-3"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb-2", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-4"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "incomplete-or-failed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ + Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-2"}, + }, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: ""}, + }, + }, + expected: []repository.SnapshotIdentifier{ + { + VolumeNamespace: "ns-1", + SnapshotID: "snap-3", + RepositoryType: "restic", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var ( + clientBuilder = velerotest.NewFakeControllerRuntimeClientBuilder(t) + veleroBackup = &velerov1api.Backup{} + ) + + veleroBackup.Name = "backup-1" + + if test.longBackupNameEnabled { + veleroBackup.Name = "the-really-long-backup-name-that-is-much-more-than-63-characters" + } + clientBuilder.WithLists(&velerov1api.PodVolumeBackupList{ + Items: test.podVolumeBackups, + }) + + res, err := getSnapshotsInBackup(context.TODO(), veleroBackup, clientBuilder.Build()) + assert.NoError(t, err) + + // sort to ensure good compare of slices + less := func(snapshots []repository.SnapshotIdentifier) func(i, j int) bool { + return func(i, j int) bool { + if snapshots[i].VolumeNamespace == snapshots[j].VolumeNamespace { + return snapshots[i].SnapshotID < snapshots[j].SnapshotID + } + return snapshots[i].VolumeNamespace < snapshots[j].VolumeNamespace + } + + } + + sort.Slice(test.expected, less(test.expected)) + sort.Slice(res, less(res)) + + assert.Equal(t, test.expected, res) + }) + } +} diff --git a/pkg/controller/backup_storage_location_controller.go b/pkg/controller/backup_storage_location_controller.go index 1b08da897e..793347dec4 100644 --- a/pkg/controller/backup_storage_location_controller.go +++ b/pkg/controller/backup_storage_location_controller.go @@ -44,35 +44,55 @@ const ( ) // BackupStorageLocationReconciler reconciles a BackupStorageLocation object -type BackupStorageLocationReconciler struct { - Ctx context.Context - Client client.Client - Scheme *runtime.Scheme - DefaultBackupLocationInfo storage.DefaultBackupLocationInfo +type backupStorageLocationReconciler struct { + ctx context.Context + client client.Client + scheme *runtime.Scheme + defaultBackupLocationInfo storage.DefaultBackupLocationInfo // use variables to refer to these functions so they can be // replaced with fakes for testing. 
-    NewPluginManager  func(logrus.FieldLogger) clientmgmt.Manager
-    BackupStoreGetter persistence.ObjectBackupStoreGetter
+    newPluginManager  func(logrus.FieldLogger) clientmgmt.Manager
+    backupStoreGetter persistence.ObjectBackupStoreGetter

-    Log logrus.FieldLogger
+    log logrus.FieldLogger
+}
+
+// NewBackupStorageLocationReconciler initializes and returns a backupStorageLocationReconciler struct
+func NewBackupStorageLocationReconciler(
+    ctx context.Context,
+    client client.Client,
+    scheme *runtime.Scheme,
+    defaultBackupLocationInfo storage.DefaultBackupLocationInfo,
+    newPluginManager func(logrus.FieldLogger) clientmgmt.Manager,
+    backupStoreGetter persistence.ObjectBackupStoreGetter,
+    log logrus.FieldLogger) *backupStorageLocationReconciler {
+    return &backupStorageLocationReconciler{
+        ctx:                       ctx,
+        client:                    client,
+        scheme:                    scheme,
+        defaultBackupLocationInfo: defaultBackupLocationInfo,
+        newPluginManager:          newPluginManager,
+        backupStoreGetter:         backupStoreGetter,
+        log:                       log,
+    }
 }

 // +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations/status,verbs=get;update;patch

-func (r *BackupStorageLocationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+func (r *backupStorageLocationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
     var unavailableErrors []string
     var location velerov1api.BackupStorageLocation

-    log := r.Log.WithField("controller", BackupStorageLocation).WithField(BackupStorageLocation, req.NamespacedName.String())
+    log := r.log.WithField("controller", BackupStorageLocation).WithField(BackupStorageLocation, req.NamespacedName.String())

     log.Debug("Validating availability of BackupStorageLocation")

-    locationList, err := storage.ListBackupStorageLocations(r.Ctx, r.Client, req.Namespace)
+    locationList, err := storage.ListBackupStorageLocations(r.ctx, r.client, req.Namespace)
     if err != nil {
         log.WithError(err).Error("No BackupStorageLocations found, at least one is required")
         return ctrl.Result{}, nil
     }

-    pluginManager := r.NewPluginManager(log)
+    pluginManager := r.newPluginManager(log)
     defer pluginManager.CleanupClients()

     var defaultFound bool
@@ -93,7 +113,7 @@ func (r *BackupStorageLocationReconciler) Reconcile(ctx context.Context, req ctr
         isDefault := location.Spec.Default

         // TODO(2.0) remove this check since the server default will be deprecated
-        if !defaultFound && location.Name == r.DefaultBackupLocationInfo.StorageLocation {
+        if !defaultFound && location.Name == r.defaultBackupLocationInfo.StorageLocation {
            // For backward-compatible, to configure the backup storage location as the default if
            // none of the BSLs be marked as the default and the BSL name matches against the
            // "velero server --default-backup-storage-location".
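
With the reconciler type and its fields now unexported, code outside the controller package has to construct it through NewBackupStorageLocationReconciler and register it with the controller-runtime manager. The following is a minimal sketch of that wiring, not part of this patch: the function name registerBSLController and the arguments passed in (mgr, defaultBackupLocationInfo, newPluginManager, backupStoreGetter, logger) are assumed to exist at server startup, and the usual imports of this file (context, ctrl, logrus, storage, clientmgmt, persistence) are elided.

    // registerBSLController is a sketch only: it builds the unexported reconciler
    // through its new constructor and hooks it up to the manager.
    // All arguments are assumed to have been created earlier during server startup.
    func registerBSLController(
        ctx context.Context,
        mgr ctrl.Manager,
        defaultBackupLocationInfo storage.DefaultBackupLocationInfo,
        newPluginManager func(logrus.FieldLogger) clientmgmt.Manager,
        backupStoreGetter persistence.ObjectBackupStoreGetter,
        logger logrus.FieldLogger,
    ) error {
        r := NewBackupStorageLocationReconciler(
            ctx,
            mgr.GetClient(),
            mgr.GetScheme(),
            defaultBackupLocationInfo,
            newPluginManager,
            backupStoreGetter,
            logger,
        )
        // SetupWithManager wires the periodical enqueue source and predicates shown below.
        return r.SetupWithManager(mgr)
    }
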
@@ -117,12 +137,12 @@ func (r *BackupStorageLocationReconciler) Reconcile(ctx context.Context, req ctr
             location.Status.Phase = velerov1api.BackupStorageLocationPhaseAvailable
             location.Status.Message = ""
         }
-        if err := r.Client.Patch(r.Ctx, &location, client.MergeFrom(original)); err != nil {
+        if err := r.client.Patch(r.ctx, &location, client.MergeFrom(original)); err != nil {
             log.WithError(err).Error("Error updating BackupStorageLocation phase")
         }
     }()

-    backupStore, err := r.BackupStoreGetter.Get(&location, pluginManager, log)
+    backupStore, err := r.backupStoreGetter.Get(&location, pluginManager, log)
     if err != nil {
         log.WithError(err).Error("Error getting a backup store")
         return
@@ -144,11 +164,11 @@ func (r *BackupStorageLocationReconciler) Reconcile(ctx context.Context, req ctr
     return ctrl.Result{}, nil
 }

-func (r *BackupStorageLocationReconciler) logReconciledPhase(defaultFound bool, locationList velerov1api.BackupStorageLocationList, errs []string) {
+func (r *backupStorageLocationReconciler) logReconciledPhase(defaultFound bool, locationList velerov1api.BackupStorageLocationList, errs []string) {
     var availableBSLs []*velerov1api.BackupStorageLocation
     var unAvailableBSLs []*velerov1api.BackupStorageLocation
     var unknownBSLs []*velerov1api.BackupStorageLocation
-    log := r.Log.WithField("controller", BackupStorageLocation)
+    log := r.log.WithField("controller", BackupStorageLocation)

     for i, location := range locationList.Items {
         phase := location.Status.Phase
@@ -181,21 +201,21 @@ func (r *BackupStorageLocationReconciler) logReconciledPhase(defaultFound bool,
     }
 }

-func (r *BackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) error {
+func (r *backupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) error {
     g := kube.NewPeriodicalEnqueueSource(
-        r.Log,
+        r.log,
         mgr.GetClient(),
         &velerov1api.BackupStorageLocationList{},
         bslValidationEnqueuePeriod,
-        // Add filter function to enqueue BSL per ValidationFrequency setting.
-        func(object client.Object) bool {
-            location := object.(*velerov1api.BackupStorageLocation)
-            return storage.IsReadyToValidate(location.Spec.ValidationFrequency, location.Status.LastValidationTime, r.DefaultBackupLocationInfo.ServerValidationFrequency, r.Log.WithField("controller", BackupStorageLocation))
-        },
+        kube.PeriodicalEnqueueSourceOption{},
     )
+    gp := kube.NewGenericEventPredicate(func(object client.Object) bool {
+        location := object.(*velerov1api.BackupStorageLocation)
+        return storage.IsReadyToValidate(location.Spec.ValidationFrequency, location.Status.LastValidationTime, r.defaultBackupLocationInfo.ServerValidationFrequency, r.log.WithField("controller", BackupStorageLocation))
+    })
     return ctrl.NewControllerManagedBy(mgr).
         // As the "status.LastValidationTime" field is always updated, this triggers new reconciling process, skip the update event that include no spec change to avoid the reconcile loop
         For(&velerov1api.BackupStorageLocation{}, builder.WithPredicates(kube.SpecChangePredicate{})).
-        Watches(g, nil).
+        Watches(g, nil, builder.WithPredicates(gp)).
Complete(r) } diff --git a/pkg/controller/backup_storage_location_controller_test.go b/pkg/controller/backup_storage_location_controller_test.go index 75ad691a25..06d3458c4d 100644 --- a/pkg/controller/backup_storage_location_controller_test.go +++ b/pkg/controller/backup_storage_location_controller_test.go @@ -79,16 +79,16 @@ var _ = Describe("Backup Storage Location Reconciler", func() { // Setup reconciler Expect(velerov1api.AddToScheme(scheme.Scheme)).To(Succeed()) - r := BackupStorageLocationReconciler{ - Ctx: ctx, - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(locations).Build(), - DefaultBackupLocationInfo: storage.DefaultBackupLocationInfo{ + r := backupStorageLocationReconciler{ + ctx: ctx, + client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(locations).Build(), + defaultBackupLocationInfo: storage.DefaultBackupLocationInfo{ StorageLocation: "location-1", ServerValidationFrequency: 0, }, - NewPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, - BackupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores), - Log: velerotest.NewLogger(), + newPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, + backupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores), + log: velerotest.NewLogger(), } // Assertions @@ -101,7 +101,7 @@ var _ = Describe("Backup Storage Location Reconciler", func() { key := client.ObjectKey{Name: location.Name, Namespace: location.Namespace} instance := &velerov1api.BackupStorageLocation{} - err = r.Client.Get(ctx, key, instance) + err = r.client.Get(ctx, key, instance) Expect(err).To(BeNil()) Expect(instance.Spec.Default).To(BeIdenticalTo(tests[i].expectedIsDefault)) Expect(instance.Status.Phase).To(BeIdenticalTo(tests[i].expectedPhase)) @@ -144,16 +144,16 @@ var _ = Describe("Backup Storage Location Reconciler", func() { // Setup reconciler Expect(velerov1api.AddToScheme(scheme.Scheme)).To(Succeed()) - r := BackupStorageLocationReconciler{ - Ctx: ctx, - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(locations).Build(), - DefaultBackupLocationInfo: storage.DefaultBackupLocationInfo{ + r := backupStorageLocationReconciler{ + ctx: ctx, + client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(locations).Build(), + defaultBackupLocationInfo: storage.DefaultBackupLocationInfo{ StorageLocation: "default", ServerValidationFrequency: 0, }, - NewPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, - BackupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores), - Log: velerotest.NewLogger(), + newPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, + backupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores), + log: velerotest.NewLogger(), } // Assertions @@ -166,7 +166,7 @@ var _ = Describe("Backup Storage Location Reconciler", func() { key := client.ObjectKey{Name: location.Name, Namespace: location.Namespace} instance := &velerov1api.BackupStorageLocation{} - err = r.Client.Get(ctx, key, instance) + err = r.client.Get(ctx, key, instance) Expect(err).To(BeNil()) Expect(instance.Spec.Default).To(BeIdenticalTo(tests[i].expectedIsDefault)) } diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go index c19badd6e3..8352e3ccd6 100644 --- a/pkg/controller/backup_sync_controller.go +++ b/pkg/controller/backup_sync_controller.go @@ -1,5 +1,5 @@ /* -Copyright 2020 the Velero contributors. 
+Copyright The Velero Contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,378 +21,402 @@ import ( "time" snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" - snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" + apierrors "k8s.io/apimachinery/pkg/api/errors" kuberrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/util/kube" - - "github.com/vmware-tanzu/velero/internal/storage" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/features" - velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1" - velerov1listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/persistence" "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" + "github.com/vmware-tanzu/velero/pkg/util/kube" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) -type backupSyncController struct { - *genericController +const ( + backupSyncReconcilePeriod = time.Minute +) - backupClient velerov1client.BackupsGetter - kbClient client.Client - podVolumeBackupClient velerov1client.PodVolumeBackupsGetter - backupLister velerov1listers.BackupLister - csiVSLister snapshotv1listers.VolumeSnapshotLister - csiSnapshotClient *snapshotterClientSet.Clientset - kubeClient kubernetes.Interface +type backupSyncReconciler struct { + client client.Client namespace string - defaultBackupLocation string defaultBackupSyncPeriod time.Duration newPluginManager func(logrus.FieldLogger) clientmgmt.Manager backupStoreGetter persistence.ObjectBackupStoreGetter + logger logrus.FieldLogger } -func NewBackupSyncController( - backupClient velerov1client.BackupsGetter, - kbClient client.Client, - podVolumeBackupClient velerov1client.PodVolumeBackupsGetter, - backupLister velerov1listers.BackupLister, - csiVSLister snapshotv1listers.VolumeSnapshotLister, - syncPeriod time.Duration, +// NewBackupSyncReconciler is used to generate BackupSync reconciler structure. 
+func NewBackupSyncReconciler( + client client.Client, namespace string, - csiSnapshotClient *snapshotterClientSet.Clientset, - kubeClient kubernetes.Interface, - defaultBackupLocation string, + defaultBackupSyncPeriod time.Duration, newPluginManager func(logrus.FieldLogger) clientmgmt.Manager, backupStoreGetter persistence.ObjectBackupStoreGetter, - logger logrus.FieldLogger, -) Interface { - if syncPeriod <= 0 { - syncPeriod = time.Minute - } - logger.Infof("Backup sync period is %v", syncPeriod) - - c := &backupSyncController{ - genericController: newGenericController(BackupSync, logger), - backupClient: backupClient, - kbClient: kbClient, - podVolumeBackupClient: podVolumeBackupClient, + logger logrus.FieldLogger) *backupSyncReconciler { + return &backupSyncReconciler{ + client: client, namespace: namespace, - defaultBackupLocation: defaultBackupLocation, - defaultBackupSyncPeriod: syncPeriod, - backupLister: backupLister, - csiVSLister: csiVSLister, - csiSnapshotClient: csiSnapshotClient, - kubeClient: kubeClient, - - // use variables to refer to these functions so they can be - // replaced with fakes for testing. - newPluginManager: newPluginManager, - backupStoreGetter: backupStoreGetter, + defaultBackupSyncPeriod: defaultBackupSyncPeriod, + newPluginManager: newPluginManager, + backupStoreGetter: backupStoreGetter, + logger: logger, } - - c.resyncFunc = c.run - c.resyncPeriod = 30 * time.Second - - return c } -// orderedBackupLocations returns a new slice with the default backup location first (if it exists), -// followed by the rest of the locations in no particular order. -func orderedBackupLocations(locationList *velerov1api.BackupStorageLocationList, defaultLocationName string) []velerov1api.BackupStorageLocation { - var result []velerov1api.BackupStorageLocation +// Reconcile syncs between the backups in cluster and backups metadata in object store. +func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := b.logger.WithField("controller", BackupSync) + log = log.WithField("backupLocation", req.String()) + log.Debug("Begin to sync between backups' metadata in BSL object storage and cluster's existing backups.") - for i := range locationList.Items { - if locationList.Items[i].Name == defaultLocationName { - // put the default location first - result = append(result, locationList.Items[i]) - // append everything before the default - result = append(result, locationList.Items[:i]...) - // append everything after the default - result = append(result, locationList.Items[i+1:]...) 
- - return result + location := &velerov1api.BackupStorageLocation{} + err := b.client.Get(ctx, req.NamespacedName, location) + if err != nil { + if apierrors.IsNotFound(err) { + log.Debug("BackupStorageLocation is not found") + return ctrl.Result{}, nil } + return ctrl.Result{}, errors.Wrapf(err, "error getting BackupStorageLocation %s", req.String()) } - return locationList.Items -} + pluginManager := b.newPluginManager(log) + defer pluginManager.CleanupClients() -func (c *backupSyncController) run() { - c.logger.Debug("Checking for existing backup storage locations to sync into cluster") + log.Debug("Checking backup location for backups to sync into cluster") - locationList, err := storage.ListBackupStorageLocations(context.Background(), c.kbClient, c.namespace) + backupStore, err := b.backupStoreGetter.Get(location, pluginManager, log) if err != nil { - c.logger.WithError(err).Error("No backup storage locations found, at least one is required") - return + log.WithError(err).Error("Error getting backup store for this location") + return ctrl.Result{}, nil } - // sync the default backup storage location first, if it exists - for _, location := range locationList.Items { - if location.Spec.Default { - c.defaultBackupLocation = location.Name - break - } + // get a list of all the backups that are stored in the backup storage location + res, err := backupStore.ListBackups() + if err != nil { + log.WithError(err).Error("Error listing backups in backup store") + return ctrl.Result{}, nil + } + backupStoreBackups := sets.NewString(res...) + log.WithField("backupCount", len(backupStoreBackups)).Debug("Got backups from backup store") + + // get a list of all the backups that exist as custom resources in the cluster + var clusterBackupList velerov1api.BackupList + listOption := client.ListOptions{ + LabelSelector: labels.Everything(), + Namespace: b.namespace, } - locations := orderedBackupLocations(&locationList, c.defaultBackupLocation) - - pluginManager := c.newPluginManager(c.logger) - defer pluginManager.CleanupClients() - - for _, location := range locations { - log := c.logger.WithField("backupLocation", location.Name) - syncPeriod := c.defaultBackupSyncPeriod - if location.Spec.BackupSyncPeriod != nil { - syncPeriod = location.Spec.BackupSyncPeriod.Duration - if syncPeriod == 0 { - log.Debug("Backup sync period for this location is set to 0, skipping sync") - continue - } + err = b.client.List(ctx, &clusterBackupList, &listOption) + if err != nil { + log.WithError(errors.WithStack(err)).Error("Error getting backups from cluster, proceeding with sync into cluster") + } else { + log.WithField("backupCount", len(clusterBackupList.Items)).Debug("Got backups from cluster") + } - if syncPeriod < 0 { - log.Debug("Backup sync period must be non-negative") - syncPeriod = c.defaultBackupSyncPeriod - } - } + // get a list of backups that *are* in the backup storage location and *aren't* in the cluster + clusterBackupsSet := sets.NewString() + for _, b := range clusterBackupList.Items { + clusterBackupsSet.Insert(b.Name) + } + backupsToSync := backupStoreBackups.Difference(clusterBackupsSet) - lastSync := location.Status.LastSyncedTime - if lastSync != nil { - log.Debug("Checking if backups need to be synced at this time for this location") - nextSync := lastSync.Add(syncPeriod) - if time.Now().UTC().Before(nextSync) { - continue - } - } + if count := backupsToSync.Len(); count > 0 { + log.Infof("Found %v backups in the backup location that do not exist in the cluster and need to be synced", count) + 
} else { + log.Debug("No backups found in the backup location that need to be synced into the cluster") + } - log.Debug("Checking backup location for backups to sync into cluster") + // sync each backup + for backupName := range backupsToSync { + log = log.WithField("backup", backupName) + log.Info("Attempting to sync backup into cluster") - backupStore, err := c.backupStoreGetter.Get(&location, pluginManager, log) + backup, err := backupStore.GetBackupMetadata(backupName) if err != nil { - log.WithError(err).Error("Error getting backup store for this location") + log.WithError(errors.WithStack(err)).Error("Error getting backup metadata from backup store") continue } - // get a list of all the backups that are stored in the backup storage location - res, err := backupStore.ListBackups() - if err != nil { - log.WithError(err).Error("Error listing backups in backup store") - continue - } - backupStoreBackups := sets.NewString(res...) - log.WithField("backupCount", len(backupStoreBackups)).Debug("Got backups from backup store") + backup.Namespace = b.namespace + backup.ResourceVersion = "" - // get a list of all the backups that exist as custom resources in the cluster - clusterBackups, err := c.backupLister.Backups(c.namespace).List(labels.Everything()) - if err != nil { - log.WithError(errors.WithStack(err)).Error("Error getting backups from cluster, proceeding with sync into cluster") - } else { - log.WithField("backupCount", len(clusterBackups)).Debug("Got backups from cluster") + // update the StorageLocation field and label since the name of the location + // may be different in this cluster than in the cluster that created the + // backup. + backup.Spec.StorageLocation = location.Name + if backup.Labels == nil { + backup.Labels = make(map[string]string) } + backup.Labels[velerov1api.StorageLocationLabel] = label.GetValidName(backup.Spec.StorageLocation) - // get a list of backups that *are* in the backup storage location and *aren't* in the cluster - clusterBackupsSet := sets.NewString() - for _, b := range clusterBackups { - clusterBackupsSet.Insert(b.Name) + // attempt to create backup custom resource via API + err = b.client.Create(ctx, backup, &client.CreateOptions{}) + switch { + case err != nil && kuberrs.IsAlreadyExists(err): + log.Debug("Backup already exists in cluster") + continue + case err != nil && !kuberrs.IsAlreadyExists(err): + log.WithError(errors.WithStack(err)).Error("Error syncing backup into cluster") + continue + default: + log.Info("Successfully synced backup into cluster") } - backupsToSync := backupStoreBackups.Difference(clusterBackupsSet) - if count := backupsToSync.Len(); count > 0 { - log.Infof("Found %v backups in the backup location that do not exist in the cluster and need to be synced", count) - } else { - log.Debug("No backups found in the backup location that need to be synced into the cluster") + // process the pod volume backups from object store, if any + podVolumeBackups, err := backupStore.GetPodVolumeBackups(backupName) + if err != nil { + log.WithError(errors.WithStack(err)).Error("Error getting pod volume backups for this backup from backup store") + continue } - // sync each backup - for backupName := range backupsToSync { - log = log.WithField("backup", backupName) - log.Info("Attempting to sync backup into cluster") + for _, podVolumeBackup := range podVolumeBackups { + log := log.WithField("podVolumeBackup", podVolumeBackup.Name) + log.Debug("Checking this pod volume backup to see if it needs to be synced into the cluster") - backup, err 
:= backupStore.GetBackupMetadata(backupName) - if err != nil { - log.WithError(errors.WithStack(err)).Error("Error getting backup metadata from backup store") - continue + for i, ownerRef := range podVolumeBackup.OwnerReferences { + if ownerRef.APIVersion == velerov1api.SchemeGroupVersion.String() && ownerRef.Kind == "Backup" && ownerRef.Name == backup.Name { + log.WithField("uid", backup.UID).Debugf("Updating pod volume backup's owner reference UID") + podVolumeBackup.OwnerReferences[i].UID = backup.UID + } } - backup.Namespace = c.namespace - backup.ResourceVersion = "" - - // update the StorageLocation field and label since the name of the location - // may be different in this cluster than in the cluster that created the - // backup. - backup.Spec.StorageLocation = location.Name - if backup.Labels == nil { - backup.Labels = make(map[string]string) + if _, ok := podVolumeBackup.Labels[velerov1api.BackupUIDLabel]; ok { + podVolumeBackup.Labels[velerov1api.BackupUIDLabel] = string(backup.UID) } - backup.Labels[velerov1api.StorageLocationLabel] = label.GetValidName(backup.Spec.StorageLocation) - // attempt to create backup custom resource via API - backup, err = c.backupClient.Backups(backup.Namespace).Create(context.TODO(), backup, metav1.CreateOptions{}) + podVolumeBackup.Namespace = backup.Namespace + podVolumeBackup.ResourceVersion = "" + + err = b.client.Create(ctx, podVolumeBackup, &client.CreateOptions{}) switch { case err != nil && kuberrs.IsAlreadyExists(err): - log.Debug("Backup already exists in cluster") + log.Debug("Pod volume backup already exists in cluster") continue case err != nil && !kuberrs.IsAlreadyExists(err): - log.WithError(errors.WithStack(err)).Error("Error syncing backup into cluster") + log.WithError(errors.WithStack(err)).Error("Error syncing pod volume backup into cluster") continue default: - log.Info("Successfully synced backup into cluster") + log.Debug("Synced pod volume backup into cluster") } + } - // process the pod volume backups from object store, if any - podVolumeBackups, err := backupStore.GetPodVolumeBackups(backupName) + if features.IsEnabled(velerov1api.CSIFeatureFlag) { + // we are syncing these objects only to ensure that the storage snapshots are cleaned up + // on backup deletion or expiry. 
+ log.Info("Syncing CSI VolumeSnapshotClasses in backup") + vsClasses, err := backupStore.GetCSIVolumeSnapshotClasses(backupName) if err != nil { - log.WithError(errors.WithStack(err)).Error("Error getting pod volume backups for this backup from backup store") + log.WithError(errors.WithStack(err)).Error("Error getting CSI VolumeSnapClasses for this backup from backup store") continue } - - for _, podVolumeBackup := range podVolumeBackups { - log := log.WithField("podVolumeBackup", podVolumeBackup.Name) - log.Debug("Checking this pod volume backup to see if it needs to be synced into the cluster") - - for i, ownerRef := range podVolumeBackup.OwnerReferences { - if ownerRef.APIVersion == velerov1api.SchemeGroupVersion.String() && ownerRef.Kind == "Backup" && ownerRef.Name == backup.Name { - log.WithField("uid", backup.UID).Debugf("Updating pod volume backup's owner reference UID") - podVolumeBackup.OwnerReferences[i].UID = backup.UID - } - } - - if _, ok := podVolumeBackup.Labels[velerov1api.BackupUIDLabel]; ok { - podVolumeBackup.Labels[velerov1api.BackupUIDLabel] = string(backup.UID) - } - - podVolumeBackup.Namespace = backup.Namespace - podVolumeBackup.ResourceVersion = "" - - _, err = c.podVolumeBackupClient.PodVolumeBackups(backup.Namespace).Create(context.TODO(), podVolumeBackup, metav1.CreateOptions{}) + for _, vsClass := range vsClasses { + vsClass.ResourceVersion = "" + err := b.client.Create(ctx, vsClass, &client.CreateOptions{}) switch { case err != nil && kuberrs.IsAlreadyExists(err): - log.Debug("Pod volume backup already exists in cluster") + log.Debugf("VolumeSnapshotClass %s already exists in cluster", vsClass.Name) continue case err != nil && !kuberrs.IsAlreadyExists(err): - log.WithError(errors.WithStack(err)).Error("Error syncing pod volume backup into cluster") + log.WithError(errors.WithStack(err)).Errorf("Error syncing VolumeSnapshotClass %s into cluster", vsClass.Name) continue default: - log.Debug("Synced pod volume backup into cluster") + log.Infof("Created CSI VolumeSnapshotClass %s", vsClass.Name) } } - if features.IsEnabled(velerov1api.CSIFeatureFlag) { - // we are syncing these objects only to ensure that the storage snapshots are cleaned up - // on backup deletion or expiry. 
- log.Info("Syncing CSI volumesnapshotclasses in backup") - vsClasses, err := backupStore.GetCSIVolumeSnapshotClasses(backupName) - if err != nil { - log.WithError(errors.WithStack(err)).Error("Error getting CSI volumesnapclasses for this backup from backup store") - continue - } - for _, vsClass := range vsClasses { - vsClass.ResourceVersion = "" - created, err := c.csiSnapshotClient.SnapshotV1().VolumeSnapshotClasses().Create(context.TODO(), vsClass, metav1.CreateOptions{}) - if err != nil { - log.WithError(errors.WithStack(err)).Errorf("Error syncing volumesnapshotclass %s into cluster", vsClass.Name) - continue - } - log.Infof("Created CSI volumesnapshotclass %s", created.Name) - } + log.Info("Syncing CSI volumesnapshotcontents in backup") + snapConts, err := backupStore.GetCSIVolumeSnapshotContents(backupName) + if err != nil { + log.WithError(errors.WithStack(err)).Error("Error getting CSI volumesnapshotcontents for this backup from backup store") + continue + } - log.Info("Syncing CSI volumesnapshotcontents in backup") - snapConts, err := backupStore.GetCSIVolumeSnapshotContents(backupName) - if err != nil { - log.WithError(errors.WithStack(err)).Error("Error getting CSI volumesnapshotcontents for this backup from backup store") + log.Infof("Syncing %d CSI volumesnapshotcontents in backup", len(snapConts)) + for _, snapCont := range snapConts { + // TODO: Reset ResourceVersion prior to persisting VolumeSnapshotContents + snapCont.ResourceVersion = "" + err := b.client.Create(ctx, snapCont, &client.CreateOptions{}) + switch { + case err != nil && kuberrs.IsAlreadyExists(err): + log.Debugf("volumesnapshotcontent %s already exists in cluster", snapCont.Name) continue - } - - log.Infof("Syncing %d CSI volumesnapshotcontents in backup", len(snapConts)) - for _, snapCont := range snapConts { - // TODO: Reset ResourceVersion prior to persisting VolumeSnapshotContents - snapCont.ResourceVersion = "" - created, err := c.csiSnapshotClient.SnapshotV1().VolumeSnapshotContents().Create(context.TODO(), snapCont, metav1.CreateOptions{}) - switch { - case err != nil && kuberrs.IsAlreadyExists(err): - log.Debugf("volumesnapshotcontent %s already exists in cluster", snapCont.Name) - continue - case err != nil && !kuberrs.IsAlreadyExists(err): - log.WithError(errors.WithStack(err)).Errorf("Error syncing volumesnapshotcontent %s into cluster", snapCont.Name) - continue - default: - log.Infof("Created CSI volumesnapshotcontent %s", created.Name) - } + case err != nil && !kuberrs.IsAlreadyExists(err): + log.WithError(errors.WithStack(err)).Errorf("Error syncing volumesnapshotcontent %s into cluster", snapCont.Name) + continue + default: + log.Infof("Created CSI volumesnapshotcontent %s", snapCont.Name) } } } + } - c.deleteOrphanedBackups(location.Name, backupStoreBackups, log) + b.deleteOrphanedBackups(ctx, location.Name, backupStoreBackups, log) - // update the location's last-synced time field - statusPatch := client.MergeFrom(location.DeepCopy()) - location.Status.LastSyncedTime = &metav1.Time{Time: time.Now().UTC()} - if err := c.kbClient.Patch(context.Background(), &location, statusPatch); err != nil { - log.WithError(errors.WithStack(err)).Error("Error patching backup location's last-synced time") - continue - } + // update the location's last-synced time field + statusPatch := client.MergeFrom(location.DeepCopy()) + location.Status.LastSyncedTime = &metav1.Time{Time: time.Now().UTC()} + if err := b.client.Patch(ctx, location, statusPatch); err != nil { + 
log.WithError(errors.WithStack(err)).Error("Error patching backup location's last-synced time") + return ctrl.Result{}, nil } + + return ctrl.Result{}, nil +} + +// SetupWithManager is used to setup controller and its watching sources. +func (b *backupSyncReconciler) SetupWithManager(mgr ctrl.Manager) error { + backupSyncSource := kube.NewPeriodicalEnqueueSource( + b.logger, + mgr.GetClient(), + &velerov1api.BackupStorageLocationList{}, + backupSyncReconcilePeriod, + kube.PeriodicalEnqueueSourceOption{ + OrderFunc: backupSyncSourceOrderFunc, + }, + ) + + gp := kube.NewGenericEventPredicate(func(object client.Object) bool { + location := object.(*velerov1api.BackupStorageLocation) + return b.locationFilterFunc(location) + }) + + return ctrl.NewControllerManagedBy(mgr). + // Filter all BSL events, because this controller is supposed to run periodically, not by event. + For(&velerov1api.BackupStorageLocation{}, builder.WithPredicates(kube.FalsePredicate{})). + Watches(backupSyncSource, nil, builder.WithPredicates(gp)). + Complete(b) } // deleteOrphanedBackups deletes backup objects (CRDs) from Kubernetes that have the specified location // and a phase of Completed, but no corresponding backup in object storage. -func (c *backupSyncController) deleteOrphanedBackups(locationName string, backupStoreBackups sets.String, log logrus.FieldLogger) { - locationSelector := labels.Set(map[string]string{ - velerov1api.StorageLocationLabel: label.GetValidName(locationName), - }).AsSelector() - - backups, err := c.backupLister.Backups(c.namespace).List(locationSelector) +func (b *backupSyncReconciler) deleteOrphanedBackups(ctx context.Context, locationName string, backupStoreBackups sets.String, log logrus.FieldLogger) { + var backupList velerov1api.BackupList + listOption := client.ListOptions{ + LabelSelector: labels.Set(map[string]string{ + velerov1api.StorageLocationLabel: label.GetValidName(locationName), + }).AsSelector(), + } + err := b.client.List(ctx, &backupList, &listOption) if err != nil { log.WithError(errors.WithStack(err)).Error("Error listing backups from cluster") return } - if len(backups) == 0 { + + if len(backupList.Items) == 0 { return } - for _, backup := range backups { + for _, backup := range backupList.Items { log = log.WithField("backup", backup.Name) if backup.Status.Phase != velerov1api.BackupPhaseCompleted || backupStoreBackups.Has(backup.Name) { continue } - if err := c.backupClient.Backups(backup.Namespace).Delete(context.TODO(), backup.Name, metav1.DeleteOptions{}); err != nil { + + if err := b.client.Delete(ctx, &backup, &client.DeleteOptions{}); err != nil { log.WithError(errors.WithStack(err)).Error("Error deleting orphaned backup from cluster") } else { log.Debug("Deleted orphaned backup from cluster") - c.deleteCSISnapshotsByBackup(backup.Name, log) + b.deleteCSISnapshotsByBackup(ctx, backup.Name, log) } } } -func (c *backupSyncController) deleteCSISnapshotsByBackup(backupName string, log logrus.FieldLogger) { +func (b *backupSyncReconciler) deleteCSISnapshotsByBackup(ctx context.Context, backupName string, log logrus.FieldLogger) { if !features.IsEnabled(velerov1api.CSIFeatureFlag) { return } m := client.MatchingLabels{velerov1api.BackupNameLabel: label.GetValidName(backupName)} - if vsList, err := c.csiVSLister.List(label.NewSelectorForBackup(label.GetValidName(backupName))); err != nil { + var vsList snapshotv1api.VolumeSnapshotList + listOptions := &client.ListOptions{ + LabelSelector: label.NewSelectorForBackup(label.GetValidName(backupName)), + } + if err := 
b.client.List(ctx, &vsList, listOptions); err != nil { log.WithError(err).Warnf("Failed to list volumesnapshots for backup: %s, the deletion will be skipped", backupName) } else { - for _, vs := range vsList { + for _, vs := range vsList.Items { name := kube.NamespaceAndName(vs.GetObjectMeta()) log.Debugf("Deleting volumesnapshot %s", name) - if err := c.kbClient.Delete(context.TODO(), vs); err != nil { + if err := b.client.Delete(context.TODO(), &vs); err != nil { log.WithError(err).Warnf("Failed to delete volumesnapshot %s", name) } } } vsc := &snapshotv1api.VolumeSnapshotContent{} log.Debugf("Deleting volumesnapshotcontents for backup: %s", backupName) - if err := c.kbClient.DeleteAllOf(context.TODO(), vsc, m); err != nil { + if err := b.client.DeleteAllOf(context.TODO(), vsc, m); err != nil { log.WithError(err).Warnf("Failed to delete volumesnapshotcontents for backup: %s", backupName) } } + +// backupSyncSourceOrderFunc returns a new slice with the default backup location first (if it exists), +// followed by the rest of the locations in no particular order. +func backupSyncSourceOrderFunc(objList client.ObjectList) client.ObjectList { + inputBSLList := objList.(*velerov1api.BackupStorageLocationList) + resultBSLList := &velerov1api.BackupStorageLocationList{} + bslArray := make([]runtime.Object, 0) + + if len(inputBSLList.Items) <= 0 { + return objList + } + + for i := range inputBSLList.Items { + location := inputBSLList.Items[i] + + // sync the default backup storage location first, if it exists + if location.Spec.Default { + // put the default location first + bslArray = append(bslArray, &inputBSLList.Items[i]) + // append everything before the default + for _, bsl := range inputBSLList.Items[:i] { + bslArray = append(bslArray, &bsl) + } + // append everything after the default + for _, bsl := range inputBSLList.Items[i+1:] { + bslArray = append(bslArray, &bsl) + } + meta.SetList(resultBSLList, bslArray) + + return resultBSLList + } + } + + // No default BSL found. Return the input. + return objList +} + +func (b *backupSyncReconciler) locationFilterFunc(location *velerov1api.BackupStorageLocation) bool { + syncPeriod := b.defaultBackupSyncPeriod + if location.Spec.BackupSyncPeriod != nil { + syncPeriod = location.Spec.BackupSyncPeriod.Duration + if syncPeriod == 0 { + b.logger.Debug("Backup sync period for this location is set to 0, skipping sync") + return false + } + + if syncPeriod < 0 { + b.logger.Debug("Backup sync period must be non-negative") + syncPeriod = b.defaultBackupSyncPeriod + } + } + + lastSync := location.Status.LastSyncedTime + if lastSync != nil { + b.logger.Debug("Checking if backups need to be synced at this time for this location") + nextSync := lastSync.Add(syncPeriod) + if time.Now().UTC().Before(nextSync) { + return false + } + } + return true +} diff --git a/pkg/controller/backup_sync_controller_test.go b/pkg/controller/backup_sync_controller_test.go index 6ba8253733..4f1e280c55 100644 --- a/pkg/controller/backup_sync_controller_test.go +++ b/pkg/controller/backup_sync_controller_test.go @@ -18,21 +18,27 @@ package controller import ( "context" - "testing" + "fmt" "time" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" core "k8s.io/client-go/testing" + ctrl "sigs.k8s.io/controller-runtime" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlfake "sigs.k8s.io/controller-runtime/pkg/client/fake" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake" - informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions" "github.com/vmware-tanzu/velero/pkg/label" persistencemocks "github.com/vmware-tanzu/velero/pkg/persistence/mocks" "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" @@ -40,12 +46,30 @@ import ( velerotest "github.com/vmware-tanzu/velero/pkg/test" ) +func defaultLocation(namespace string) *velerov1api.BackupStorageLocation { + return &velerov1api.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "location-1", + }, + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "objStoreProvider", + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ + Bucket: "bucket-1", + }, + }, + Default: true, + }, + } +} + func defaultLocationsList(namespace string) []*velerov1api.BackupStorageLocation { return []*velerov1api.BackupStorageLocation{ { ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, - Name: "location-1", + Name: "location-0", }, Spec: velerov1api.BackupStorageLocationSpec{ Provider: "objStoreProvider", @@ -54,32 +78,27 @@ func defaultLocationsList(namespace string) []*velerov1api.BackupStorageLocation Bucket: "bucket-1", }, }, - Default: true, }, }, { ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, - Name: "location-2", + Name: "location-1", }, Spec: velerov1api.BackupStorageLocationSpec{ Provider: "objStoreProvider", StorageType: velerov1api.StorageType{ ObjectStorage: &velerov1api.ObjectStorageLocation{ - Bucket: "bucket-2", + Bucket: "bucket-1", }, }, + Default: true, }, }, - } -} - -func defaultLocationsListWithLongerLocationName(namespace string) []*velerov1api.BackupStorageLocation { - return []*velerov1api.BackupStorageLocation{ { ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, - Name: "the-really-long-location-name-that-is-much-more-than-63-characters-1", + Name: "location-2", }, Spec: velerov1api.BackupStorageLocationSpec{ Provider: "objStoreProvider", @@ -93,13 +112,13 @@ func defaultLocationsListWithLongerLocationName(namespace string) []*velerov1api { ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, - Name: "the-really-long-location-name-that-is-much-more-than-63-characters-2", + Name: "location-3", }, Spec: velerov1api.BackupStorageLocationSpec{ Provider: "objStoreProvider", StorageType: velerov1api.StorageType{ ObjectStorage: &velerov1api.ObjectStorageLocation{ - Bucket: "bucket-2", + Bucket: "bucket-1", }, }, }, @@ -107,485 +126,447 @@ func defaultLocationsListWithLongerLocationName(namespace string) []*velerov1api } } -func TestBackupSyncControllerRun(t *testing.T) { - type cloudBackupData struct { - backup *velerov1api.Backup - podVolumeBackups []*velerov1api.PodVolumeBackup +func defaultLocationWithLongerLocationName(namespace string) 
*velerov1api.BackupStorageLocation { + return &velerov1api.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "the-really-long-location-name-that-is-much-more-than-63-characters-1", + }, + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "objStoreProvider", + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ + Bucket: "bucket-1", + }, + }, + }, } +} - tests := []struct { - name string - namespace string - locations []*velerov1api.BackupStorageLocation - cloudBuckets map[string][]*cloudBackupData - existingBackups []*velerov1api.Backup - existingPodVolumeBackups []*velerov1api.PodVolumeBackup - longLocationNameEnabled bool - }{ - { - name: "no cloud backups", - }, - { - name: "normal case", - namespace: "ns-1", - locations: defaultLocationsList("ns-1"), - cloudBuckets: map[string][]*cloudBackupData{ - "bucket-1": { - &cloudBackupData{ +func numBackups(c ctrlClient.WithWatch, ns string) (int, error) { + var existingK8SBackups velerov1api.BackupList + err := c.List(context.TODO(), &existingK8SBackups, &ctrlClient.ListOptions{}) + if err != nil { + return 0, err + } + + return len(existingK8SBackups.Items), nil +} + +var _ = Describe("Backup Sync Reconciler", func() { + It("Test Backup Sync Reconciler basic function", func() { + type cloudBackupData struct { + backup *velerov1api.Backup + podVolumeBackups []*velerov1api.PodVolumeBackup + } + + tests := []struct { + name string + namespace string + location *velerov1api.BackupStorageLocation + cloudBackups []*cloudBackupData + existingBackups []*velerov1api.Backup + existingPodVolumeBackups []*velerov1api.PodVolumeBackup + longLocationNameEnabled bool + }{ + { + name: "no cloud backups", + namespace: "ns-1", + location: defaultLocation("ns-1"), + }, + { + name: "normal case", + namespace: "ns-1", + location: defaultLocation("ns-1"), + cloudBackups: []*cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-1").Result(), }, - &cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-2").Result(), }, }, - "bucket-2": { - &cloudBackupData{ - backup: builder.ForBackup("ns-1", "backup-3").Result(), - }, - }, }, - }, - { - name: "all synced backups get created in Velero server's namespace", - namespace: "velero", - locations: defaultLocationsList("velero"), - cloudBuckets: map[string][]*cloudBackupData{ - "bucket-1": { - &cloudBackupData{ + { + name: "all synced backups get created in Velero server's namespace", + namespace: "velero", + location: defaultLocation("velero"), + cloudBackups: []*cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-1").Result(), }, - &cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-2").Result(), }, }, - "bucket-2": { - &cloudBackupData{ - backup: builder.ForBackup("ns-2", "backup-3").Result(), - }, - &cloudBackupData{ - backup: builder.ForBackup("velero", "backup-4").Result(), - }, - }, }, - }, - { - name: "new backups get synced when some cloud backups already exist in the cluster", - namespace: "ns-1", - locations: defaultLocationsList("ns-1"), - cloudBuckets: map[string][]*cloudBackupData{ - "bucket-1": { - &cloudBackupData{ + { + name: "new backups get synced when some cloud backups already exist in the cluster", + namespace: "ns-1", + location: defaultLocation("ns-1"), + cloudBackups: []*cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-1").Result(), }, - &cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-2").Result(), }, }, - "bucket-2": { - &cloudBackupData{ - backup: 
builder.ForBackup("ns-1", "backup-3").Result(), - }, - &cloudBackupData{ - backup: builder.ForBackup("ns-1", "backup-4").Result(), - }, + existingBackups: []*velerov1api.Backup{ + // add a label to each existing backup so we can differentiate it from the cloud + // backup during verification + builder.ForBackup("ns-1", "backup-1").StorageLocation("location-1").ObjectMeta(builder.WithLabels("i-exist", "true")).Result(), + builder.ForBackup("ns-1", "backup-3").StorageLocation("location-2").ObjectMeta(builder.WithLabels("i-exist", "true")).Result(), }, }, - existingBackups: []*velerov1api.Backup{ - // add a label to each existing backup so we can differentiate it from the cloud - // backup during verification - builder.ForBackup("ns-1", "backup-1").StorageLocation("location-1").ObjectMeta(builder.WithLabels("i-exist", "true")).Result(), - builder.ForBackup("ns-1", "backup-3").StorageLocation("location-2").ObjectMeta(builder.WithLabels("i-exist", "true")).Result(), - }, - }, - { - name: "existing backups without a StorageLocation get it filled in", - namespace: "ns-1", - locations: defaultLocationsList("ns-1"), - cloudBuckets: map[string][]*cloudBackupData{ - "bucket-1": { - &cloudBackupData{ + { + name: "existing backups without a StorageLocation get it filled in", + namespace: "ns-1", + location: defaultLocation("ns-1"), + cloudBackups: []*cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-1").Result(), }, }, + existingBackups: []*velerov1api.Backup{ + // add a label to each existing backup so we can differentiate it from the cloud + // backup during verification + builder.ForBackup("ns-1", "backup-1").ObjectMeta(builder.WithLabels("i-exist", "true")).StorageLocation("location-1").Result(), + }, }, - existingBackups: []*velerov1api.Backup{ - // add a label to each existing backup so we can differentiate it from the cloud - // backup during verification - builder.ForBackup("ns-1", "backup-1").ObjectMeta(builder.WithLabels("i-exist", "true")).StorageLocation("location-1").Result(), - }, - }, - { - name: "backup storage location names and labels get updated", - namespace: "ns-1", - locations: defaultLocationsList("ns-1"), - cloudBuckets: map[string][]*cloudBackupData{ - "bucket-1": { - &cloudBackupData{ + { + name: "backup storage location names and labels get updated", + namespace: "ns-1", + location: defaultLocation("ns-1"), + cloudBackups: []*cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-1").StorageLocation("foo").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "foo")).Result(), }, - &cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-2").Result(), }, }, - "bucket-2": { - &cloudBackupData{ - backup: builder.ForBackup("ns-1", "backup-3").StorageLocation("bar").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "bar")).Result(), - }, - }, }, - }, - { - name: "backup storage location names and labels get updated with location name greater than 63 chars", - namespace: "ns-1", - locations: defaultLocationsListWithLongerLocationName("ns-1"), - longLocationNameEnabled: true, - cloudBuckets: map[string][]*cloudBackupData{ - "bucket-1": { - &cloudBackupData{ + { + name: "backup storage location names and labels get updated with location name greater than 63 chars", + namespace: "ns-1", + location: defaultLocationWithLongerLocationName("ns-1"), + longLocationNameEnabled: true, + cloudBackups: []*cloudBackupData{ + { backup: builder.ForBackup("ns-1", 
"backup-1").StorageLocation("foo").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "foo")).Result(), }, - &cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-2").Result(), }, }, - "bucket-2": { - &cloudBackupData{ - backup: builder.ForBackup("ns-1", "backup-3").StorageLocation("bar").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "bar")).Result(), - }, - }, }, - }, - { - name: "all synced backups and pod volume backups get created in Velero server's namespace", - namespace: "ns-1", - locations: defaultLocationsList("ns-1"), - cloudBuckets: map[string][]*cloudBackupData{ - "bucket-1": { - &cloudBackupData{ + { + name: "all synced backups and pod volume backups get created in Velero server's namespace", + namespace: "ns-1", + location: defaultLocation("ns-1"), + cloudBackups: []*cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-1").Result(), podVolumeBackups: []*velerov1api.PodVolumeBackup{ builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(), }, }, - &cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-2").Result(), podVolumeBackups: []*velerov1api.PodVolumeBackup{ builder.ForPodVolumeBackup("ns-1", "pvb-2").Result(), }, }, }, - "bucket-2": { - &cloudBackupData{ - backup: builder.ForBackup("ns-1", "backup-3").Result(), - }, - &cloudBackupData{ - backup: builder.ForBackup("ns-1", "backup-4").Result(), - podVolumeBackups: []*velerov1api.PodVolumeBackup{ - builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(), - builder.ForPodVolumeBackup("ns-1", "pvb-2").Result(), - builder.ForPodVolumeBackup("ns-1", "pvb-3").Result(), - }, - }, - }, }, - }, - { - name: "new pod volume backups get synched when some pod volume backups already exist in the cluster", - namespace: "ns-1", - locations: defaultLocationsList("ns-1"), - cloudBuckets: map[string][]*cloudBackupData{ - "bucket-1": { - &cloudBackupData{ + { + name: "new pod volume backups get synched when some pod volume backups already exist in the cluster", + namespace: "ns-1", + location: defaultLocation("ns-1"), + cloudBackups: []*cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-1").Result(), podVolumeBackups: []*velerov1api.PodVolumeBackup{ builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(), }, }, - &cloudBackupData{ + { backup: builder.ForBackup("ns-1", "backup-2").Result(), podVolumeBackups: []*velerov1api.PodVolumeBackup{ builder.ForPodVolumeBackup("ns-1", "pvb-3").Result(), }, }, }, - "bucket-2": { - &cloudBackupData{ - backup: builder.ForBackup("ns-1", "backup-3").Result(), - }, - &cloudBackupData{ - backup: builder.ForBackup("ns-1", "backup-4").Result(), - podVolumeBackups: []*velerov1api.PodVolumeBackup{ - builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(), - builder.ForPodVolumeBackup("ns-1", "pvb-5").Result(), - builder.ForPodVolumeBackup("ns-1", "pvb-6").Result(), - }, - }, + existingPodVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(), + builder.ForPodVolumeBackup("ns-1", "pvb-2").Result(), }, }, - existingPodVolumeBackups: []*velerov1api.PodVolumeBackup{ - builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(), - builder.ForPodVolumeBackup("ns-1", "pvb-2").Result(), - }, - }, - } + } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + for _, test := range tests { var ( - client = fake.NewSimpleClientset() - fakeClient = velerotest.NewFakeControllerRuntimeClient(t) - sharedInformers = informers.NewSharedInformerFactory(client, 0) - pluginManager = &pluginmocks.Manager{} - backupStores 
= make(map[string]*persistencemocks.BackupStore) + client = ctrlfake.NewClientBuilder().Build() + pluginManager = &pluginmocks.Manager{} + backupStores = make(map[string]*persistencemocks.BackupStore) ) - c := NewBackupSyncController( - client.VeleroV1(), - fakeClient, - client.VeleroV1(), - sharedInformers.Velero().V1().Backups().Lister(), - nil, // csiVSLister - time.Duration(0), - test.namespace, - nil, // csiSnapshotClient - nil, // kubeClient - "", - func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, - NewFakeObjectBackupStoreGetter(backupStores), - velerotest.NewLogger(), - ).(*backupSyncController) - pluginManager.On("CleanupClients").Return(nil) - - for _, location := range test.locations { - require.NoError(t, fakeClient.Create(context.Background(), location)) - backupStores[location.Name] = &persistencemocks.BackupStore{} + r := backupSyncReconciler{ + client: client, + namespace: test.namespace, + defaultBackupSyncPeriod: time.Second * 10, + newPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, + backupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores), + logger: velerotest.NewLogger(), } - for _, location := range test.locations { - backupStore, ok := backupStores[location.Name] - require.True(t, ok, "no mock backup store for location %s", location.Name) + if test.location != nil { + Expect(r.client.Create(ctx, test.location)).ShouldNot(HaveOccurred()) + backupStores[test.location.Name] = &persistencemocks.BackupStore{} + + backupStore, ok := backupStores[test.location.Name] + Expect(ok).To(BeTrue(), "no mock backup store for location %s", test.location.Name) var backupNames []string - for _, bucket := range test.cloudBuckets[location.Spec.ObjectStorage.Bucket] { - backupNames = append(backupNames, bucket.backup.Name) - backupStore.On("GetBackupMetadata", bucket.backup.Name).Return(bucket.backup, nil) - backupStore.On("GetPodVolumeBackups", bucket.backup.Name).Return(bucket.podVolumeBackups, nil) + for _, backup := range test.cloudBackups { + backupNames = append(backupNames, backup.backup.Name) + backupStore.On("GetBackupMetadata", backup.backup.Name).Return(backup.backup, nil) + backupStore.On("GetPodVolumeBackups", backup.backup.Name).Return(backup.podVolumeBackups, nil) } backupStore.On("ListBackups").Return(backupNames, nil) } for _, existingBackup := range test.existingBackups { - require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(existingBackup)) - - _, err := client.VeleroV1().Backups(test.namespace).Create(context.TODO(), existingBackup, metav1.CreateOptions{}) - require.NoError(t, err) + err := client.Create(context.TODO(), existingBackup, &ctrlClient.CreateOptions{}) + Expect(err).ShouldNot(HaveOccurred()) } for _, existingPodVolumeBackup := range test.existingPodVolumeBackups { - require.NoError(t, sharedInformers.Velero().V1().PodVolumeBackups().Informer().GetStore().Add(existingPodVolumeBackup)) - - _, err := client.VeleroV1().PodVolumeBackups(test.namespace).Create(context.TODO(), existingPodVolumeBackup, metav1.CreateOptions{}) - require.NoError(t, err) + err := client.Create(context.TODO(), existingPodVolumeBackup, &ctrlClient.CreateOptions{}) + Expect(err).ShouldNot(HaveOccurred()) } - client.ClearActions() - c.run() - - for bucket, backupDataSet := range test.cloudBuckets { - // figure out which location this bucket is for; we need this for verification - // purposes later - var location *velerov1api.BackupStorageLocation - for _, loc := range test.locations { - if 
loc.Spec.ObjectStorage.Bucket == bucket { - location = loc + actualResult, err := r.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{Namespace: test.location.Namespace, Name: test.location.Name}, + }) + + Expect(actualResult).To(BeEquivalentTo(ctrl.Result{})) + Expect(err).To(BeNil()) + + // process the cloud backups + for _, cloudBackupData := range test.cloudBackups { + obj := &velerov1api.Backup{} + err := client.Get( + context.TODO(), + types.NamespacedName{ + Namespace: cloudBackupData.backup.Namespace, + Name: cloudBackupData.backup.Name}, + obj) + Expect(err).To(BeNil()) + + // did this cloud backup already exist in the cluster? + var existing *velerov1api.Backup + for _, obj := range test.existingBackups { + if obj.Name == cloudBackupData.backup.Name { + existing = obj break } } - require.NotNil(t, location) - - // process the cloud backups - for _, cloudBackupData := range backupDataSet { - obj, err := client.VeleroV1().Backups(test.namespace).Get(context.TODO(), cloudBackupData.backup.Name, metav1.GetOptions{}) - require.NoError(t, err) - - // did this cloud backup already exist in the cluster? - var existing *velerov1api.Backup - for _, obj := range test.existingBackups { - if obj.Name == cloudBackupData.backup.Name { - existing = obj - break - } - } - if existing != nil { - // if this cloud backup already exists in the cluster, make sure that what we get from the - // client is the existing backup, not the cloud one. + if existing != nil { + // if this cloud backup already exists in the cluster, make sure that what we get from the + // client is the existing backup, not the cloud one. - // verify that the in-cluster backup has its storage location populated, if it's not already. - expected := existing.DeepCopy() - expected.Spec.StorageLocation = location.Name + // verify that the in-cluster backup has its storage location populated, if it's not already. + expected := existing.DeepCopy() + expected.Spec.StorageLocation = test.location.Name - assert.Equal(t, expected, obj) - } else { - // verify that the storage location field and label are set properly - assert.Equal(t, location.Name, obj.Spec.StorageLocation) + Expect(expected).To(BeEquivalentTo(obj)) + } else { + // verify that the storage location field and label are set properly + Expect(test.location.Name).To(BeEquivalentTo(obj.Spec.StorageLocation)) - locationName := location.Name - if test.longLocationNameEnabled { - locationName = label.GetValidName(locationName) - } - assert.Equal(t, locationName, obj.Labels[velerov1api.StorageLocationLabel]) - assert.Equal(t, true, len(obj.Labels[velerov1api.StorageLocationLabel]) <= validation.DNS1035LabelMaxLength) + locationName := test.location.Name + if test.longLocationNameEnabled { + locationName = label.GetValidName(locationName) } + Expect(locationName).To(BeEquivalentTo(obj.Labels[velerov1api.StorageLocationLabel])) + Expect(len(obj.Labels[velerov1api.StorageLocationLabel]) <= validation.DNS1035LabelMaxLength).To(BeTrue()) + } - // process the cloud pod volume backups for this backup, if any - for _, podVolumeBackup := range cloudBackupData.podVolumeBackups { - objPodVolumeBackup, err := client.VeleroV1().PodVolumeBackups(test.namespace).Get(context.TODO(), podVolumeBackup.Name, metav1.GetOptions{}) - require.NoError(t, err) - - // did this cloud pod volume backup already exist in the cluster? 
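The rewritten sync test above drives a controller-runtime fake client directly instead of a clientset plus informers. For readers unfamiliar with that pattern, here is a small self-contained sketch (editor's illustration, not part of this diff) of the create-then-Get-and-assert flow it relies on; a core ConfigMap stands in for Velero's Backup type, and the label key simply mirrors the storage-location label asserted above.

package example

import (
	"context"
	"testing"

	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// TestFakeClientGet shows the create-then-get pattern the rewritten sync
// tests use, with a ConfigMap standing in for a velerov1api.Backup.
func TestFakeClientGet(t *testing.T) {
	g := NewWithT(t)

	cl := fake.NewClientBuilder().WithScheme(scheme.Scheme).Build()

	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "ns-1",
			Name:      "backup-1",
			Labels:    map[string]string{"velero.io/storage-location": "foo"},
		},
	}
	g.Expect(cl.Create(context.TODO(), cm)).To(Succeed())

	got := &corev1.ConfigMap{}
	err := cl.Get(context.TODO(), types.NamespacedName{Namespace: "ns-1", Name: "backup-1"}, got)
	g.Expect(err).ToNot(HaveOccurred())
	g.Expect(got.Labels["velero.io/storage-location"]).To(Equal("foo"))
}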
- var existingPodVolumeBackup *velerov1api.PodVolumeBackup - for _, objPodVolumeBackup := range test.existingPodVolumeBackups { - if objPodVolumeBackup.Name == podVolumeBackup.Name { - existingPodVolumeBackup = objPodVolumeBackup - break - } + // process the cloud pod volume backups for this backup, if any + for _, podVolumeBackup := range cloudBackupData.podVolumeBackups { + objPodVolumeBackup := &velerov1api.PodVolumeBackup{} + err := client.Get( + context.TODO(), + types.NamespacedName{ + Namespace: podVolumeBackup.Namespace, + Name: podVolumeBackup.Name, + }, + objPodVolumeBackup) + Expect(err).ShouldNot(HaveOccurred()) + + // did this cloud pod volume backup already exist in the cluster? + var existingPodVolumeBackup *velerov1api.PodVolumeBackup + for _, objPodVolumeBackup := range test.existingPodVolumeBackups { + if objPodVolumeBackup.Name == podVolumeBackup.Name { + existingPodVolumeBackup = objPodVolumeBackup + break } + } - if existingPodVolumeBackup != nil { - // if this cloud pod volume backup already exists in the cluster, make sure that what we get from the - // client is the existing backup, not the cloud one. - expected := existingPodVolumeBackup.DeepCopy() - assert.Equal(t, expected, objPodVolumeBackup) - } + if existingPodVolumeBackup != nil { + // if this cloud pod volume backup already exists in the cluster, make sure that what we get from the + // client is the existing backup, not the cloud one. + expected := existingPodVolumeBackup.DeepCopy() + Expect(expected).To(BeEquivalentTo(objPodVolumeBackup)) } } } - }) - } -} + } + }) -func TestDeleteOrphanedBackups(t *testing.T) { - baseBuilder := func(name string) *builder.BackupBuilder { - return builder.ForBackup("ns-1", name).ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "default")) - } + It("Test deleting orphaned backups.", func() { + longLabelName := "the-really-long-location-name-that-is-much-more-than-63-characters" - tests := []struct { - name string - cloudBackups sets.String - k8sBackups []*velerov1api.Backup - namespace string - expectedDeletes sets.String - }{ - { - name: "no overlapping backups", - namespace: "ns-1", - cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerov1api.Backup{ - baseBuilder("backupA").Phase(velerov1api.BackupPhaseCompleted).Result(), - baseBuilder("backupB").Phase(velerov1api.BackupPhaseCompleted).Result(), - baseBuilder("backupC").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder := func(name string) *builder.BackupBuilder { + return builder.ForBackup("ns-1", name).ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "default")) + } + + tests := []struct { + name string + cloudBackups sets.String + k8sBackups []*velerov1api.Backup + namespace string + expectedDeletes sets.String + useLongBSLName bool + }{ + { + name: "no overlapping backups", + namespace: "ns-1", + cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backupA").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backupB").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backupC").Phase(velerov1api.BackupPhaseCompleted).Result(), + }, + expectedDeletes: sets.NewString("backupA", "backupB", "backupC"), }, - expectedDeletes: sets.NewString("backupA", "backupB", "backupC"), - }, - { - name: "some overlapping backups", - namespace: "ns-1", - cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerov1api.Backup{ - 
baseBuilder("backup-1").Phase(velerov1api.BackupPhaseCompleted).Result(), - baseBuilder("backup-2").Phase(velerov1api.BackupPhaseCompleted).Result(), - baseBuilder("backup-C").Phase(velerov1api.BackupPhaseCompleted).Result(), + { + name: "some overlapping backups", + namespace: "ns-1", + cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backup-1").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-2").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-C").Phase(velerov1api.BackupPhaseCompleted).Result(), + }, + expectedDeletes: sets.NewString("backup-C"), }, - expectedDeletes: sets.NewString("backup-C"), - }, - { - name: "all overlapping backups", - namespace: "ns-1", - cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerov1api.Backup{ - baseBuilder("backup-1").Phase(velerov1api.BackupPhaseCompleted).Result(), - baseBuilder("backup-2").Phase(velerov1api.BackupPhaseCompleted).Result(), - baseBuilder("backup-3").Phase(velerov1api.BackupPhaseCompleted).Result(), + { + name: "all overlapping backups", + namespace: "ns-1", + cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backup-1").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-2").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-3").Phase(velerov1api.BackupPhaseCompleted).Result(), + }, + expectedDeletes: sets.NewString(), }, - expectedDeletes: sets.NewString(), - }, - { - name: "no overlapping backups but including backups that are not complete", - namespace: "ns-1", - cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerov1api.Backup{ - baseBuilder("backupA").Phase(velerov1api.BackupPhaseCompleted).Result(), - baseBuilder("Deleting").Phase(velerov1api.BackupPhaseDeleting).Result(), - baseBuilder("Failed").Phase(velerov1api.BackupPhaseFailed).Result(), - baseBuilder("FailedValidation").Phase(velerov1api.BackupPhaseFailedValidation).Result(), - baseBuilder("InProgress").Phase(velerov1api.BackupPhaseInProgress).Result(), - baseBuilder("New").Phase(velerov1api.BackupPhaseNew).Result(), + { + name: "no overlapping backups but including backups that are not complete", + namespace: "ns-1", + cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backupA").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("Deleting").Phase(velerov1api.BackupPhaseDeleting).Result(), + baseBuilder("Failed").Phase(velerov1api.BackupPhaseFailed).Result(), + baseBuilder("FailedValidation").Phase(velerov1api.BackupPhaseFailedValidation).Result(), + baseBuilder("InProgress").Phase(velerov1api.BackupPhaseInProgress).Result(), + baseBuilder("New").Phase(velerov1api.BackupPhaseNew).Result(), + }, + expectedDeletes: sets.NewString("backupA"), }, - expectedDeletes: sets.NewString("backupA"), - }, - { - name: "all overlapping backups and all backups that are not complete", - namespace: "ns-1", - cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerov1api.Backup{ - baseBuilder("backup-1").Phase(velerov1api.BackupPhaseFailed).Result(), - baseBuilder("backup-2").Phase(velerov1api.BackupPhaseFailedValidation).Result(), - baseBuilder("backup-3").Phase(velerov1api.BackupPhaseInProgress).Result(), + { + name: "all overlapping backups and all backups that are not complete", + 
namespace: "ns-1", + cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backup-1").Phase(velerov1api.BackupPhaseFailed).Result(), + baseBuilder("backup-2").Phase(velerov1api.BackupPhaseFailedValidation).Result(), + baseBuilder("backup-3").Phase(velerov1api.BackupPhaseInProgress).Result(), + }, + expectedDeletes: sets.NewString(), }, - expectedDeletes: sets.NewString(), - }, - { - name: "no completed backups in other locations are deleted", - namespace: "ns-1", - cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerov1api.Backup{ - baseBuilder("backup-1").Phase(velerov1api.BackupPhaseCompleted).Result(), - baseBuilder("backup-2").Phase(velerov1api.BackupPhaseCompleted).Result(), - baseBuilder("backup-C").Phase(velerov1api.BackupPhaseCompleted).Result(), - - baseBuilder("backup-4").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "alternate")).Phase(velerov1api.BackupPhaseCompleted).Result(), - baseBuilder("backup-5").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "alternate")).Phase(velerov1api.BackupPhaseCompleted).Result(), - baseBuilder("backup-6").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "alternate")).Phase(velerov1api.BackupPhaseCompleted).Result(), + { + name: "no completed backups in other locations are deleted", + namespace: "ns-1", + cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backup-1").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-2").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-C").Phase(velerov1api.BackupPhaseCompleted).Result(), + + baseBuilder("backup-4").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "alternate")).Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-5").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "alternate")).Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-6").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "alternate")).Phase(velerov1api.BackupPhaseCompleted).Result(), + }, + expectedDeletes: sets.NewString("backup-C"), }, - expectedDeletes: sets.NewString("backup-C"), - }, - } + { + name: "some overlapping backups", + namespace: "ns-1", + cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), + k8sBackups: []*velerov1api.Backup{ + builder.ForBackup("ns-1", "backup-1"). + ObjectMeta( + builder.WithLabels(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779"), + ). + Phase(velerov1api.BackupPhaseCompleted). + Result(), + builder.ForBackup("ns-1", "backup-2"). + ObjectMeta( + builder.WithLabels(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779"), + ). + Phase(velerov1api.BackupPhaseCompleted). + Result(), + builder.ForBackup("ns-1", "backup-C"). + ObjectMeta( + builder.WithLabels(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779"), + ). + Phase(velerov1api.BackupPhaseCompleted). 
+ Result(), + }, + expectedDeletes: sets.NewString("backup-C"), + useLongBSLName: true, + }, + } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + for _, test := range tests { var ( - client = fake.NewSimpleClientset() - fakeClient = velerotest.NewFakeControllerRuntimeClient(t) - sharedInformers = informers.NewSharedInformerFactory(client, 0) + client = ctrlfake.NewClientBuilder().Build() + pluginManager = &pluginmocks.Manager{} + backupStores = make(map[string]*persistencemocks.BackupStore) ) - c := NewBackupSyncController( - client.VeleroV1(), - fakeClient, - client.VeleroV1(), - sharedInformers.Velero().V1().Backups().Lister(), - nil, // csiVSLister - time.Duration(0), - test.namespace, - nil, // csiSnapshotClient - nil, // kubeClient - "", - nil, // new plugin manager func - nil, // backupStoreGetter - velerotest.NewLogger(), - ).(*backupSyncController) + r := backupSyncReconciler{ + client: client, + namespace: test.namespace, + defaultBackupSyncPeriod: time.Second * 10, + newPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, + backupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores), + logger: velerotest.NewLogger(), + } expectedDeleteActions := make([]core.Action, 0) for _, backup := range test.k8sBackups { - // add test backup to informer - require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup), "Error adding backup to informer") - // add test backup to client - _, err := client.VeleroV1().Backups(test.namespace).Create(context.TODO(), backup, metav1.CreateOptions{}) - require.NoError(t, err, "Error adding backup to clientset") + err := client.Create(context.TODO(), backup, &ctrlClient.CreateOptions{}) + Expect(err).ShouldNot(HaveOccurred()) // if we expect this backup to be deleted, set up the expected DeleteAction if test.expectedDeletes.Has(backup.Name) { @@ -598,140 +579,45 @@ func TestDeleteOrphanedBackups(t *testing.T) { } } - c.deleteOrphanedBackups("default", test.cloudBackups, velerotest.NewLogger()) - - numBackups, err := numBackups(t, client, c.namespace) - assert.NoError(t, err) - - expected := len(test.k8sBackups) - len(test.expectedDeletes) - assert.Equal(t, expected, numBackups) - - velerotest.CompareActions(t, expectedDeleteActions, getDeleteActions(client.Actions())) - }) - } -} - -func TestStorageLabelsInDeleteOrphanedBackups(t *testing.T) { - longLabelName := "the-really-long-location-name-that-is-much-more-than-63-characters" - tests := []struct { - name string - cloudBackups sets.String - k8sBackups []*velerov1api.Backup - namespace string - expectedDeletes sets.String - }{ - { - name: "some overlapping backups", - namespace: "ns-1", - cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerov1api.Backup{ - builder.ForBackup("ns-1", "backup-1"). - ObjectMeta( - builder.WithLabels(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779"), - ). - Phase(velerov1api.BackupPhaseCompleted). - Result(), - builder.ForBackup("ns-1", "backup-2"). - ObjectMeta( - builder.WithLabels(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779"), - ). - Phase(velerov1api.BackupPhaseCompleted). - Result(), - builder.ForBackup("ns-1", "backup-C"). - ObjectMeta( - builder.WithLabels(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779"), - ). - Phase(velerov1api.BackupPhaseCompleted). 
- Result(), - }, - expectedDeletes: sets.NewString("backup-C"), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var ( - client = fake.NewSimpleClientset() - fakeClient = velerotest.NewFakeControllerRuntimeClient(t) - sharedInformers = informers.NewSharedInformerFactory(client, 0) - ) - - c := NewBackupSyncController( - client.VeleroV1(), - fakeClient, - client.VeleroV1(), - sharedInformers.Velero().V1().Backups().Lister(), - nil, // csiVSLister - time.Duration(0), - test.namespace, - nil, // csiSnapshotClient - nil, // kubeClient - "", - nil, // new plugin manager func - nil, // backupStoreGetter - velerotest.NewLogger(), - ).(*backupSyncController) - - expectedDeleteActions := make([]core.Action, 0) - - for _, backup := range test.k8sBackups { - // add test backup to informer - require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup), "Error adding backup to informer") - - // add test backup to client - _, err := client.VeleroV1().Backups(test.namespace).Create(context.TODO(), backup, metav1.CreateOptions{}) - require.NoError(t, err, "Error adding backup to clientset") - - // if we expect this backup to be deleted, set up the expected DeleteAction - if test.expectedDeletes.Has(backup.Name) { - actionDelete := core.NewDeleteAction( - velerov1api.SchemeGroupVersion.WithResource("backups"), - test.namespace, - backup.Name, - ) - expectedDeleteActions = append(expectedDeleteActions, actionDelete) - } + bslName := "default" + if test.useLongBSLName { + bslName = longLabelName } + r.deleteOrphanedBackups(ctx, bslName, test.cloudBackups, velerotest.NewLogger()) - c.deleteOrphanedBackups(longLabelName, test.cloudBackups, velerotest.NewLogger()) + numBackups, err := numBackups(client, r.namespace) + Expect(err).ShouldNot(HaveOccurred()) - numBackups, err := numBackups(t, client, c.namespace) - assert.NoError(t, err) + fmt.Println("") expected := len(test.k8sBackups) - len(test.expectedDeletes) - assert.Equal(t, expected, numBackups) + Expect(expected).To(BeEquivalentTo(numBackups)) + } + }) - velerotest.CompareActions(t, expectedDeleteActions, getDeleteActions(client.Actions())) - }) - } -} + It("Test moving default BSL at the head of BSL array.", func() { + locationList := &velerov1api.BackupStorageLocationList{} + objArray := make([]runtime.Object, 0) -func getDeleteActions(actions []core.Action) []core.Action { - var deleteActions []core.Action - for _, action := range actions { - if action.GetVerb() == "delete" { - deleteActions = append(deleteActions, action) + // Generate BSL array. 
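deleteOrphanedBackups, exercised just above, removes in-cluster backups whose objects are no longer present in object storage. A minimal sketch of that set comparison, using the same k8s.io/apimachinery sets package (editor's illustration, not part of this diff; the real controller additionally skips backups that are not in the Completed phase and that belong to other storage locations):

package example

import (
	"k8s.io/apimachinery/pkg/util/sets"
)

// orphanedNames returns the names of in-cluster backups that no longer exist
// in the bucket and are therefore candidates for a DeleteBackupRequest,
// mirroring the intent of deleteOrphanedBackups tested above.
func orphanedNames(clusterBackups []string, cloudBackups sets.String) sets.String {
	orphans := sets.NewString()
	for _, name := range clusterBackups {
		if !cloudBackups.Has(name) {
			orphans.Insert(name)
		}
	}
	return orphans
}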
+ locations := defaultLocationsList("velero") + for _, bsl := range locations { + objArray = append(objArray, bsl) } - } - return deleteActions -} -func numBackups(t *testing.T, c *fake.Clientset, ns string) (int, error) { - t.Helper() - existingK8SBackups, err := c.VeleroV1().Backups(ns).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return 0, err - } + meta.SetList(locationList, objArray) - return len(existingK8SBackups.Items), nil -} + testObjList := backupSyncSourceOrderFunc(locationList) + testObjArray, err := meta.ExtractList(testObjList) + Expect(err).ShouldNot(HaveOccurred()) -func numPodVolumeBackups(t *testing.T, c *fake.Clientset, ns string) (int, error) { - t.Helper() - existingK8SPodvolumeBackups, err := c.VeleroV1().PodVolumeBackups(ns).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return 0, err - } + expectLocation := testObjArray[0].(*velerov1api.BackupStorageLocation) + Expect(expectLocation.Spec.Default).To(BeEquivalentTo(true)) - return len(existingK8SPodvolumeBackups.Items), nil -} + // If BSL list without default BSL is passed in, the output should be same with input. + locationList.Items = testObjList.(*velerov1api.BackupStorageLocationList).Items[1:] + testObjList = backupSyncSourceOrderFunc(locationList) + Expect(testObjList).To(BeEquivalentTo(locationList)) + + }) +}) diff --git a/pkg/controller/download_request_controller.go b/pkg/controller/download_request_controller.go index a633daf0d2..c40d7aed3a 100644 --- a/pkg/controller/download_request_controller.go +++ b/pkg/controller/download_request_controller.go @@ -23,7 +23,6 @@ import ( "github.com/sirupsen/logrus" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/clock" ctrl "sigs.k8s.io/controller-runtime" kbclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -33,23 +32,39 @@ import ( "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" ) -// DownloadRequestReconciler reconciles a DownloadRequest object -type DownloadRequestReconciler struct { - Scheme *runtime.Scheme - Client kbclient.Client - Clock clock.Clock +// downloadRequestReconciler reconciles a DownloadRequest object +type downloadRequestReconciler struct { + client kbclient.Client + clock clock.Clock // use variables to refer to these functions so they can be // replaced with fakes for testing. - NewPluginManager func(logrus.FieldLogger) clientmgmt.Manager - BackupStoreGetter persistence.ObjectBackupStoreGetter + newPluginManager func(logrus.FieldLogger) clientmgmt.Manager + backupStoreGetter persistence.ObjectBackupStoreGetter - Log logrus.FieldLogger + log logrus.FieldLogger +} + +// NewDownloadRequestReconciler initializes and returns downloadRequestReconciler struct. 
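backupSyncSourceOrderFunc, covered by the test just above, is expected to move the default backup storage location to the head of the list while leaving the relative order of the rest untouched. A toy version of that ordering over a stand-in struct (editor's sketch under that assumption; the real function operates on a BackupStorageLocationList via the apimachinery meta helpers used in the test):

package example

// location is a toy stand-in for velerov1api.BackupStorageLocation; only the
// Default flag matters for the ordering illustrated here.
type location struct {
	Name    string
	Default bool
}

// defaultFirst returns a copy of locs with the first default location (if
// any) moved to the head, which is the behaviour the BSL ordering test
// above asserts.
func defaultFirst(locs []location) []location {
	head := make([]location, 0, 1)
	rest := make([]location, 0, len(locs))
	for _, l := range locs {
		if l.Default && len(head) == 0 {
			head = append(head, l)
			continue
		}
		rest = append(rest, l)
	}
	return append(head, rest...)
}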
+func NewDownloadRequestReconciler( + client kbclient.Client, + clock clock.Clock, + newPluginManager func(logrus.FieldLogger) clientmgmt.Manager, + backupStoreGetter persistence.ObjectBackupStoreGetter, + log logrus.FieldLogger, +) *downloadRequestReconciler { + return &downloadRequestReconciler{ + client: client, + clock: clock, + newPluginManager: newPluginManager, + backupStoreGetter: backupStoreGetter, + log: log, + } } // +kubebuilder:rbac:groups=velero.io,resources=downloadrequests,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=velero.io,resources=downloadrequests/status,verbs=get;update;patch -func (r *DownloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - log := r.Log.WithFields(logrus.Fields{ +func (r *downloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.log.WithFields(logrus.Fields{ "controller": "download-request", "downloadRequest": req.NamespacedName, }) @@ -57,7 +72,7 @@ func (r *DownloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Requ // Fetch the DownloadRequest instance. log.Debug("Getting DownloadRequest") downloadRequest := &velerov1api.DownloadRequest{} - if err := r.Client.Get(ctx, req.NamespacedName, downloadRequest); err != nil { + if err := r.client.Get(ctx, req.NamespacedName, downloadRequest); err != nil { if apierrors.IsNotFound(err) { log.Debug("Unable to find DownloadRequest") return ctrl.Result{}, nil @@ -70,19 +85,19 @@ func (r *DownloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Requ original := downloadRequest.DeepCopy() defer func() { // Always attempt to Patch the downloadRequest object and status after each reconciliation. - if err := r.Client.Patch(ctx, downloadRequest, kbclient.MergeFrom(original)); err != nil { + if err := r.client.Patch(ctx, downloadRequest, kbclient.MergeFrom(original)); err != nil { log.WithError(err).Error("Error updating download request") return } }() if downloadRequest.Status != (velerov1api.DownloadRequestStatus{}) && downloadRequest.Status.Expiration != nil { - if downloadRequest.Status.Expiration.Time.Before(r.Clock.Now()) { + if downloadRequest.Status.Expiration.Time.Before(r.clock.Now()) { // Delete any request that is expired, regardless of the phase: it is not // worth proceeding and trying/retrying to find it. log.Debug("DownloadRequest has expired - deleting") - if err := r.Client.Delete(ctx, downloadRequest); err != nil { + if err := r.client.Delete(ctx, downloadRequest); err != nil { log.WithError(err).Error("Error deleting an expired download request") return ctrl.Result{}, errors.WithStack(err) } @@ -103,12 +118,12 @@ func (r *DownloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Requ if downloadRequest.Status.Phase == "" || downloadRequest.Status.Phase == velerov1api.DownloadRequestPhaseNew { // Update the expiration. 
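The reconciler persists its changes with the deep-copy / MergeFrom / Patch idiom rather than a full Update. A self-contained sketch of that pattern (editor's illustration, not part of this diff; a ConfigMap stands in for the DownloadRequest object):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// patchStatusExample shows the copy-mutate-Patch(MergeFrom) pattern used in
// the deferred patch of the download request reconciler above.
func patchStatusExample(ctx context.Context, c kbclient.Client, cm *corev1.ConfigMap) error {
	original := cm.DeepCopy()

	// Mutate the live object; the patch below carries only this delta.
	if cm.Data == nil {
		cm.Data = map[string]string{}
	}
	cm.Data["phase"] = "Processed"

	// MergeFrom computes a merge patch between the original and the mutated object.
	return c.Patch(ctx, cm, kbclient.MergeFrom(original))
}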
- downloadRequest.Status.Expiration = &metav1.Time{Time: r.Clock.Now().Add(persistence.DownloadURLTTL)} + downloadRequest.Status.Expiration = &metav1.Time{Time: r.clock.Now().Add(persistence.DownloadURLTTL)} if downloadRequest.Spec.Target.Kind == velerov1api.DownloadTargetKindRestoreLog || downloadRequest.Spec.Target.Kind == velerov1api.DownloadTargetKindRestoreResults { restore := &velerov1api.Restore{} - if err := r.Client.Get(ctx, kbclient.ObjectKey{ + if err := r.client.Get(ctx, kbclient.ObjectKey{ Namespace: downloadRequest.Namespace, Name: downloadRequest.Spec.Target.Name, }, restore); err != nil { @@ -118,7 +133,7 @@ func (r *DownloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Requ } backup := &velerov1api.Backup{} - if err := r.Client.Get(ctx, kbclient.ObjectKey{ + if err := r.client.Get(ctx, kbclient.ObjectKey{ Namespace: downloadRequest.Namespace, Name: backupName, }, backup); err != nil { @@ -126,17 +141,17 @@ func (r *DownloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Requ } location := &velerov1api.BackupStorageLocation{} - if err := r.Client.Get(ctx, kbclient.ObjectKey{ + if err := r.client.Get(ctx, kbclient.ObjectKey{ Namespace: backup.Namespace, Name: backup.Spec.StorageLocation, }, location); err != nil { return ctrl.Result{}, errors.WithStack(err) } - pluginManager := r.NewPluginManager(log) + pluginManager := r.newPluginManager(log) defer pluginManager.CleanupClients() - backupStore, err := r.BackupStoreGetter.Get(location, pluginManager, log) + backupStore, err := r.backupStoreGetter.Get(location, pluginManager, log) if err != nil { log.WithError(err).Error("Error getting a backup store") return ctrl.Result{}, errors.WithStack(err) @@ -149,7 +164,7 @@ func (r *DownloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Requ downloadRequest.Status.Phase = velerov1api.DownloadRequestPhaseProcessed // Update the expiration again to extend the time we wait (the TTL) to start after successfully processing the URL. - downloadRequest.Status.Expiration = &metav1.Time{Time: r.Clock.Now().Add(persistence.DownloadURLTTL)} + downloadRequest.Status.Expiration = &metav1.Time{Time: r.clock.Now().Add(persistence.DownloadURLTTL)} } // Requeue is mostly to handle deleting any expired requests that were not @@ -157,7 +172,7 @@ func (r *DownloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{Requeue: true}, nil } -func (r *DownloadRequestReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *downloadRequestReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&velerov1api.DownloadRequest{}). 
Complete(r) diff --git a/pkg/controller/download_request_controller_test.go b/pkg/controller/download_request_controller_test.go index f84bdaed0c..340a6e5ce2 100644 --- a/pkg/controller/download_request_controller_test.go +++ b/pkg/controller/download_request_controller_test.go @@ -87,7 +87,7 @@ var _ = Describe("Download Request Reconciler", func() { test.downloadRequest.Status.Expiration = &metav1.Time{Time: rClock.Now().Add(-1 * time.Minute)} } - fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme) + fakeClient := fake.NewClientBuilder().WithScheme(scheme.Scheme).Build() err = fakeClient.Create(context.TODO(), test.downloadRequest) Expect(err).To(BeNil()) @@ -109,13 +109,13 @@ var _ = Describe("Download Request Reconciler", func() { // Setup reconciler Expect(velerov1api.AddToScheme(scheme.Scheme)).To(Succeed()) - r := DownloadRequestReconciler{ - Client: fakeClient, - Clock: rClock, - NewPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, - BackupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores), - Log: velerotest.NewLogger(), - } + r := NewDownloadRequestReconciler( + fakeClient, + rClock, + func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, + NewFakeObjectBackupStoreGetter(backupStores), + velerotest.NewLogger(), + ) if test.backupLocation != nil && test.expectGetsURL { backupStores[test.backupLocation.Name].On("GetDownloadURL", test.downloadRequest.Spec.Target).Return("a-url", nil) @@ -136,7 +136,7 @@ var _ = Describe("Download Request Reconciler", func() { } instance := &velerov1api.DownloadRequest{} - err = r.Client.Get(ctx, kbclient.ObjectKey{Name: test.downloadRequest.Name, Namespace: test.downloadRequest.Namespace}, instance) + err = r.client.Get(ctx, kbclient.ObjectKey{Name: test.downloadRequest.Name, Namespace: test.downloadRequest.Namespace}, instance) if test.expired { Expect(instance).ToNot(Equal(test.downloadRequest)) @@ -153,7 +153,7 @@ var _ = Describe("Download Request Reconciler", func() { if test.expectGetsURL { Expect(string(instance.Status.Phase)).To(Equal(string(velerov1api.DownloadRequestPhaseProcessed))) Expect(instance.Status.DownloadURL).To(Equal("a-url")) - Expect(velerotest.TimesAreEqual(instance.Status.Expiration.Time, r.Clock.Now().Add(signedURLTTL))).To(BeTrue()) + Expect(velerotest.TimesAreEqual(instance.Status.Expiration.Time, r.clock.Now().Add(signedURLTTL))).To(BeTrue()) } }, diff --git a/pkg/controller/gc_controller.go b/pkg/controller/gc_controller.go index b8297aa2a9..61b22996a6 100644 --- a/pkg/controller/gc_controller.go +++ b/pkg/controller/gc_controller.go @@ -23,19 +23,17 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/client-go/tools/cache" - + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" pkgbackup "github.com/vmware-tanzu/velero/pkg/backup" - velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1" - velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1" - velerov1listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" 
"github.com/vmware-tanzu/velero/pkg/label" + "github.com/vmware-tanzu/velero/pkg/util/kube" ) const ( @@ -46,111 +44,94 @@ const ( gcFailureBSLReadOnly = "BSLReadOnly" ) -// gcController creates DeleteBackupRequests for expired backups. -type gcController struct { - *genericController - - backupLister velerov1listers.BackupLister - deleteBackupRequestLister velerov1listers.DeleteBackupRequestLister - deleteBackupRequestClient velerov1client.DeleteBackupRequestsGetter - kbClient client.Client - frequency time.Duration - - clock clock.Clock +// gcReconciler creates DeleteBackupRequests for expired backups. +type gcReconciler struct { + client.Client + logger logrus.FieldLogger + clock clock.Clock + frequency time.Duration } -// NewGCController constructs a new gcController. -func NewGCController( +// NewGCReconciler constructs a new gcReconciler. +func NewGCReconciler( logger logrus.FieldLogger, - backupInformer velerov1informers.BackupInformer, - deleteBackupRequestLister velerov1listers.DeleteBackupRequestLister, - deleteBackupRequestClient velerov1client.DeleteBackupRequestsGetter, - kbClient client.Client, + client client.Client, frequency time.Duration, -) Interface { - c := &gcController{ - genericController: newGenericController(GarbageCollection, logger), - clock: clock.RealClock{}, - backupLister: backupInformer.Lister(), - deleteBackupRequestLister: deleteBackupRequestLister, - deleteBackupRequestClient: deleteBackupRequestClient, - kbClient: kbClient, +) *gcReconciler { + gcr := &gcReconciler{ + Client: client, + logger: logger, + clock: clock.RealClock{}, + frequency: frequency, } - - c.syncHandler = c.processQueueItem - c.resyncPeriod = frequency - if c.resyncPeriod <= 0 { - c.resyncPeriod = defaultGCFrequency + if gcr.frequency <= 0 { + gcr.frequency = defaultGCFrequency } - logger.Infof("Garbage collection frequency: %s", c.resyncPeriod.String()) - c.resyncFunc = c.enqueueAllBackups - - backupInformer.Informer().AddEventHandler( - cache.ResourceEventHandlerFuncs{ - AddFunc: c.enqueue, - UpdateFunc: func(_, obj interface{}) { c.enqueue(obj) }, - }, - ) - - return c + return gcr } -// enqueueAllBackups lists all backups from cache and enqueues all of them so we can check each one -// for expiration. -func (c *gcController) enqueueAllBackups() { - c.logger.Debug("gcController.enqueueAllBackups") - - backups, err := c.backupLister.List(labels.Everything()) - if err != nil { - c.logger.WithError(errors.WithStack(err)).Error("error listing backups") - return - } - - for _, backup := range backups { - c.enqueue(backup) - } +// GCController only watches on CreateEvent for ensuring every new backup will be taken care of. +// Other Events will be filtered to decrease the number of reconcile call. Especially UpdateEvent must be filtered since we removed +// the backup status as the sub-resource of backup in v1.9, every change on it will be treated as UpdateEvent and trigger reconcile call. +func (c *gcReconciler) SetupWithManager(mgr ctrl.Manager) error { + s := kube.NewPeriodicalEnqueueSource(c.logger, mgr.GetClient(), &velerov1api.BackupList{}, c.frequency, kube.PeriodicalEnqueueSourceOption{}) + return ctrl.NewControllerManagedBy(mgr). + For(&velerov1api.Backup{}, builder.WithPredicates(predicate.Funcs{ + UpdateFunc: func(ue event.UpdateEvent) bool { + return false + }, + DeleteFunc: func(de event.DeleteEvent) bool { + return false + }, + GenericFunc: func(ge event.GenericEvent) bool { + return false + }, + })). + Watches(s, nil). 
+ Complete(c) } -func (c *gcController) processQueueItem(key string) error { - log := c.logger.WithField("backup", key) - - ns, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return errors.Wrap(err, "error splitting queue key") - } - - backup, err := c.backupLister.Backups(ns).Get(name) - if apierrors.IsNotFound(err) { - log.Debug("Unable to find backup") - return nil - } - if err != nil { - return errors.Wrap(err, "error getting backup") +// +kubebuilder:rbac:groups=velero.io,resources=backups,verbs=get;list;watch;update +// +kubebuilder:rbac:groups=velero.io,resources=backups/status,verbs=get +// +kubebuilder:rbac:groups=velero.io,resources=deletebackuprequests,verbs=get;list;watch;create; +// +kubebuilder:rbac:groups=velero.io,resources=deletebackuprequests/status,verbs=get +// +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations,verbs=get +func (c *gcReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := c.logger.WithField("gc backup", req.String()) + log.Debug("gcController getting backup") + + backup := &velerov1api.Backup{} + if err := c.Get(ctx, req.NamespacedName, backup); err != nil { + if apierrors.IsNotFound(err) { + log.WithError(err).Error("backup not found") + return ctrl.Result{}, nil + } + return ctrl.Result{}, errors.Wrapf(err, "error getting backup %s", req.String()) } + log.Debugf("backup: %s", backup.Name) log = c.logger.WithFields( logrus.Fields{ - "backup": key, + "backup": req.String(), "expiration": backup.Status.Expiration, }, ) now := c.clock.Now() - if backup.Status.Expiration == nil || backup.Status.Expiration.After(now) { log.Debug("Backup has not expired yet, skipping") - return nil + return ctrl.Result{}, nil } - log.Info("Backup has expired") + log.Infof("Backup:%s has expired", backup.Name) if backup.Labels == nil { backup.Labels = make(map[string]string) } loc := &velerov1api.BackupStorageLocation{} - if err := c.kbClient.Get(context.Background(), client.ObjectKey{ - Namespace: ns, + if err := c.Get(ctx, client.ObjectKey{ + Namespace: req.Namespace, Name: backup.Spec.StorageLocation, }, loc); err != nil { if apierrors.IsNotFound(err) { @@ -159,53 +140,56 @@ func (c *gcController) processQueueItem(key string) error { } else { backup.Labels[garbageCollectionFailure] = gcFailureBSLCannotGet } - if err := c.kbClient.Update(context.Background(), backup); err != nil { + if err := c.Update(ctx, backup); err != nil { log.WithError(err).Error("error updating backup labels") } - return errors.Wrap(err, "error getting backup storage location") + return ctrl.Result{}, errors.Wrap(err, "error getting backup storage location") } if loc.Spec.AccessMode == velerov1api.BackupStorageLocationAccessModeReadOnly { log.Infof("Backup cannot be garbage-collected because backup storage location %s is currently in read-only mode", loc.Name) backup.Labels[garbageCollectionFailure] = gcFailureBSLReadOnly - if err := c.kbClient.Update(context.Background(), backup); err != nil { + if err := c.Update(ctx, backup); err != nil { log.WithError(err).Error("error updating backup labels") } - return nil + return ctrl.Result{}, nil } // remove gc fail error label after this point delete(backup.Labels, garbageCollectionFailure) - if err := c.kbClient.Update(context.Background(), backup); err != nil { + if err := c.Update(ctx, backup); err != nil { log.WithError(err).Error("error updating backup labels") } - selector := labels.SelectorFromSet(labels.Set(map[string]string{ + selector := client.MatchingLabels{ 
velerov1api.BackupNameLabel: label.GetValidName(backup.Name), velerov1api.BackupUIDLabel: string(backup.UID), - })) + } - dbrs, err := c.deleteBackupRequestLister.DeleteBackupRequests(ns).List(selector) - if err != nil { - return errors.Wrap(err, "error listing existing DeleteBackupRequests for backup") + dbrs := &velerov1api.DeleteBackupRequestList{} + if err := c.List(ctx, dbrs, selector); err != nil { + log.WithError(err).Error("error listing DeleteBackupRequests") + return ctrl.Result{}, errors.Wrap(err, "error listing existing DeleteBackupRequests for backup") } + log.Debugf("length of dbrs:%d", len(dbrs.Items)) // if there's an existing unprocessed deletion request for this backup, don't create // another one - for _, dbr := range dbrs { + for _, dbr := range dbrs.Items { switch dbr.Status.Phase { case "", velerov1api.DeleteBackupRequestPhaseNew, velerov1api.DeleteBackupRequestPhaseInProgress: log.Info("Backup already has a pending deletion request") - return nil + return ctrl.Result{}, nil } } log.Info("Creating a new deletion request") - req := pkgbackup.NewDeleteBackupRequest(backup.Name, string(backup.UID)) - - if _, err = c.deleteBackupRequestClient.DeleteBackupRequests(ns).Create(context.TODO(), req, metav1.CreateOptions{}); err != nil { - return errors.Wrap(err, "error creating DeleteBackupRequest") + ndbr := pkgbackup.NewDeleteBackupRequest(backup.Name, string(backup.UID)) + ndbr.SetNamespace(backup.Namespace) + if err := c.Create(ctx, ndbr); err != nil { + log.WithError(err).Error("error creating DeleteBackupRequests") + return ctrl.Result{}, errors.Wrap(err, "error creating DeleteBackupRequest") } - return nil + return ctrl.Result{}, nil } diff --git a/pkg/controller/gc_controller_test.go b/pkg/controller/gc_controller_test.go index c373064e48..772d123c0a 100644 --- a/pkg/controller/gc_controller_test.go +++ b/pkg/controller/gc_controller_test.go @@ -18,152 +18,42 @@ package controller import ( "context" - "fmt" - "sort" "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/apimachinery/pkg/watch" - core "k8s.io/client-go/testing" - + ctrl "sigs.k8s.io/controller-runtime" kbclient "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake" - informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions" velerotest "github.com/vmware-tanzu/velero/pkg/test" - "github.com/vmware-tanzu/velero/pkg/util/kube" ) -func TestGCControllerEnqueueAllBackups(t *testing.T) { - var ( - client = fake.NewSimpleClientset() - sharedInformers = informers.NewSharedInformerFactory(client, 0) - - controller = NewGCController( - velerotest.NewLogger(), - sharedInformers.Velero().V1().Backups(), - sharedInformers.Velero().V1().DeleteBackupRequests().Lister(), - client.VeleroV1(), - nil, - defaultGCFrequency, - ).(*gcController) - ) - - keys := make(chan string) - - controller.syncHandler = func(key string) error { - keys <- key - return nil - } - - var expected []string - - for i := 0; i < 3; i++ { - backup := builder.ForBackup(velerov1api.DefaultNamespace, fmt.Sprintf("backup-%d", i)).Result() - sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup) - expected = 
append(expected, kube.NamespaceAndName(backup)) - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - go controller.Run(ctx, 1) - - var received []string - -Loop: - for { - select { - case <-ctx.Done(): - t.Fatal("test timed out") - case key := <-keys: - received = append(received, key) - if len(received) == len(expected) { - break Loop - } - } - } - - sort.Strings(expected) - sort.Strings(received) - assert.Equal(t, expected, received) -} - -func TestGCControllerHasUpdateFunc(t *testing.T) { - backup := defaultBackup().Result() - expected := kube.NamespaceAndName(backup) - - client := fake.NewSimpleClientset(backup) - - fakeWatch := watch.NewFake() - defer fakeWatch.Stop() - client.PrependWatchReactor("backups", core.DefaultWatchReactor(fakeWatch, nil)) - - sharedInformers := informers.NewSharedInformerFactory(client, 0) - - controller := NewGCController( +func mockGCReconciler(fakeClient kbclient.Client, fakeClock *clock.FakeClock, freq time.Duration) *gcReconciler { + gcr := NewGCReconciler( velerotest.NewLogger(), - sharedInformers.Velero().V1().Backups(), - sharedInformers.Velero().V1().DeleteBackupRequests().Lister(), - client.VeleroV1(), - nil, - defaultGCFrequency, - ).(*gcController) - - keys := make(chan string) - - controller.syncHandler = func(key string) error { - keys <- key - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - go sharedInformers.Start(ctx.Done()) - go controller.Run(ctx, 1) - - // wait for the AddFunc - select { - case <-ctx.Done(): - t.Fatal("test timed out waiting for AddFunc") - case key := <-keys: - assert.Equal(t, expected, key) - } - - backup.Status.Version = 1234 - fakeWatch.Add(backup) - - // wait for the UpdateFunc - select { - case <-ctx.Done(): - t.Fatal("test timed out waiting for UpdateFunc") - case key := <-keys: - assert.Equal(t, expected, key) - } + fakeClient, + freq, + ) + gcr.clock = fakeClock + return gcr } -func TestGCControllerProcessQueueItem(t *testing.T) { - +func TestGCReconcile(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) - defaultBackupLocation := builder.ForBackupStorageLocation("velero", "default").Result() + defaultBackupLocation := builder.ForBackupStorageLocation(velerov1api.DefaultNamespace, "default").Result() tests := []struct { - name string - backup *velerov1api.Backup - deleteBackupRequests []*velerov1api.DeleteBackupRequest - backupLocation *velerov1api.BackupStorageLocation - expectDeletion bool - createDeleteBackupRequestError bool - expectError bool + name string + backup *velerov1api.Backup + deleteBackupRequests []*velerov1api.DeleteBackupRequest + backupLocation *velerov1api.BackupStorageLocation + expectError bool }{ { name: "can't find backup - no error", @@ -172,25 +62,21 @@ func TestGCControllerProcessQueueItem(t *testing.T) { name: "unexpired backup is not deleted", backup: defaultBackup().Expiration(fakeClock.Now().Add(time.Minute)).StorageLocation("default").Result(), backupLocation: defaultBackupLocation, - expectDeletion: false, }, { name: "expired backup in read-only storage location is not deleted", backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Minute)).StorageLocation("read-only").Result(), backupLocation: builder.ForBackupStorageLocation("velero", "read-only").AccessMode(velerov1api.BackupStorageLocationAccessModeReadOnly).Result(), - expectDeletion: false, }, { name: "expired backup in read-write storage location is deleted", backup: 
defaultBackup().Expiration(fakeClock.Now().Add(-time.Minute)).StorageLocation("read-write").Result(), backupLocation: builder.ForBackupStorageLocation("velero", "read-write").AccessMode(velerov1api.BackupStorageLocationAccessModeReadWrite).Result(), - expectDeletion: true, }, { name: "expired backup with no pending deletion requests is deleted", backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Second)).StorageLocation("default").Result(), backupLocation: defaultBackupLocation, - expectDeletion: true, }, { name: "expired backup with a pending deletion request is not deleted", @@ -211,7 +97,6 @@ func TestGCControllerProcessQueueItem(t *testing.T) { }, }, }, - expectDeletion: false, }, { name: "expired backup with only processed deletion requests is deleted", @@ -232,72 +117,31 @@ func TestGCControllerProcessQueueItem(t *testing.T) { }, }, }, - expectDeletion: true, - }, - { - name: "create DeleteBackupRequest error returns an error", - backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Second)).StorageLocation("default").Result(), - backupLocation: defaultBackupLocation, - expectDeletion: true, - createDeleteBackupRequestError: true, - expectError: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - var ( - client = fake.NewSimpleClientset() - sharedInformers = informers.NewSharedInformerFactory(client, 0) - ) - - var fakeClient kbclient.Client - if test.backupLocation != nil { - fakeClient = velerotest.NewFakeControllerRuntimeClient(t, test.backupLocation) - } else { - fakeClient = velerotest.NewFakeControllerRuntimeClient(t) + if test.backup == nil { + return } - controller := NewGCController( - velerotest.NewLogger(), - sharedInformers.Velero().V1().Backups(), - sharedInformers.Velero().V1().DeleteBackupRequests().Lister(), - client.VeleroV1(), - fakeClient, - defaultGCFrequency, - ).(*gcController) - controller.clock = fakeClock + initObjs := []runtime.Object{} + initObjs = append(initObjs, test.backup) - var key string - if test.backup != nil { - key = kube.NamespaceAndName(test.backup) - sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(test.backup) + if test.backupLocation != nil { + initObjs = append(initObjs, test.backupLocation) } for _, dbr := range test.deleteBackupRequests { - sharedInformers.Velero().V1().DeleteBackupRequests().Informer().GetStore().Add(dbr) + initObjs = append(initObjs, dbr) } - if test.createDeleteBackupRequestError { - client.PrependReactor("create", "deletebackuprequests", func(action core.Action) (bool, runtime.Object, error) { - return true, nil, errors.New("foo") - }) - } - - err := controller.processQueueItem(key) + fakeClient := velerotest.NewFakeControllerRuntimeClient(t, initObjs...) 
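These cases rely on an injected clock rather than wall time: the reconciler reads "now" from a clock.Clock field, so the test swaps in a FakeClock and expiration becomes deterministic. A minimal, self-contained sketch of that pattern (editor's illustration, not part of this diff):

package example

import (
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

// TestExpirationWithFakeClock shows the injectable-clock pattern the GC
// reconciler test relies on: time is pinned with a FakeClock instead of
// sleeping or depending on wall-clock time.
func TestExpirationWithFakeClock(t *testing.T) {
	fakeClock := clock.NewFakeClock(time.Now())

	expiration := fakeClock.Now().Add(-time.Minute) // already expired
	if !expiration.Before(fakeClock.Now()) {
		t.Fatal("expected the backup to be treated as expired")
	}

	// Advancing the fake clock is how a test would age an unexpired backup.
	fakeClock.Step(2 * time.Minute)
}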
+ reconciler := mockGCReconciler(fakeClient, fakeClock, defaultGCFrequency) + _, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.backup.Namespace, Name: test.backup.Name}}) gotErr := err != nil assert.Equal(t, test.expectError, gotErr) - - if test.expectDeletion { - require.Len(t, client.Actions(), 1) - - createAction, ok := client.Actions()[0].(core.CreateAction) - require.True(t, ok) - - assert.Equal(t, "deletebackuprequests", createAction.GetResource().Resource) - } else { - assert.Len(t, client.Actions(), 0) - } }) } } diff --git a/pkg/controller/pod_volume_backup_controller.go b/pkg/controller/pod_volume_backup_controller.go index abec6d601f..6b4cf22268 100644 --- a/pkg/controller/pod_volume_backup_controller.go +++ b/pkg/controller/pod_volume_backup_controller.go @@ -19,8 +19,6 @@ package controller import ( "context" "fmt" - "os" - "strings" "time" "github.com/pkg/errors" @@ -36,29 +34,35 @@ import ( "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/metrics" + "github.com/vmware-tanzu/velero/pkg/podvolume" + "github.com/vmware-tanzu/velero/pkg/repository" repokey "github.com/vmware-tanzu/velero/pkg/repository/keys" - "github.com/vmware-tanzu/velero/pkg/restic" + "github.com/vmware-tanzu/velero/pkg/uploader" + "github.com/vmware-tanzu/velero/pkg/uploader/provider" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/kube" ) -// BackupExecuter runs backups. -type BackupExecuter interface { - RunBackup(*restic.Command, logrus.FieldLogger, func(velerov1api.PodVolumeOperationProgress)) (string, string, error) - GetSnapshotID(*restic.Command) (string, error) -} +// For unit test to mock function +var NewUploaderProviderFunc = provider.NewUploaderProvider // PodVolumeBackupReconciler reconciles a PodVolumeBackup object type PodVolumeBackupReconciler struct { - Scheme *runtime.Scheme - Client client.Client - Clock clock.Clock - Metrics *metrics.ServerMetrics - CredsFileStore credentials.FileStore - NodeName string - FileSystem filesystem.Interface - ResticExec BackupExecuter - Log logrus.FieldLogger + Scheme *runtime.Scheme + Client client.Client + Clock clock.Clock + Metrics *metrics.ServerMetrics + CredentialGetter *credentials.CredentialGetter + NodeName string + FileSystem filesystem.Interface + Log logrus.FieldLogger +} + +type BackupProgressUpdater struct { + PodVolumeBackup *velerov1api.PodVolumeBackup + Log logrus.FieldLogger + Ctx context.Context + Cli client.Client } // +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete @@ -77,7 +81,6 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ } return ctrl.Result{}, errors.Wrap(err, "getting PodVolumeBackup") } - if len(pvb.OwnerReferences) == 1 { log = log.WithField( "backup", @@ -120,16 +123,19 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ return r.updateStatusToFailed(ctx, &pvb, err, fmt.Sprintf("getting pod %s/%s", pvb.Spec.Pod.Namespace, pvb.Spec.Pod.Name), log) } - var resticDetails resticDetails - resticCmd, err := r.buildResticCommand(ctx, log, &pvb, &pod, &resticDetails) + volDir, err := kube.GetVolumeDirectory(ctx, log, &pod, pvb.Spec.Volume, r.Client) if err != nil { - return r.updateStatusToFailed(ctx, &pvb, err, "building Restic command", log) + return r.updateStatusToFailed(ctx, &pvb, err, 
"getting volume directory name", log) } - defer func() { - os.Remove(resticDetails.credsFile) - os.Remove(resticDetails.caCertFile) - }() + pathGlob := fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(pvb.Spec.Pod.UID), volDir) + log.WithField("pathGlob", pathGlob).Debug("Looking for path matching glob") + + path, err := kube.SinglePathMatch(pathGlob, r.FileSystem, log) + if err != nil { + return r.updateStatusToFailed(ctx, &pvb, err, "identifying unique volume path on host", log) + } + log.WithField("path", path).Debugf("Found path matching glob") backupLocation := &velerov1api.BackupStorageLocation{} if err := r.Client.Get(context.Background(), client.ObjectKey{ @@ -139,47 +145,51 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{}, errors.Wrap(err, "error getting backup storage location") } - // #4820: restrieve insecureSkipTLSVerify from BSL configuration for - // AWS plugin. If nothing is return, that means insecureSkipTLSVerify - // is not enable for Restic command. - skipTLSRet := restic.GetInsecureSkipTLSVerifyFromBSL(backupLocation, log) - if len(skipTLSRet) > 0 { - resticCmd.ExtraFlags = append(resticCmd.ExtraFlags, skipTLSRet) + backupRepo, err := repository.GetBackupRepository(ctx, r.Client, pvb.Namespace, repository.BackupRepositoryKey{ + VolumeNamespace: pvb.Spec.Pod.Namespace, + BackupLocation: pvb.Spec.BackupStorageLocation, + RepositoryType: podvolume.GetPvbRepositoryType(&pvb), + }) + if err != nil { + return ctrl.Result{}, errors.Wrap(err, "error getting backup repository") } - var stdout, stderr string - - var emptySnapshot bool - stdout, stderr, err = r.ResticExec.RunBackup(resticCmd, log, r.updateBackupProgressFunc(&pvb, log)) + var uploaderProv provider.Provider + uploaderProv, err = NewUploaderProviderFunc(ctx, r.Client, pvb.Spec.UploaderType, pvb.Spec.RepoIdentifier, + backupLocation, backupRepo, r.CredentialGetter, repokey.RepoKeySelector(), log) if err != nil { - if strings.Contains(stderr, "snapshot is empty") { - emptySnapshot = true + return r.updateStatusToFailed(ctx, &pvb, err, "error creating uploader", log) + } + + // If this is a PVC, look for the most recent completed pod volume backup for it and get + // its snapshot ID to do new backup based on it. Without this, + // if the pod using the PVC (and therefore the directory path under /host_pods/) has + // changed since the PVC's last backup, for backup, it will not be able to identify a suitable + // parent snapshot to use, and will have to do a full rescan of the contents of the PVC. 
+ var parentSnapshotID string + if pvcUID, ok := pvb.Labels[velerov1api.PVCUIDLabel]; ok { + parentSnapshotID = r.getParentSnapshot(ctx, log, pvcUID, &pvb) + if parentSnapshotID == "" { + log.Info("No parent snapshot found for PVC, not based on parent snapshot for this backup") } else { - return r.updateStatusToFailed(ctx, &pvb, err, fmt.Sprintf("running Restic backup, stderr=%s", stderr), log) + log.WithField("parentSnapshotID", parentSnapshotID).Info("Based on parent snapshot for this backup") } } - log.Debugf("Ran command=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr) - var snapshotID string - if !emptySnapshot { - cmd := restic.GetSnapshotCommand(pvb.Spec.RepoIdentifier, resticDetails.credsFile, pvb.Spec.Tags) - cmd.Env = resticDetails.envs - cmd.CACertFile = resticDetails.caCertFile - - // #4820: also apply the insecureTLS flag to Restic snapshots command - if len(skipTLSRet) > 0 { - cmd.ExtraFlags = append(cmd.ExtraFlags, skipTLSRet) + defer func() { + if err := uploaderProv.Close(ctx); err != nil { + log.Errorf("failed to close uploader provider with error %v", err) } + }() - snapshotID, err = r.ResticExec.GetSnapshotID(cmd) - if err != nil { - return r.updateStatusToFailed(ctx, &pvb, err, "getting snapshot id", log) - } + snapshotID, emptySnapshot, err := uploaderProv.RunBackup(ctx, path, pvb.Spec.Tags, parentSnapshotID, r.NewBackupProgressUpdater(&pvb, log, ctx)) + if err != nil { + return r.updateStatusToFailed(ctx, &pvb, err, fmt.Sprintf("running backup, stderr=%v", err), log) } // Update status to Completed with path & snapshot ID. original = pvb.DeepCopy() - pvb.Status.Path = resticDetails.path + pvb.Status.Path = path pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseCompleted pvb.Status.SnapshotID = snapshotID pvb.Status.CompletionTimestamp = &metav1.Time{Time: r.Clock.Now()} @@ -194,8 +204,9 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ latencyDuration := pvb.Status.CompletionTimestamp.Time.Sub(pvb.Status.StartTimestamp.Time) latencySeconds := float64(latencyDuration / time.Second) backupName := fmt.Sprintf("%s/%s", req.Namespace, pvb.OwnerReferences[0].Name) - r.Metrics.ObserveResticOpLatency(r.NodeName, req.Name, resticCmd.Command, backupName, latencySeconds) - r.Metrics.RegisterResticOpLatencyGauge(r.NodeName, req.Name, resticCmd.Command, backupName, latencySeconds) + generateOpName := fmt.Sprintf("%s-%s-%s-%s-%s-backup", pvb.Name, backupRepo.Name, pvb.Spec.BackupStorageLocation, pvb.Namespace, pvb.Spec.UploaderType) + r.Metrics.ObserveResticOpLatency(r.NodeName, req.Name, generateOpName, backupName, latencySeconds) + r.Metrics.RegisterResticOpLatencyGauge(r.NodeName, req.Name, generateOpName, backupName, latencySeconds) r.Metrics.RegisterPodVolumeBackupDequeue(r.NodeName) log.Info("PodVolumeBackup completed") @@ -210,14 +221,14 @@ func (r *PodVolumeBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { } // getParentSnapshot finds the most recent completed PodVolumeBackup for the -// specified PVC and returns its Restic snapshot ID. Any errors encountered are +// specified PVC and returns its snapshot ID. Any errors encountered are // logged but not returned since they do not prevent a backup from proceeding. 
-func (r *PodVolumeBackupReconciler) getParentSnapshot(ctx context.Context, log logrus.FieldLogger, pvbNamespace, pvcUID, bsl string) string { +func (r *PodVolumeBackupReconciler) getParentSnapshot(ctx context.Context, log logrus.FieldLogger, pvcUID string, podVolumeBackup *velerov1api.PodVolumeBackup) string { log = log.WithField("pvcUID", pvcUID) log.Infof("Looking for most recent completed PodVolumeBackup for this PVC") listOpts := &client.ListOptions{ - Namespace: pvbNamespace, + Namespace: podVolumeBackup.Namespace, } matchingLabels := client.MatchingLabels(map[string]string{velerov1api.PVCUIDLabel: pvcUID}) matchingLabels.ApplyToList(listOpts) @@ -231,11 +242,14 @@ func (r *PodVolumeBackupReconciler) getParentSnapshot(ctx context.Context, log l // recent completed one to use as the parent. var mostRecentPVB velerov1api.PodVolumeBackup for _, pvb := range pvbList.Items { + if pvb.Spec.UploaderType != podVolumeBackup.Spec.UploaderType { + continue + } if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseCompleted { continue } - if bsl != pvb.Spec.BackupStorageLocation { + if podVolumeBackup.Spec.BackupStorageLocation != pvb.Spec.BackupStorageLocation { // Check the backup storage location is the same as spec in order to // support backup to multiple backup-locations. Otherwise, there exists // a case that backup volume snapshot to the second location would @@ -264,18 +278,6 @@ func (r *PodVolumeBackupReconciler) getParentSnapshot(ctx context.Context, log l return mostRecentPVB.Status.SnapshotID } -// updateBackupProgressFunc returns a func that takes progress info and patches -// the PVB with the new progress. -func (r *PodVolumeBackupReconciler) updateBackupProgressFunc(pvb *velerov1api.PodVolumeBackup, log logrus.FieldLogger) func(velerov1api.PodVolumeOperationProgress) { - return func(progress velerov1api.PodVolumeOperationProgress) { - original := pvb.DeepCopy() - pvb.Status.Progress = progress - if err := r.Client.Patch(context.Background(), pvb, client.MergeFrom(original)); err != nil { - log.WithError(err).Error("error update progress") - } - } -} - func (r *PodVolumeBackupReconciler) updateStatusToFailed(ctx context.Context, pvb *velerov1api.PodVolumeBackup, err error, msg string, log logrus.FieldLogger) (ctrl.Result, error) { original := pvb.DeepCopy() pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed @@ -290,77 +292,19 @@ func (r *PodVolumeBackupReconciler) updateStatusToFailed(ctx context.Context, pv return ctrl.Result{}, nil } -type resticDetails struct { - credsFile, caCertFile string - envs []string - path string +func (r *PodVolumeBackupReconciler) NewBackupProgressUpdater(pvb *velerov1api.PodVolumeBackup, log logrus.FieldLogger, ctx context.Context) *BackupProgressUpdater { + return &BackupProgressUpdater{pvb, log, ctx, r.Client} } -func (r *PodVolumeBackupReconciler) buildResticCommand(ctx context.Context, log *logrus.Entry, pvb *velerov1api.PodVolumeBackup, pod *corev1.Pod, details *resticDetails) (*restic.Command, error) { - volDir, err := kube.GetVolumeDirectory(ctx, log, pod, pvb.Spec.Volume, r.Client) - if err != nil { - return nil, errors.Wrap(err, "getting volume directory name") +//UpdateProgress which implement ProgressUpdater interface to update pvb progress status +func (b *BackupProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) { + original := b.PodVolumeBackup.DeepCopy() + b.PodVolumeBackup.Status.Progress = velerov1api.PodVolumeOperationProgress{TotalBytes: p.TotalBytes, BytesDone: p.BytesDone} + if b.Cli == nil { + 
b.Log.Errorf("failed to update backup pod %s volume %s progress with uninitialized client", b.PodVolumeBackup.Spec.Pod.Name, b.PodVolumeBackup.Spec.Volume) + return } - - pathGlob := fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(pvb.Spec.Pod.UID), volDir) - log.WithField("pathGlob", pathGlob).Debug("Looking for path matching glob") - - path, err := kube.SinglePathMatch(pathGlob, r.FileSystem, log) - if err != nil { - return nil, errors.Wrap(err, "identifying unique volume path on host") - } - log.WithField("path", path).Debugf("Found path matching glob") - - // Temporary credentials. - details.credsFile, err = r.CredsFileStore.Path(repokey.RepoKeySelector()) - if err != nil { - return nil, errors.Wrap(err, "creating temporary Restic credentials file") + if err := b.Cli.Patch(b.Ctx, b.PodVolumeBackup, client.MergeFrom(original)); err != nil { + b.Log.Errorf("failed to update backup pod %s volume %s progress: %v", b.PodVolumeBackup.Spec.Pod.Name, b.PodVolumeBackup.Spec.Volume, err) } - - cmd := restic.BackupCommand(pvb.Spec.RepoIdentifier, details.credsFile, path, pvb.Spec.Tags) - - backupLocation := &velerov1api.BackupStorageLocation{} - if err := r.Client.Get(context.Background(), client.ObjectKey{ - Namespace: pvb.Namespace, - Name: pvb.Spec.BackupStorageLocation, - }, backupLocation); err != nil { - return nil, errors.Wrap(err, "getting backup storage location") - } - - // If there's a caCert on the ObjectStorage, write it to disk so that it can - // be passed to Restic. - if backupLocation.Spec.ObjectStorage != nil && - backupLocation.Spec.ObjectStorage.CACert != nil { - - details.caCertFile, err = restic.TempCACertFile(backupLocation.Spec.ObjectStorage.CACert, pvb.Spec.BackupStorageLocation, r.FileSystem) - if err != nil { - log.WithError(err).Error("creating temporary caCert file") - } - } - cmd.CACertFile = details.caCertFile - - details.envs, err = restic.CmdEnv(backupLocation, r.CredsFileStore) - if err != nil { - return nil, errors.Wrap(err, "setting Restic command environment") - } - cmd.Env = details.envs - - // If this is a PVC, look for the most recent completed PodVolumeBackup for - // it and get its Restic snapshot ID to use as the value of the `--parent` - // flag. Without this, if the pod using the PVC (and therefore the directory - // path under /host_pods/) has changed since the PVC's last backup, Restic - // will not be able to identify a suitable parent snapshot to use, and will - // have to do a full rescan of the contents of the PVC. - if pvcUID, ok := pvb.Labels[velerov1api.PVCUIDLabel]; ok { - parentSnapshotID := r.getParentSnapshot(ctx, log, pvb.Namespace, pvcUID, pvb.Spec.BackupStorageLocation) - if parentSnapshotID == "" { - log.Info("No parent snapshot found for PVC, not using --parent flag for this backup") - } else { - log.WithField("parentSnapshotID", parentSnapshotID). - Info("Setting --parent flag for this backup") - cmd.ExtraFlags = append(cmd.ExtraFlags, fmt.Sprintf("--parent=%s", parentSnapshotID)) - } - } - - return cmd, nil } diff --git a/pkg/controller/pod_volume_backup_controller_test.go b/pkg/controller/pod_volume_backup_controller_test.go index ffc5f662cb..f25b31a95f 100644 --- a/pkg/controller/pod_volume_backup_controller_test.go +++ b/pkg/controller/pod_volume_backup_controller_test.go @@ -24,6 +24,7 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo/extensions/table" .
"github.com/onsi/gomega" + "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -35,11 +36,13 @@ import ( kbclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/metrics" - "github.com/vmware-tanzu/velero/pkg/restic/mocks" velerotest "github.com/vmware-tanzu/velero/pkg/test" + "github.com/vmware-tanzu/velero/pkg/uploader" + "github.com/vmware-tanzu/velero/pkg/uploader/provider" ) const name = "pvb-1" @@ -68,12 +71,34 @@ func bslBuilder() *builder.BackupStorageLocationBuilder { ForBackupStorageLocation(velerov1api.DefaultNamespace, "bsl-loc") } +func buildBackupRepo() *velerov1api.BackupRepository { + return &velerov1api.BackupRepository{ + Spec: velerov1api.BackupRepositorySpec{ResticIdentifier: ""}, + TypeMeta: metav1.TypeMeta{ + APIVersion: velerov1api.SchemeGroupVersion.String(), + Kind: "BackupRepository", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1api.DefaultNamespace, + Name: fmt.Sprintf("%s-bsl-loc-restic-dn24h", velerov1api.DefaultNamespace), + Labels: map[string]string{ + velerov1api.StorageLocationLabel: "bsl-loc", + velerov1api.VolumeNamespaceLabel: velerov1api.DefaultNamespace, + velerov1api.RepositoryTypeLabel: "restic", + }, + }, + Status: velerov1api.BackupRepositoryStatus{ + Phase: velerov1api.BackupRepositoryPhaseReady, + }, + } +} + var _ = Describe("PodVolumeBackup Reconciler", func() { type request struct { - pvb *velerov1api.PodVolumeBackup - pod *corev1.Pod - bsl *velerov1api.BackupStorageLocation - + pvb *velerov1api.PodVolumeBackup + pod *corev1.Pod + bsl *velerov1api.BackupStorageLocation + backupRepo *velerov1api.BackupRepository expectedProcessed bool expected *velerov1api.PodVolumeBackup expectedRequeue ctrl.Result @@ -100,31 +125,41 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { err = fakeClient.Create(ctx, test.bsl) Expect(err).To(BeNil()) + err = fakeClient.Create(ctx, test.backupRepo) + Expect(err).To(BeNil()) + fakeFS := velerotest.NewFakeFileSystem() pathGlob := fmt.Sprintf("/host_pods/%s/volumes/*/%s", "", "pvb-1-volume") _, err = fakeFS.Create(pathGlob) Expect(err).To(BeNil()) + credentialFileStore, err := credentials.NewNamespacedFileStore( + fakeClient, + velerov1api.DefaultNamespace, + "/tmp/credentials", + fakeFS, + ) + // Setup reconciler Expect(velerov1api.AddToScheme(scheme.Scheme)).To(Succeed()) r := PodVolumeBackupReconciler{ - Client: fakeClient, - Clock: clock.NewFakeClock(now), - Metrics: metrics.NewResticServerMetrics(), - CredsFileStore: fakeCredsFileStore{}, - NodeName: "test_node", - FileSystem: fakeFS, - ResticExec: mocks.FakeResticBackupExec{}, - Log: velerotest.NewLogger(), + Client: fakeClient, + Clock: clock.NewFakeClock(now), + Metrics: metrics.NewResticServerMetrics(), + CredentialGetter: &credentials.CredentialGetter{FromFile: credentialFileStore}, + NodeName: "test_node", + FileSystem: fakeFS, + Log: velerotest.NewLogger(), + } + NewUploaderProviderFunc = func(ctx context.Context, client kbclient.Client, uploaderType, repoIdentifier string, bsl *velerov1api.BackupStorageLocation, backupRepo *velerov1api.BackupRepository, credGetter *credentials.CredentialGetter, repoKeySelector *corev1.SecretKeySelector, log logrus.FieldLogger) (provider.Provider, error) { + return &fakeProvider{}, nil } - actualResult, err 
:= r.Reconcile(ctx, ctrl.Request{ NamespacedName: types.NamespacedName{ Namespace: velerov1api.DefaultNamespace, Name: test.pvb.Name, }, }) - Expect(actualResult).To(BeEquivalentTo(test.expectedRequeue)) if test.expectedErrMsg == "" { Expect(err).To(BeNil()) @@ -137,7 +172,6 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { Name: test.pvb.Name, Namespace: test.pvb.Namespace, }, &pvb) - // Assertions if test.expected == nil { Expect(apierrors.IsNotFound(err)).To(BeTrue()) @@ -160,6 +194,7 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { pvb: pvbBuilder().Phase("").Node("test_node").Result(), pod: podBuilder().Result(), bsl: bslBuilder().Result(), + backupRepo: buildBackupRepo(), expectedProcessed: true, expected: builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, "pvb-1"). Phase(velerov1api.PodVolumeBackupPhaseCompleted). @@ -173,6 +208,7 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { Result(), pod: podBuilder().Result(), bsl: bslBuilder().Result(), + backupRepo: buildBackupRepo(), expectedProcessed: true, expected: builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, "pvb-1"). Phase(velerov1api.PodVolumeBackupPhaseCompleted). @@ -186,6 +222,7 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { Result(), pod: podBuilder().Result(), bsl: bslBuilder().Result(), + backupRepo: buildBackupRepo(), expectedProcessed: false, expected: builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, "pvb-1"). Phase(velerov1api.PodVolumeBackupPhaseInProgress). @@ -199,6 +236,7 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { Result(), pod: podBuilder().Result(), bsl: bslBuilder().Result(), + backupRepo: buildBackupRepo(), expectedProcessed: false, expected: builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, "pvb-1"). Phase(velerov1api.PodVolumeBackupPhaseCompleted). @@ -212,6 +250,7 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { Result(), pod: podBuilder().Result(), bsl: bslBuilder().Result(), + backupRepo: buildBackupRepo(), expectedProcessed: false, expected: builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, "pvb-1"). Phase(velerov1api.PodVolumeBackupPhaseFailed). @@ -225,6 +264,7 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { Result(), pod: podBuilder().Result(), bsl: bslBuilder().Result(), + backupRepo: buildBackupRepo(), expectedProcessed: false, expected: builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, "pvb-1"). Phase(velerov1api.PodVolumeBackupPhaseFailed). @@ -238,6 +278,7 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { Result(), pod: podBuilder().Result(), bsl: bslBuilder().Result(), + backupRepo: buildBackupRepo(), expectedProcessed: false, expected: builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, "pvb-1"). Phase(velerov1api.PodVolumeBackupPhaseNew). @@ -251,6 +292,7 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { Result(), pod: podBuilder().Result(), bsl: bslBuilder().Result(), + backupRepo: buildBackupRepo(), expectedProcessed: false, expected: builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, "pvb-1"). Phase(velerov1api.PodVolumeBackupPhaseInProgress). @@ -264,6 +306,7 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { Result(), pod: podBuilder().Result(), bsl: bslBuilder().Result(), + backupRepo: buildBackupRepo(), expectedProcessed: false, expected: builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, "pvb-1"). Phase(velerov1api.PodVolumeBackupPhaseCompleted). 
@@ -277,6 +320,7 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { Result(), pod: podBuilder().Result(), bsl: bslBuilder().Result(), + backupRepo: buildBackupRepo(), expectedProcessed: false, expected: builder.ForPodVolumeBackup(velerov1api.DefaultNamespace, "pvb-1"). Phase(velerov1api.PodVolumeBackupPhaseFailed). @@ -286,8 +330,26 @@ var _ = Describe("PodVolumeBackup Reconciler", func() { ) }) -type fakeCredsFileStore struct{} +type fakeProvider struct { +} + +func (f *fakeProvider) RunBackup( + ctx context.Context, + path string, + tags map[string]string, + parentSnapshot string, + updater uploader.ProgressUpdater) (string, bool, error) { + return "", false, nil +} + +func (f *fakeProvider) RunRestore( + ctx context.Context, + snapshotID string, + volumePath string, + updater uploader.ProgressUpdater) error { + return nil +} -func (f fakeCredsFileStore) Path(selector *corev1.SecretKeySelector) (string, error) { - return "/fake/path", nil +func (f *fakeProvider) Close(ctx context.Context) error { + return nil } diff --git a/pkg/controller/pod_volume_restore_controller.go b/pkg/controller/pod_volume_restore_controller.go index 2b81d363ad..5292e0a01f 100644 --- a/pkg/controller/pod_volume_restore_controller.go +++ b/pkg/controller/pod_volume_restore_controller.go @@ -39,29 +39,39 @@ import ( "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/podvolume" + "github.com/vmware-tanzu/velero/pkg/repository" repokey "github.com/vmware-tanzu/velero/pkg/repository/keys" - "github.com/vmware-tanzu/velero/pkg/restic" + "github.com/vmware-tanzu/velero/pkg/uploader" + "github.com/vmware-tanzu/velero/pkg/uploader/provider" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/kube" ) -func NewPodVolumeRestoreReconciler(logger logrus.FieldLogger, client client.Client, credentialsFileStore credentials.FileStore) *PodVolumeRestoreReconciler { +func NewPodVolumeRestoreReconciler(logger logrus.FieldLogger, client client.Client, credentialGetter *credentials.CredentialGetter) *PodVolumeRestoreReconciler { return &PodVolumeRestoreReconciler{ - Client: client, - logger: logger.WithField("controller", "PodVolumeRestore"), - credentialsFileStore: credentialsFileStore, - fileSystem: filesystem.NewFileSystem(), - clock: &clock.RealClock{}, + Client: client, + logger: logger.WithField("controller", "PodVolumeRestore"), + credentialGetter: credentialGetter, + fileSystem: filesystem.NewFileSystem(), + clock: &clock.RealClock{}, } } type PodVolumeRestoreReconciler struct { client.Client - logger logrus.FieldLogger - credentialsFileStore credentials.FileStore - fileSystem filesystem.Interface - clock clock.Clock + logger logrus.FieldLogger + credentialGetter *credentials.CredentialGetter + fileSystem filesystem.Interface + clock clock.Clock +} + +type RestoreProgressUpdater struct { + PodVolumeRestore *velerov1api.PodVolumeRestore + Log logrus.FieldLogger + Ctx context.Context + Cli client.Client } // +kubebuilder:rbac:groups=velero.io,resources=podvolumerestores,verbs=get;list;watch;create;update;patch;delete @@ -98,7 +108,7 @@ func (c *PodVolumeRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Req resticInitContainerIndex := getResticInitContainerIndex(pod) if resticInitContainerIndex > 0 { log.Warnf(`Init containers before the %s container may cause issues - if they interfere with volumes being restored: %s 
index %d`, restic.InitContainer, restic.InitContainer, resticInitContainerIndex) + if they interfere with volumes being restored: %s index %d`, podvolume.InitContainer, podvolume.InitContainer, resticInitContainerIndex) } log.Info("Restore starting") @@ -208,7 +218,7 @@ func isResticInitContainerRunning(pod *corev1api.Pod) bool { func getResticInitContainerIndex(pod *corev1api.Pod) int { // Restic wait container can be anywhere in the list of init containers so locate it. for i, initContainer := range pod.Spec.InitContainers { - if initContainer.Name == restic.InitContainer { + if initContainer.Name == podvolume.InitContainer { return i } } @@ -231,20 +241,6 @@ func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *ve return errors.Wrap(err, "error identifying path of volume") } - credsFile, err := c.credentialsFileStore.Path(repokey.RepoKeySelector()) - if err != nil { - return errors.Wrap(err, "error creating temp restic credentials file") - } - // ignore error since there's nothing we can do and it's a temp file. - defer os.Remove(credsFile) - - resticCmd := restic.RestoreCommand( - req.Spec.RepoIdentifier, - credsFile, - req.Spec.SnapshotID, - volumePath, - ) - backupLocation := &velerov1api.BackupStorageLocation{} if err := c.Get(ctx, client.ObjectKey{ Namespace: req.Namespace, @@ -253,38 +249,32 @@ func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *ve return errors.Wrap(err, "error getting backup storage location") } - // if there's a caCert on the ObjectStorage, write it to disk so that it can be passed to restic - var caCertFile string - if backupLocation.Spec.ObjectStorage != nil && backupLocation.Spec.ObjectStorage.CACert != nil { - caCertFile, err = restic.TempCACertFile(backupLocation.Spec.ObjectStorage.CACert, req.Spec.BackupStorageLocation, c.fileSystem) - if err != nil { - log.WithError(err).Error("Error creating temp cacert file") - } - // ignore error since there's nothing we can do and it's a temp file. - defer os.Remove(caCertFile) + // need to check backup repository in source namespace rather than in pod namespace + // such as in case of namespace mapping issue + backupRepo, err := repository.GetBackupRepository(ctx, c.Client, req.Namespace, repository.BackupRepositoryKey{ + VolumeNamespace: req.Spec.SourceNamespace, + BackupLocation: req.Spec.BackupStorageLocation, + RepositoryType: podvolume.GetPvrRepositoryType(req), + }) + if err != nil { + return errors.Wrap(err, "error getting backup repository") } - resticCmd.CACertFile = caCertFile - env, err := restic.CmdEnv(backupLocation, c.credentialsFileStore) + uploaderProv, err := provider.NewUploaderProvider(ctx, c.Client, req.Spec.UploaderType, + req.Spec.RepoIdentifier, backupLocation, backupRepo, c.credentialGetter, repokey.RepoKeySelector(), log) if err != nil { - return errors.Wrap(err, "error setting restic cmd env") - } - resticCmd.Env = env - - // #4820: restrieve insecureSkipTLSVerify from BSL configuration for - // AWS plugin. If nothing is return, that means insecureSkipTLSVerify - // is not enable for Restic command. 
- skipTLSRet := restic.GetInsecureSkipTLSVerifyFromBSL(backupLocation, log) - if len(skipTLSRet) > 0 { - resticCmd.ExtraFlags = append(resticCmd.ExtraFlags, skipTLSRet) + return errors.Wrap(err, "error creating uploader") } - var stdout, stderr string + defer func() { + if err := uploaderProv.Close(ctx); err != nil { + log.Errorf("failed to close uploader provider with error %v", err) + } + }() - if stdout, stderr, err = restic.RunRestore(resticCmd, log, c.updateRestoreProgressFunc(req, log)); err != nil { - return errors.Wrapf(err, "error running restic restore, cmd=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr) + if err = uploaderProv.RunRestore(ctx, req.Spec.SnapshotID, volumePath, c.NewRestoreProgressUpdater(req, log, ctx)); err != nil { + return errors.Wrap(err, "error running restore") } - log.Debugf("Ran command=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr) // Remove the .velero directory from the restored volume (it may contain done files from previous restores // of this volume, which we don't want to carry over). If this fails for any reason, log and continue, since @@ -318,14 +308,19 @@ func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *ve return nil } -// updateRestoreProgressFunc returns a func that takes progress info and patches -// the PVR with the new progress -func (c *PodVolumeRestoreReconciler) updateRestoreProgressFunc(req *velerov1api.PodVolumeRestore, log logrus.FieldLogger) func(velerov1api.PodVolumeOperationProgress) { - return func(progress velerov1api.PodVolumeOperationProgress) { - original := req.DeepCopy() - req.Status.Progress = progress - if err := c.Patch(context.Background(), req, client.MergeFrom(original)); err != nil { - log.WithError(err).Error("Unable to update PodVolumeRestore progress") - } +func (r *PodVolumeRestoreReconciler) NewRestoreProgressUpdater(pvr *velerov1api.PodVolumeRestore, log logrus.FieldLogger, ctx context.Context) *RestoreProgressUpdater { + return &RestoreProgressUpdater{pvr, log, ctx, r.Client} +} + +// UpdateProgress implements the ProgressUpdater interface to update the PVR progress status +func (r *RestoreProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) { + original := r.PodVolumeRestore.DeepCopy() + r.PodVolumeRestore.Status.Progress = velerov1api.PodVolumeOperationProgress{TotalBytes: p.TotalBytes, BytesDone: p.BytesDone} + if r.Cli == nil { + r.Log.Errorf("failed to update restore pod %s volume %s progress with uninitialized client", r.PodVolumeRestore.Spec.Pod.Name, r.PodVolumeRestore.Spec.Volume) + return + } + if err := r.Cli.Patch(r.Ctx, r.PodVolumeRestore, client.MergeFrom(original)); err != nil { + r.Log.Errorf("failed to update restore pod %s volume %s progress: %v", r.PodVolumeRestore.Spec.Pod.Name, r.PodVolumeRestore.Spec.Volume, err) } } diff --git a/pkg/controller/pod_volume_restore_controller_test.go b/pkg/controller/pod_volume_restore_controller_test.go index 69bc313a34..d9cdc5d264 100644 --- a/pkg/controller/pod_volume_restore_controller_test.go +++ b/pkg/controller/pod_volume_restore_controller_test.go @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/restic" + "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/test" ) @@ -120,7 +120,7 @@ func TestShouldProcess(t *testing.T) { NodeName: controllerNode, InitContainers: []corev1api.Container{ { - Name: restic.InitContainer, +
Name: podvolume.InitContainer, }, }, }, @@ -160,7 +160,7 @@ func TestShouldProcess(t *testing.T) { NodeName: controllerNode, InitContainers: []corev1api.Container{ { - Name: restic.InitContainer, + Name: podvolume.InitContainer, }, }, }, @@ -260,7 +260,7 @@ func TestIsResticContainerRunning(t *testing.T) { Name: "non-restic-init", }, { - Name: restic.InitContainer, + Name: podvolume.InitContainer, }, }, }, @@ -291,7 +291,7 @@ func TestIsResticContainerRunning(t *testing.T) { Spec: corev1api.PodSpec{ InitContainers: []corev1api.Container{ { - Name: restic.InitContainer, + Name: podvolume.InitContainer, }, { Name: "non-restic-init", @@ -323,7 +323,7 @@ func TestIsResticContainerRunning(t *testing.T) { Spec: corev1api.PodSpec{ InitContainers: []corev1api.Container{ { - Name: restic.InitContainer, + Name: podvolume.InitContainer, }, { Name: "non-restic-init", @@ -357,7 +357,7 @@ func TestIsResticContainerRunning(t *testing.T) { Spec: corev1api.PodSpec{ InitContainers: []corev1api.Container{ { - Name: restic.InitContainer, + Name: podvolume.InitContainer, }, }, }, @@ -422,7 +422,7 @@ func TestGetResticInitContainerIndex(t *testing.T) { Name: "non-restic-init", }, { - Name: restic.InitContainer, + Name: podvolume.InitContainer, }, }, }, @@ -439,7 +439,7 @@ func TestGetResticInitContainerIndex(t *testing.T) { Spec: corev1api.PodSpec{ InitContainers: []corev1api.Container{ { - Name: restic.InitContainer, + Name: podvolume.InitContainer, }, { Name: "non-restic-init", @@ -459,7 +459,7 @@ func TestGetResticInitContainerIndex(t *testing.T) { Spec: corev1api.PodSpec{ InitContainers: []corev1api.Container{ { - Name: restic.InitContainer, + Name: podvolume.InitContainer, }, { Name: "non-restic-init", diff --git a/pkg/controller/restic_repository_controller.go b/pkg/controller/restic_repository_controller.go index d6cd869e3e..7328cf78b8 100644 --- a/pkg/controller/restic_repository_controller.go +++ b/pkg/controller/restic_repository_controller.go @@ -18,7 +18,6 @@ package controller import ( "context" - "strings" "time" "github.com/pkg/errors" @@ -32,44 +31,39 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/repository" repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config" - "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/util/kube" ) const ( - repoSyncPeriod = 5 * time.Minute + repoSyncPeriod = 5 * time.Minute + defaultMaintainFrequency = 7 * 24 * time.Hour ) type ResticRepoReconciler struct { client.Client - namespace string - logger logrus.FieldLogger - clock clock.Clock - defaultMaintenanceFrequency time.Duration - repositoryManager repository.Manager + namespace string + logger logrus.FieldLogger + clock clock.Clock + maintenanceFrequency time.Duration + repositoryManager repository.Manager } func NewResticRepoReconciler(namespace string, logger logrus.FieldLogger, client client.Client, - defaultMaintenanceFrequency time.Duration, repositoryManager repository.Manager) *ResticRepoReconciler { + maintenanceFrequency time.Duration, repositoryManager repository.Manager) *ResticRepoReconciler { c := &ResticRepoReconciler{ client, namespace, logger, clock.RealClock{}, - defaultMaintenanceFrequency, + maintenanceFrequency, repositoryManager, } - if c.defaultMaintenanceFrequency <= 0 { - logger.Infof("Invalid default restic maintenance frequency, setting to %v", restic.DefaultMaintenanceFrequency) - c.defaultMaintenanceFrequency = restic.DefaultMaintenanceFrequency - } - return c } func (r 
*ResticRepoReconciler) SetupWithManager(mgr ctrl.Manager) error { - s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.BackupRepositoryList{}, repoSyncPeriod) + s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.BackupRepositoryList{}, repoSyncPeriod, kube.PeriodicalEnqueueSourceOption{}) return ctrl.NewControllerManagedBy(mgr). For(&velerov1api.BackupRepository{}). Watches(s, nil). @@ -135,7 +129,7 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1 rr.Status.Phase = velerov1api.BackupRepositoryPhaseNotReady if rr.Spec.MaintenanceFrequency.Duration <= 0 { - rr.Spec.MaintenanceFrequency = metav1.Duration{Duration: r.defaultMaintenanceFrequency} + rr.Spec.MaintenanceFrequency = metav1.Duration{Duration: r.getRepositoryMaintenanceFrequency(req)} } }) } @@ -145,7 +139,7 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1 rr.Spec.ResticIdentifier = repoIdentifier if rr.Spec.MaintenanceFrequency.Duration <= 0 { - rr.Spec.MaintenanceFrequency = metav1.Duration{Duration: r.defaultMaintenanceFrequency} + rr.Spec.MaintenanceFrequency = metav1.Duration{Duration: r.getRepositoryMaintenanceFrequency(req)} } }); err != nil { return err @@ -161,23 +155,27 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1 }) } -// ensureRepo checks to see if a repository exists, and attempts to initialize it if -// it does not exist. An error is returned if the repository can't be connected to -// or initialized. -func ensureRepo(repo *velerov1api.BackupRepository, repoManager repository.Manager) error { - if err := repoManager.ConnectToRepo(repo); err != nil { - // If the repository has not yet been initialized, the error message will always include - // the following string. This is the only scenario where we should try to initialize it. - // Other errors (e.g. "already locked") should be returned as-is since the repository - // does already exist, but it can't be connected to. - if strings.Contains(err.Error(), "Is there a repository at the following location?") { - return repoManager.InitRepo(repo) +func (r *ResticRepoReconciler) getRepositoryMaintenanceFrequency(req *velerov1api.BackupRepository) time.Duration { + if r.maintenanceFrequency > 0 { + r.logger.WithField("frequency", r.maintenanceFrequency).Info("Using user-defined maintenance frequency") + return r.maintenanceFrequency + } else { + frequency, err := r.repositoryManager.DefaultMaintenanceFrequency(req) + if err != nil || frequency <= 0 { + r.logger.WithError(err).WithField("returned frequency", frequency).Warn("Failed to get maintenance frequency, using the default value") + frequency = defaultMaintainFrequency + } else { + r.logger.WithField("frequency", frequency).Info("Using maintenance frequency suggested by the repository") } - return err + return frequency } +} - return nil +// ensureRepo calls repo manager's PrepareRepo to ensure the repo is ready for use. +// An error is returned if the repository can't be connected to or initialized.
+func ensureRepo(repo *velerov1api.BackupRepository, repoManager repository.Manager) error { + return repoManager.PrepareRepo(repo) } func (r *ResticRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error { diff --git a/pkg/controller/restic_repository_controller_test.go b/pkg/controller/restic_repository_controller_test.go index d693f510be..16fa5f983f 100644 --- a/pkg/controller/restic_repository_controller_test.go +++ b/pkg/controller/restic_repository_controller_test.go @@ -15,23 +15,26 @@ package controller import ( "context" + "errors" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/repository" repomokes "github.com/vmware-tanzu/velero/pkg/repository/mocks" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) -const defaultMaintenanceFrequency = 10 * time.Minute +const testMaintenanceFrequency = 10 * time.Minute func mockResticRepoReconciler(t *testing.T, rr *velerov1api.BackupRepository, mockOn string, arg interface{}, ret interface{}) *ResticRepoReconciler { - mgr := &repomokes.RepositoryManager{} + mgr := &repomokes.Manager{} if mockOn != "" { mgr.On(mockOn, arg).Return(ret) } @@ -39,7 +42,7 @@ func mockResticRepoReconciler(t *testing.T, rr *velerov1api.BackupRepository, mo velerov1api.DefaultNamespace, velerotest.NewLogger(), velerotest.NewFakeControllerRuntimeClient(t), - defaultMaintenanceFrequency, + testMaintenanceFrequency, mgr, ) } @@ -51,7 +54,7 @@ func mockResticRepositoryCR() *velerov1api.BackupRepository { Name: "repo", }, Spec: velerov1api.BackupRepositorySpec{ - MaintenanceFrequency: metav1.Duration{defaultMaintenanceFrequency}, + MaintenanceFrequency: metav1.Duration{testMaintenanceFrequency}, }, } @@ -72,7 +75,7 @@ func TestPatchResticRepository(t *testing.T) { func TestCheckNotReadyRepo(t *testing.T) { rr := mockResticRepositoryCR() - reconciler := mockResticRepoReconciler(t, rr, "ConnectToRepo", rr, nil) + reconciler := mockResticRepoReconciler(t, rr, "PrepareRepo", rr, nil) err := reconciler.Client.Create(context.TODO(), rr) assert.NoError(t, err) err = reconciler.checkNotReadyRepo(context.TODO(), rr, reconciler.logger) @@ -104,7 +107,7 @@ func TestRunMaintenanceIfDue(t *testing.T) { func TestInitializeRepo(t *testing.T) { rr := mockResticRepositoryCR() rr.Spec.BackupStorageLocation = "default" - reconciler := mockResticRepoReconciler(t, rr, "ConnectToRepo", rr, nil) + reconciler := mockResticRepoReconciler(t, rr, "PrepareRepo", rr, nil) err := reconciler.Client.Create(context.TODO(), rr) assert.NoError(t, err) locations := &velerov1api.BackupStorageLocation{ @@ -138,7 +141,7 @@ func TestResticRepoReconcile(t *testing.T) { Name: "unknown", }, Spec: velerov1api.BackupRepositorySpec{ - MaintenanceFrequency: metav1.Duration{defaultMaintenanceFrequency}, + MaintenanceFrequency: metav1.Duration{testMaintenanceFrequency}, }, }, expectNil: true, @@ -151,7 +154,7 @@ func TestResticRepoReconcile(t *testing.T) { Name: "repo", }, Spec: velerov1api.BackupRepositorySpec{ - MaintenanceFrequency: metav1.Duration{defaultMaintenanceFrequency}, + MaintenanceFrequency: metav1.Duration{testMaintenanceFrequency}, }, }, expectNil: true, @@ -164,7 +167,7 @@ func TestResticRepoReconcile(t *testing.T) { Name: "repo", }, Spec: velerov1api.BackupRepositorySpec{ - 
MaintenanceFrequency: metav1.Duration{defaultMaintenanceFrequency}, + MaintenanceFrequency: metav1.Duration{testMaintenanceFrequency}, }, Status: velerov1api.BackupRepositoryStatus{ Phase: velerov1api.BackupRepositoryPhaseNew, @@ -187,3 +190,53 @@ func TestResticRepoReconcile(t *testing.T) { }) } } + +func TestGetRepositoryMaintenanceFrequency(t *testing.T) { + tests := []struct { + name string + mgr repository.Manager + repo *velerov1api.BackupRepository + freqReturn time.Duration + freqError error + userDefinedFreq time.Duration + expectFreq time.Duration + }{ + { + name: "user defined valid", + userDefinedFreq: time.Duration(time.Hour), + expectFreq: time.Duration(time.Hour), + }, + { + name: "repo return valid", + freqReturn: time.Duration(time.Hour * 2), + expectFreq: time.Duration(time.Hour * 2), + }, + { + name: "fall to default", + userDefinedFreq: -1, + freqError: errors.New("fake-error"), + expectFreq: defaultMaintainFrequency, + }, + { + name: "fall to default, no freq error", + freqReturn: -1, + expectFreq: defaultMaintainFrequency, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mgr := repomokes.Manager{} + mgr.On("DefaultMaintenanceFrequency", mock.Anything).Return(test.freqReturn, test.freqError) + reconciler := NewResticRepoReconciler( + velerov1api.DefaultNamespace, + velerotest.NewLogger(), + velerotest.NewFakeControllerRuntimeClient(t), + test.userDefinedFreq, + &mgr, + ) + + freq := reconciler.getRepositoryMaintenanceFrequency(test.repo) + assert.Equal(t, test.expectFreq, freq) + }) + } +} diff --git a/pkg/controller/restore_controller.go b/pkg/controller/restore_controller.go index 559611fe09..d14917589b 100644 --- a/pkg/controller/restore_controller.go +++ b/pkg/controller/restore_controller.go @@ -78,6 +78,12 @@ var nonRestorableResources = []string{ // https://github.com/vmware-tanzu/velero/issues/1113 "resticrepositories.velero.io", + // CSINode records the CSI drivers installed on each cluster node. + // VolumeAttachment records which node a PV is attached to.
+ // https://github.com/vmware-tanzu/velero/issues/4823 + "csinodes.storage.k8s.io", + "volumeattachments.storage.k8s.io", + // Backup repositories were renamed from Restic repositories "backuprepositories.velero.io", } diff --git a/pkg/controller/restore_controller_test.go b/pkg/controller/restore_controller_test.go index 16dd444e94..fff95340e5 100644 --- a/pkg/controller/restore_controller_test.go +++ b/pkg/controller/restore_controller_test.go @@ -46,8 +46,8 @@ import ( "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" "github.com/vmware-tanzu/velero/pkg/plugin/framework" pluginmocks "github.com/vmware-tanzu/velero/pkg/plugin/mocks" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" isv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" pkgrestore "github.com/vmware-tanzu/velero/pkg/restore" velerotest "github.com/vmware-tanzu/velero/pkg/test" "github.com/vmware-tanzu/velero/pkg/util/logging" @@ -861,7 +861,7 @@ type fakeRestorer struct { func (r *fakeRestorer) Restore( info pkgrestore.Request, - actions []velero.RestoreItemAction, + actions []riav1.RestoreItemAction, snapshotLocationLister listers.VolumeSnapshotLocationLister, volumeSnapshotterGetter pkgrestore.VolumeSnapshotterGetter, ) (pkgrestore.Result, pkgrestore.Result) { diff --git a/pkg/controller/schedule_controller.go b/pkg/controller/schedule_controller.go index e62969b27d..ec6721535d 100644 --- a/pkg/controller/schedule_controller.go +++ b/pkg/controller/schedule_controller.go @@ -26,8 +26,10 @@ import ( "github.com/sirupsen/logrus" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/clock" ctrl "sigs.k8s.io/controller-runtime" + bld "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -65,9 +67,18 @@ func NewScheduleReconciler( } func (c *scheduleReconciler) SetupWithManager(mgr ctrl.Manager) error { - s := kube.NewPeriodicalEnqueueSource(c.logger, mgr.GetClient(), &velerov1.ScheduleList{}, scheduleSyncPeriod) + s := kube.NewPeriodicalEnqueueSource(c.logger, mgr.GetClient(), &velerov1.ScheduleList{}, scheduleSyncPeriod, kube.PeriodicalEnqueueSourceOption{}) return ctrl.NewControllerManagedBy(mgr). - For(&velerov1.Schedule{}). + // global predicate, works for both For and Watch + WithEventFilter(kube.NewAllEventPredicate(func(obj client.Object) bool { + schedule := obj.(*velerov1.Schedule) + if pause := schedule.Spec.Paused; pause { + c.logger.Infof("schedule %s is paused, skip", schedule.Name) + return false + } + return true + })). + For(&velerov1.Schedule{}, bld.WithPredicates(kube.SpecChangePredicate{})). Watches(s, nil). 
Complete(c) } @@ -89,13 +100,6 @@ func (c *scheduleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{}, errors.Wrapf(err, "error getting schedule %s", req.String()) } - if schedule.Status.Phase != "" && - schedule.Status.Phase != velerov1.SchedulePhaseNew && - schedule.Status.Phase != velerov1.SchedulePhaseEnabled { - log.Debugf("the schedule phase is %s, isn't %s or %s, skip", schedule.Status.Phase, velerov1.SchedulePhaseNew, velerov1.SchedulePhaseEnabled) - return ctrl.Result{}, nil - } - c.metrics.InitSchedule(schedule.Name) original := schedule.DeepCopy() @@ -124,9 +128,14 @@ func (c *scheduleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{}, nil } - // check for the schedule being due to run, and submit a Backup if so - if err := c.submitBackupIfDue(ctx, schedule, cronSchedule); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "error running submitBackupIfDue for schedule %s", req.String()) + // Check for the schedule being due to run. + // If there are backups created by this schedule still in New or InProgress state, + // skip creating a new backup to avoid overlapping backups. + // As the schedule must be validated before checking whether it's due, we cannot put this check in the Predicate + if c.ifDue(schedule, cronSchedule) && !c.checkIfBackupInNewOrProgress(schedule) { + if err := c.submitBackup(ctx, schedule); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "error submitting backup for schedule %s", req.String()) + } } return ctrl.Result{}, nil @@ -172,35 +181,63 @@ func parseCronSchedule(itm *velerov1.Schedule, logger logrus.FieldLogger) (cron. return schedule, nil } -func (c *scheduleReconciler) submitBackupIfDue(ctx context.Context, item *velerov1.Schedule, cronSchedule cron.Schedule) error { - var ( - now = c.clock.Now() - isDue, nextRunTime = getNextRunTime(item, cronSchedule, now) - log = c.logger.WithField("schedule", kubeutil.NamespaceAndName(item)) - ) +// checkIfBackupInNewOrProgress checks whether there are backups created by this schedule still in New or InProgress state +func (c *scheduleReconciler) checkIfBackupInNewOrProgress(schedule *velerov1.Schedule) bool { + log := c.logger.WithField("schedule", kubeutil.NamespaceAndName(schedule)) + backupList := &velerov1.BackupList{} + options := &client.ListOptions{ + Namespace: schedule.Namespace, + LabelSelector: labels.Set(map[string]string{ + velerov1.ScheduleNameLabel: schedule.Name, + }).AsSelector(), + } + + err := c.List(context.Background(), backupList, options) + if err != nil { + log.Errorf("failed to list backups for schedule %s/%s: %s", schedule.Namespace, schedule.Name, err.Error()) + return true + } + + for _, backup := range backupList.Items { + if backup.Status.Phase == velerov1.BackupPhaseNew || backup.Status.Phase == velerov1.BackupPhaseInProgress { + return true + } + } + + log.Debugf("No backups created by schedule %s/%s are in New or InProgress state.", schedule.Namespace, schedule.Name) + return false +} + +// ifDue checks whether the schedule is due to create a new backup.
+func (c *scheduleReconciler) ifDue(schedule *velerov1.Schedule, cronSchedule cron.Schedule) bool { + isDue, nextRunTime := getNextRunTime(schedule, cronSchedule, c.clock.Now()) + log := c.logger.WithField("schedule", kubeutil.NamespaceAndName(schedule)) if !isDue { log.WithField("nextRunTime", nextRunTime).Debug("Schedule is not due, skipping") - return nil + return false } + return true +} + +// submitBackup create a backup from schedule. +func (c *scheduleReconciler) submitBackup(ctx context.Context, schedule *velerov1.Schedule) error { + c.logger.WithField("schedule", schedule.Namespace+"/"+schedule.Name).Info("Schedule is due, going to submit backup.") + + now := c.clock.Now() // Don't attempt to "catch up" if there are any missed or failed runs - simply // trigger a Backup if it's time. - // - // It might also make sense in the future to explicitly check for currently-running - // backups so that we don't overlap runs (for disk snapshots in particular, this can - // lead to performance issues). - log.WithField("nextRunTime", nextRunTime).Info("Schedule is due, submitting Backup") - backup := getBackup(item, now) + backup := getBackup(schedule, now) if err := c.Create(ctx, backup); err != nil { return errors.Wrap(err, "error creating Backup") } - original := item.DeepCopy() - item.Status.LastBackup = &metav1.Time{Time: now} + original := schedule.DeepCopy() + schedule.Status.LastBackup = &metav1.Time{Time: now} - if err := c.Patch(ctx, item, client.MergeFrom(original)); err != nil { - return errors.Wrapf(err, "error updating Schedule's LastBackup time to %v", item.Status.LastBackup) + if err := c.Patch(ctx, schedule, client.MergeFrom(original)); err != nil { + return errors.Wrapf(err, "error updating Schedule's LastBackup time to %v", schedule.Status.LastBackup) } return nil diff --git a/pkg/controller/schedule_controller_test.go b/pkg/controller/schedule_controller_test.go index c45846a81a..62e28f7a7d 100644 --- a/pkg/controller/schedule_controller_test.go +++ b/pkg/controller/schedule_controller_test.go @@ -31,7 +31,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/metrics" velerotest "github.com/vmware-tanzu/velero/pkg/test" @@ -40,19 +39,20 @@ import ( func TestReconcileOfSchedule(t *testing.T) { require.Nil(t, velerov1.AddToScheme(scheme.Scheme)) - newScheduleBuilder := func(phase velerov1api.SchedulePhase) *builder.ScheduleBuilder { + newScheduleBuilder := func(phase velerov1.SchedulePhase) *builder.ScheduleBuilder { return builder.ForSchedule("ns", "name").Phase(phase) } tests := []struct { name string scheduleKey string - schedule *velerov1api.Schedule + schedule *velerov1.Schedule fakeClockTime string expectedPhase string expectedValidationErrors []string - expectedBackupCreate *velerov1api.Backup + expectedBackupCreate *velerov1.Backup expectedLastBackup string + backup *velerov1.Backup }{ { name: "missing schedule triggers no backup", @@ -60,49 +60,55 @@ func TestReconcileOfSchedule(t *testing.T) { }, { name: "schedule with phase FailedValidation triggers no backup", - schedule: newScheduleBuilder(velerov1api.SchedulePhaseFailedValidation).Result(), + schedule: newScheduleBuilder(velerov1.SchedulePhaseFailedValidation).Result(), }, { name: "schedule with phase New gets validated and failed if invalid", - schedule: 
newScheduleBuilder(velerov1api.SchedulePhaseNew).Result(), - expectedPhase: string(velerov1api.SchedulePhaseFailedValidation), + schedule: newScheduleBuilder(velerov1.SchedulePhaseNew).Result(), + expectedPhase: string(velerov1.SchedulePhaseFailedValidation), expectedValidationErrors: []string{"Schedule must be a non-empty valid Cron expression"}, }, { name: "schedule with phase gets validated and failed if invalid", - schedule: newScheduleBuilder(velerov1api.SchedulePhase("")).Result(), - expectedPhase: string(velerov1api.SchedulePhaseFailedValidation), + schedule: newScheduleBuilder(velerov1.SchedulePhase("")).Result(), + expectedPhase: string(velerov1.SchedulePhaseFailedValidation), expectedValidationErrors: []string{"Schedule must be a non-empty valid Cron expression"}, }, { name: "schedule with phase Enabled gets re-validated and failed if invalid", - schedule: newScheduleBuilder(velerov1api.SchedulePhaseEnabled).Result(), - expectedPhase: string(velerov1api.SchedulePhaseFailedValidation), + schedule: newScheduleBuilder(velerov1.SchedulePhaseEnabled).Result(), + expectedPhase: string(velerov1.SchedulePhaseFailedValidation), expectedValidationErrors: []string{"Schedule must be a non-empty valid Cron expression"}, }, { name: "schedule with phase New gets validated and triggers a backup", - schedule: newScheduleBuilder(velerov1api.SchedulePhaseNew).CronSchedule("@every 5m").Result(), + schedule: newScheduleBuilder(velerov1.SchedulePhaseNew).CronSchedule("@every 5m").Result(), fakeClockTime: "2017-01-01 12:00:00", - expectedPhase: string(velerov1api.SchedulePhaseEnabled), - expectedBackupCreate: builder.ForBackup("ns", "name-20170101120000").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "name")).Result(), + expectedPhase: string(velerov1.SchedulePhaseEnabled), + expectedBackupCreate: builder.ForBackup("ns", "name-20170101120000").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")).Result(), expectedLastBackup: "2017-01-01 12:00:00", }, { name: "schedule with phase Enabled gets re-validated and triggers a backup if valid", - schedule: newScheduleBuilder(velerov1api.SchedulePhaseEnabled).CronSchedule("@every 5m").Result(), + schedule: newScheduleBuilder(velerov1.SchedulePhaseEnabled).CronSchedule("@every 5m").Result(), fakeClockTime: "2017-01-01 12:00:00", - expectedPhase: string(velerov1api.SchedulePhaseEnabled), - expectedBackupCreate: builder.ForBackup("ns", "name-20170101120000").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "name")).Result(), + expectedPhase: string(velerov1.SchedulePhaseEnabled), + expectedBackupCreate: builder.ForBackup("ns", "name-20170101120000").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")).Result(), expectedLastBackup: "2017-01-01 12:00:00", }, { name: "schedule that's already run gets LastBackup updated", - schedule: newScheduleBuilder(velerov1api.SchedulePhaseEnabled).CronSchedule("@every 5m").LastBackupTime("2000-01-01 00:00:00").Result(), + schedule: newScheduleBuilder(velerov1.SchedulePhaseEnabled).CronSchedule("@every 5m").LastBackupTime("2000-01-01 00:00:00").Result(), fakeClockTime: "2017-01-01 12:00:00", - expectedBackupCreate: builder.ForBackup("ns", "name-20170101120000").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "name")).Result(), + expectedBackupCreate: builder.ForBackup("ns", "name-20170101120000").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")).Result(), expectedLastBackup: "2017-01-01 12:00:00", }, + { + name: "schedule already has backup in New 
state.", + schedule: newScheduleBuilder(velerov1.SchedulePhaseEnabled).CronSchedule("@every 5m").LastBackupTime("2000-01-01 00:00:00").Result(), + expectedPhase: string(velerov1.SchedulePhaseEnabled), + backup: builder.ForBackup("ns", "name-20220905120000").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")).Phase(velerov1.BackupPhaseNew).Result(), + }, } for _, test := range tests { @@ -126,11 +132,15 @@ func TestReconcileOfSchedule(t *testing.T) { require.Nil(t, client.Create(ctx, test.schedule)) } + if test.backup != nil { + require.Nil(t, client.Create(ctx, test.backup)) + } + _, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "name"}}) require.Nil(t, err) - schedule := &velerov1api.Schedule{} - err = client.Get(ctx, types.NamespacedName{"ns", "name"}, schedule) + schedule := &velerov1.Schedule{} + err = client.Get(ctx, types.NamespacedName{Namespace: "ns", Name: "name"}, schedule) if len(test.expectedPhase) > 0 { require.Nil(t, err) assert.Equal(t, test.expectedPhase, string(schedule.Status.Phase)) @@ -144,8 +154,19 @@ func TestReconcileOfSchedule(t *testing.T) { assert.Equal(t, parseTime(test.expectedLastBackup).Unix(), schedule.Status.LastBackup.Unix()) } - backups := &velerov1api.BackupList{} + backups := &velerov1.BackupList{} + require.Nil(t, client.List(ctx, backups)) + + // If backup associated with schedule's status is in New or InProgress, + // new backup shouldn't be submitted. + if test.backup != nil && + (test.backup.Status.Phase == velerov1.BackupPhaseNew || test.backup.Status.Phase == velerov1.BackupPhaseInProgress) { + assert.Equal(t, 1, len(backups.Items)) + require.Nil(t, client.Delete(ctx, test.backup)) + } + require.Nil(t, client.List(ctx, backups)) + if test.expectedBackupCreate == nil { assert.Equal(t, 0, len(backups.Items)) } else { @@ -161,13 +182,13 @@ func parseTime(timeString string) time.Time { } func TestGetNextRunTime(t *testing.T) { - defaultSchedule := func() *velerov1api.Schedule { + defaultSchedule := func() *velerov1.Schedule { return builder.ForSchedule("velero", "schedule-1").CronSchedule("@every 5m").Result() } tests := []struct { name string - schedule *velerov1api.Schedule + schedule *velerov1.Schedule lastRanOffset string expectedDue bool expectedNextRunTimeOffset string @@ -294,21 +315,21 @@ func TestParseCronSchedule(t *testing.T) { func TestGetBackup(t *testing.T) { tests := []struct { name string - schedule *velerov1api.Schedule + schedule *velerov1.Schedule testClockTime string - expectedBackup *velerov1api.Backup + expectedBackup *velerov1.Backup }{ { name: "ensure name is formatted correctly (AM time)", schedule: builder.ForSchedule("foo", "bar").Result(), testClockTime: "2017-07-25 09:15:00", - expectedBackup: builder.ForBackup("foo", "bar-20170725091500").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "bar")).Result(), + expectedBackup: builder.ForBackup("foo", "bar-20170725091500").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "bar")).Result(), }, { name: "ensure name is formatted correctly (PM time)", schedule: builder.ForSchedule("foo", "bar").Result(), testClockTime: "2017-07-25 14:15:00", - expectedBackup: builder.ForBackup("foo", "bar-20170725141500").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "bar")).Result(), + expectedBackup: builder.ForBackup("foo", "bar-20170725141500").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "bar")).Result(), }, { name: "ensure schedule backup template is copied", @@ -325,7 
+346,7 @@ func TestGetBackup(t *testing.T) { Result(), testClockTime: "2017-07-25 09:15:00", expectedBackup: builder.ForBackup("foo", "bar-20170725091500"). - ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "bar")). + ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "bar")). IncludedNamespaces("ns-1", "ns-2"). ExcludedNamespaces("ns-3"). IncludedResources("foo", "bar"). @@ -338,13 +359,13 @@ func TestGetBackup(t *testing.T) { name: "ensure schedule labels are copied", schedule: builder.ForSchedule("foo", "bar").ObjectMeta(builder.WithLabels("foo", "bar", "bar", "baz")).Result(), testClockTime: "2017-07-25 14:15:00", - expectedBackup: builder.ForBackup("foo", "bar-20170725141500").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "bar", "bar", "baz", "foo", "bar")).Result(), + expectedBackup: builder.ForBackup("foo", "bar-20170725141500").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "bar", "bar", "baz", "foo", "bar")).Result(), }, { name: "ensure schedule annotations are copied", schedule: builder.ForSchedule("foo", "bar").ObjectMeta(builder.WithAnnotations("foo", "bar", "bar", "baz")).Result(), testClockTime: "2017-07-25 14:15:00", - expectedBackup: builder.ForBackup("foo", "bar-20170725141500").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "bar"), builder.WithAnnotations("bar", "baz", "foo", "bar")).Result(), + expectedBackup: builder.ForBackup("foo", "bar-20170725141500").ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "bar"), builder.WithAnnotations("bar", "baz", "foo", "bar")).Result(), }, } @@ -363,3 +384,41 @@ func TestGetBackup(t *testing.T) { }) } } + +func TestCheckIfBackupInNewOrProgress(t *testing.T) { + require.Nil(t, velerov1.AddToScheme(scheme.Scheme)) + + client := fake.NewClientBuilder().WithScheme(scheme.Scheme).Build() + logger := velerotest.NewLogger() + + // Create testing schedule + testSchedule := builder.ForSchedule("ns", "name").Phase(velerov1.SchedulePhaseEnabled).Result() + err := client.Create(ctx, testSchedule) + require.NoError(t, err, "fail to create schedule in TestCheckIfBackupInNewOrProgress: %v", err) + + // Create backup in New phase. + newBackup := builder.ForBackup("ns", "backup-1"). + ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")). + Phase(velerov1.BackupPhaseNew).Result() + err = client.Create(ctx, newBackup) + require.NoError(t, err, "fail to create backup in New phase in TestCheckIfBackupInNewOrProgress: %v", err) + + reconciler := NewScheduleReconciler("ns", logger, client, metrics.NewServerMetrics()) + result := reconciler.checkIfBackupInNewOrProgress(testSchedule) + assert.True(t, result) + + // Clean backup in New phase. + err = client.Delete(ctx, newBackup) + require.NoError(t, err, "fail to delete backup in New phase in TestCheckIfBackupInNewOrProgress: %v", err) + + // Create backup in InProgress phase. + inProgressBackup := builder.ForBackup("ns", "backup-2"). + ObjectMeta(builder.WithLabels(velerov1.ScheduleNameLabel, "name")). 
+ Phase(velerov1.BackupPhaseInProgress).Result() + err = client.Create(ctx, inProgressBackup) + require.NoError(t, err, "fail to create backup in InProgress phase in TestCheckIfBackupInNewOrProgress: %v", err) + + reconciler = NewScheduleReconciler("namespace", logger, client, metrics.NewServerMetrics()) + result = reconciler.checkIfBackupInNewOrProgress(testSchedule) + assert.True(t, result) +} diff --git a/pkg/controller/server_status_request_controller.go b/pkg/controller/server_status_request_controller.go index 73b693968e..50ae606276 100644 --- a/pkg/controller/server_status_request_controller.go +++ b/pkg/controller/server_status_request_controller.go @@ -24,7 +24,6 @@ import ( "github.com/sirupsen/logrus" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/clock" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -34,6 +33,7 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/buildinfo" "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" ) const ( @@ -43,24 +43,39 @@ const ( type PluginLister interface { // List returns all PluginIdentifiers for kind. - List(kind framework.PluginKind) []framework.PluginIdentifier + List(kind common.PluginKind) []framework.PluginIdentifier } -// ServerStatusRequestReconciler reconciles a ServerStatusRequest object -type ServerStatusRequestReconciler struct { - Scheme *runtime.Scheme - Client client.Client - Ctx context.Context - PluginRegistry PluginLister - Clock clock.Clock +// serverStatusRequestReconciler reconciles a ServerStatusRequest object +type serverStatusRequestReconciler struct { + client client.Client + ctx context.Context + pluginRegistry PluginLister + clock clock.Clock - Log logrus.FieldLogger + log logrus.FieldLogger +} + +// NewServerStatusRequestReconciler initializes and returns serverStatusRequestReconciler struct. +func NewServerStatusRequestReconciler( + client client.Client, + ctx context.Context, + pluginRegistry PluginLister, + clock clock.Clock, + log logrus.FieldLogger) *serverStatusRequestReconciler { + return &serverStatusRequestReconciler{ + client: client, + ctx: ctx, + pluginRegistry: pluginRegistry, + clock: clock, + log: log, + } } // +kubebuilder:rbac:groups=velero.io,resources=serverstatusrequests,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=velero.io,resources=serverstatusrequests/status,verbs=get;update;patch -func (r *ServerStatusRequestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - log := r.Log.WithFields(logrus.Fields{ +func (r *serverStatusRequestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.log.WithFields(logrus.Fields{ "controller": ServerStatusRequest, "serverStatusRequest": req.NamespacedName, }) @@ -68,7 +83,7 @@ func (r *ServerStatusRequestReconciler) Reconcile(ctx context.Context, req ctrl. // Fetch the ServerStatusRequest instance. 
log.Debug("Getting ServerStatusRequest") statusRequest := &velerov1api.ServerStatusRequest{} - if err := r.Client.Get(r.Ctx, req.NamespacedName, statusRequest); err != nil { + if err := r.client.Get(r.ctx, req.NamespacedName, statusRequest); err != nil { if apierrors.IsNotFound(err) { log.Debug("Unable to find ServerStatusRequest") return ctrl.Result{}, nil @@ -78,7 +93,7 @@ func (r *ServerStatusRequestReconciler) Reconcile(ctx context.Context, req ctrl. return ctrl.Result{}, err } - log = r.Log.WithFields(logrus.Fields{ + log = r.log.WithFields(logrus.Fields{ "controller": ServerStatusRequest, "serverStatusRequest": req.NamespacedName, "phase": statusRequest.Status.Phase, @@ -90,23 +105,23 @@ func (r *ServerStatusRequestReconciler) Reconcile(ctx context.Context, req ctrl. original := statusRequest.DeepCopy() statusRequest.Status.ServerVersion = buildinfo.Version statusRequest.Status.Phase = velerov1api.ServerStatusRequestPhaseProcessed - statusRequest.Status.ProcessedTimestamp = &metav1.Time{Time: r.Clock.Now()} - statusRequest.Status.Plugins = velero.GetInstalledPluginInfo(r.PluginRegistry) + statusRequest.Status.ProcessedTimestamp = &metav1.Time{Time: r.clock.Now()} + statusRequest.Status.Plugins = velero.GetInstalledPluginInfo(r.pluginRegistry) - if err := r.Client.Patch(r.Ctx, statusRequest, client.MergeFrom(original)); err != nil { + if err := r.client.Patch(r.ctx, statusRequest, client.MergeFrom(original)); err != nil { log.WithError(err).Error("Error updating ServerStatusRequest status") return ctrl.Result{RequeueAfter: statusRequestResyncPeriod}, err } case velerov1api.ServerStatusRequestPhaseProcessed: log.Debug("Checking whether ServerStatusRequest has expired") expiration := statusRequest.Status.ProcessedTimestamp.Add(ttl) - if expiration.After(r.Clock.Now()) { + if expiration.After(r.clock.Now()) { log.Debug("ServerStatusRequest has not expired") return ctrl.Result{RequeueAfter: statusRequestResyncPeriod}, nil } log.Debug("ServerStatusRequest has expired, deleting it") - if err := r.Client.Delete(r.Ctx, statusRequest); err != nil { + if err := r.client.Delete(r.ctx, statusRequest); err != nil { log.WithError(err).Error("Unable to delete the request") return ctrl.Result{}, nil } @@ -119,7 +134,7 @@ func (r *ServerStatusRequestReconciler) Reconcile(ctx context.Context, req ctrl. return ctrl.Result{RequeueAfter: statusRequestResyncPeriod}, nil } -func (r *ServerStatusRequestReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *serverStatusRequestReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&velerov1api.ServerStatusRequest{}). 
WithOptions(controller.Options{ diff --git a/pkg/controller/server_status_request_controller_test.go b/pkg/controller/server_status_request_controller_test.go index d91f6d5d1c..f86ed49523 100644 --- a/pkg/controller/server_status_request_controller_test.go +++ b/pkg/controller/server_status_request_controller_test.go @@ -36,6 +36,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/buildinfo" "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -62,15 +63,15 @@ var _ = Describe("Server Status Request Reconciler", func() { func(test request) { // Setup reconciler Expect(velerov1api.AddToScheme(scheme.Scheme)).To(Succeed()) - r := ServerStatusRequestReconciler{ - Client: fake.NewFakeClientWithScheme(scheme.Scheme, test.req), - Ctx: context.Background(), - PluginRegistry: test.reqPluginLister, - Clock: clock.NewFakeClock(now), - Log: velerotest.NewLogger(), - } + r := NewServerStatusRequestReconciler( + fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(test.req).Build(), + context.Background(), + test.reqPluginLister, + clock.NewFakeClock(now), + velerotest.NewLogger(), + ) - actualResult, err := r.Reconcile(r.Ctx, ctrl.Request{ + actualResult, err := r.Reconcile(r.ctx, ctrl.Request{ NamespacedName: types.NamespacedName{ Namespace: velerov1api.DefaultNamespace, Name: test.req.Name, @@ -86,7 +87,7 @@ var _ = Describe("Server Status Request Reconciler", func() { } instance := &velerov1api.ServerStatusRequest{} - err = r.Client.Get(ctx, kbclient.ObjectKey{Name: test.req.Name, Namespace: test.req.Namespace}, instance) + err = r.client.Get(ctx, kbclient.ObjectKey{Name: test.req.Name, Namespace: test.req.Namespace}, instance) // Assertions if test.expected == nil { @@ -247,7 +248,7 @@ type fakePluginLister struct { plugins []framework.PluginIdentifier } -func (l *fakePluginLister) List(kind framework.PluginKind) []framework.PluginIdentifier { +func (l *fakePluginLister) List(kind common.PluginKind) []framework.PluginIdentifier { var plugins []framework.PluginIdentifier for _, plugin := range l.plugins { if plugin.Kind == kind { diff --git a/pkg/controller/suite_test.go b/pkg/controller/suite_test.go index b5c5b4d087..89e6159a1f 100644 --- a/pkg/controller/suite_test.go +++ b/pkg/controller/suite_test.go @@ -28,7 +28,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/persistence" persistencemocks "github.com/vmware-tanzu/velero/pkg/persistence/mocks" - "k8s.io/klog" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" diff --git a/pkg/install/deployment.go b/pkg/install/deployment.go index cf34f175f1..d14e09e94e 100644 --- a/pkg/install/deployment.go +++ b/pkg/install/deployment.go @@ -32,19 +32,19 @@ import ( type podTemplateOption func(*podTemplateConfig) type podTemplateConfig struct { - image string - envVars []corev1.EnvVar - restoreOnly bool - annotations map[string]string - labels map[string]string - resources corev1.ResourceRequirements - withSecret bool - defaultResticMaintenanceFrequency time.Duration - garbageCollectionFrequency time.Duration - plugins []string - features []string - defaultVolumesToRestic bool - uploaderType string + image string + envVars []corev1.EnvVar + restoreOnly bool + annotations map[string]string + labels map[string]string + resources corev1.ResourceRequirements + withSecret bool + defaultRepoMaintenanceFrequency time.Duration + 
garbageCollectionFrequency time.Duration + plugins []string + features []string + defaultVolumesToFsBackup bool + uploaderType string } func WithImage(image string) podTemplateOption { @@ -99,9 +99,9 @@ func WithResources(resources corev1.ResourceRequirements) podTemplateOption { } } -func WithDefaultResticMaintenanceFrequency(val time.Duration) podTemplateOption { +func WithDefaultRepoMaintenanceFrequency(val time.Duration) podTemplateOption { return func(c *podTemplateConfig) { - c.defaultResticMaintenanceFrequency = val + c.defaultRepoMaintenanceFrequency = val } } @@ -129,9 +129,9 @@ func WithUploaderType(t string) podTemplateOption { } } -func WithDefaultVolumesToRestic() podTemplateOption { +func WithDefaultVolumesToFsBackup() podTemplateOption { return func(c *podTemplateConfig) { - c.defaultVolumesToRestic = true + c.defaultVolumesToFsBackup = true } } @@ -157,8 +157,8 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment args = append(args, fmt.Sprintf("--features=%s", strings.Join(c.features, ","))) } - if c.defaultVolumesToRestic { - args = append(args, "--default-volumes-to-restic=true") + if c.defaultVolumesToFsBackup { + args = append(args, "--default-volumes-to-fs-backup=true") } if len(c.uploaderType) > 0 { @@ -288,8 +288,8 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment deployment.Spec.Template.Spec.Containers[0].Args = append(deployment.Spec.Template.Spec.Containers[0].Args, "--restore-only") } - if c.defaultResticMaintenanceFrequency > 0 { - deployment.Spec.Template.Spec.Containers[0].Args = append(deployment.Spec.Template.Spec.Containers[0].Args, fmt.Sprintf("--default-restic-prune-frequency=%v", c.defaultResticMaintenanceFrequency)) + if c.defaultRepoMaintenanceFrequency > 0 { + deployment.Spec.Template.Spec.Containers[0].Args = append(deployment.Spec.Template.Spec.Containers[0].Args, fmt.Sprintf("--default-repo-maintain-frequency=%v", c.defaultRepoMaintenanceFrequency)) } if c.garbageCollectionFrequency > 0 { diff --git a/pkg/install/deployment_test.go b/pkg/install/deployment_test.go index ef5f871a06..604af44dfd 100644 --- a/pkg/install/deployment_test.go +++ b/pkg/install/deployment_test.go @@ -46,9 +46,9 @@ func TestDeployment(t *testing.T) { assert.Equal(t, 7, len(deploy.Spec.Template.Spec.Containers[0].Env)) assert.Equal(t, 3, len(deploy.Spec.Template.Spec.Volumes)) - deploy = Deployment("velero", WithDefaultResticMaintenanceFrequency(24*time.Hour)) + deploy = Deployment("velero", WithDefaultRepoMaintenanceFrequency(24*time.Hour)) assert.Len(t, deploy.Spec.Template.Spec.Containers[0].Args, 2) - assert.Equal(t, "--default-restic-prune-frequency=24h0m0s", deploy.Spec.Template.Spec.Containers[0].Args[1]) + assert.Equal(t, "--default-repo-maintain-frequency=24h0m0s", deploy.Spec.Template.Spec.Containers[0].Args[1]) deploy = Deployment("velero", WithGarbageCollectionFrequency(24*time.Hour)) assert.Len(t, deploy.Spec.Template.Spec.Containers[0].Args, 2) diff --git a/pkg/install/install.go b/pkg/install/install.go index ebe4c3751e..e163be95ef 100644 --- a/pkg/install/install.go +++ b/pkg/install/install.go @@ -208,7 +208,7 @@ func DeploymentIsReady(factory client.DynamicFactory, namespace string) (bool, e } // DaemonSetIsReady will poll the kubernetes API server to ensure the restic daemonset is ready, i.e. that -// pods are scheduled and available on all of the the desired nodes. +// pods are scheduled and available on all of the desired nodes. 
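// Illustrative sketch (commentary added for review, not part of this change):
// with the renamed option functions above, a caller of this package could
// build the server Deployment roughly as follows, assuming the helpers keep
// the signatures shown in this diff:
//
//	deploy := Deployment("velero",
//		WithDefaultRepoMaintenanceFrequency(24*time.Hour),
//		WithDefaultVolumesToFsBackup(),
//	)
//
// which would render the "--default-repo-maintain-frequency=24h0m0s" and
// "--default-volumes-to-fs-backup=true" container args asserted in
// deployment_test.go above.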
func DaemonSetIsReady(factory client.DynamicFactory, namespace string) (bool, error) { gvk := schema.FromAPIVersionAndKind(appsv1.SchemeGroupVersion.String(), "DaemonSet") apiResource := metav1.APIResource{ diff --git a/pkg/install/resources.go b/pkg/install/resources.go index 7053b87c59..aa9b5f237e 100644 --- a/pkg/install/resources.go +++ b/pkg/install/resources.go @@ -209,30 +209,30 @@ func appendUnstructured(list *unstructured.UnstructuredList, obj runtime.Object) } type VeleroOptions struct { - Namespace string - Image string - ProviderName string - Bucket string - Prefix string - PodAnnotations map[string]string - PodLabels map[string]string - ServiceAccountAnnotations map[string]string - VeleroPodResources corev1.ResourceRequirements - ResticPodResources corev1.ResourceRequirements - SecretData []byte - RestoreOnly bool - UseRestic bool - UseVolumeSnapshots bool - BSLConfig map[string]string - VSLConfig map[string]string - DefaultResticMaintenanceFrequency time.Duration - GarbageCollectionFrequency time.Duration - Plugins []string - NoDefaultBackupLocation bool - CACertData []byte - Features []string - DefaultVolumesToRestic bool - UploaderType string + Namespace string + Image string + ProviderName string + Bucket string + Prefix string + PodAnnotations map[string]string + PodLabels map[string]string + ServiceAccountAnnotations map[string]string + VeleroPodResources corev1.ResourceRequirements + ResticPodResources corev1.ResourceRequirements + SecretData []byte + RestoreOnly bool + UseRestic bool + UseVolumeSnapshots bool + BSLConfig map[string]string + VSLConfig map[string]string + DefaultRepoMaintenanceFrequency time.Duration + GarbageCollectionFrequency time.Duration + Plugins []string + NoDefaultBackupLocation bool + CACertData []byte + Features []string + DefaultVolumesToFsBackup bool + UploaderType string } func AllCRDs() *unstructured.UnstructuredList { @@ -272,7 +272,7 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList { appendUnstructured(resources, bsl) } - // A snapshot location may not be desirable for users relying on restic + // A snapshot location may not be desirable for users relying on pod volume backup/restore if o.UseVolumeSnapshots { vsl := VolumeSnapshotLocation(o.Namespace, o.ProviderName, o.VSLConfig) appendUnstructured(resources, vsl) @@ -286,7 +286,7 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList { WithImage(o.Image), WithResources(o.VeleroPodResources), WithSecret(secretPresent), - WithDefaultResticMaintenanceFrequency(o.DefaultResticMaintenanceFrequency), + WithDefaultRepoMaintenanceFrequency(o.DefaultRepoMaintenanceFrequency), WithGarbageCollectionFrequency(o.GarbageCollectionFrequency), WithUploaderType(o.UploaderType), } @@ -303,8 +303,8 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList { deployOpts = append(deployOpts, WithPlugins(o.Plugins)) } - if o.DefaultVolumesToRestic { - deployOpts = append(deployOpts, WithDefaultVolumesToRestic()) + if o.DefaultVolumesToFsBackup { + deployOpts = append(deployOpts, WithDefaultVolumesToFsBackup()) } deploy := Deployment(o.Namespace, deployOpts...) diff --git a/pkg/nodeagent/node_agent.go b/pkg/nodeagent/node_agent.go new file mode 100644 index 0000000000..fcfd10931f --- /dev/null +++ b/pkg/nodeagent/node_agent.go @@ -0,0 +1,75 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodeagent + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "k8s.io/client-go/kubernetes" + + "github.com/vmware-tanzu/velero/pkg/util/kube" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +const ( + // daemonSet is the name of the Velero node agent daemonset. + daemonSet = "restic" +) + +var ( + DaemonsetNotFound = errors.New("daemonset not found") +) + +// IsRunning checks if the node agent daemonset is running properly. If not, return the error found +func IsRunning(ctx context.Context, kubeClient kubernetes.Interface, namespace string) error { + if _, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonSet, metav1.GetOptions{}); apierrors.IsNotFound(err) { + return DaemonsetNotFound + } else if err != nil { + return err + } else { + return nil + } +} + +// IsRunningInNode checks if the node agent pod is running properly in a specified node. If not, return the error found +func IsRunningInNode(ctx context.Context, namespace string, nodeName string, podClient corev1client.PodsGetter) error { + if nodeName == "" { + return errors.New("node name is empty") + } + + pods, err := podClient.Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("name=%s", daemonSet)}) + if err != nil { + return errors.Wrap(err, "failed to list daemonset pods") + } + + for _, pod := range pods.Items { + if kube.IsPodRunning(&pod) != nil { + continue + } + + if pod.Spec.NodeName == nodeName { + return nil + } + } + + return errors.Errorf("daemonset pod not found in running state in node %s", nodeName) +} diff --git a/pkg/persistence/object_store_layout.go b/pkg/persistence/object_store_layout.go index cad7479e0f..7042c40b37 100644 --- a/pkg/persistence/object_store_layout.go +++ b/pkg/persistence/object_store_layout.go @@ -40,6 +40,7 @@ func NewObjectStoreLayout(prefix string) *ObjectStoreLayout { "restic": path.Join(prefix, "restic") + "/", "metadata": path.Join(prefix, "metadata") + "/", "plugins": path.Join(prefix, "plugins") + "/", + "kopia": path.Join(prefix, "kopia") + "/", } return &ObjectStoreLayout{ diff --git a/pkg/plugin/clientmgmt/restartable_backup_item_action.go b/pkg/plugin/clientmgmt/backupitemaction/v1/restartable_backup_item_action.go similarity index 52% rename from pkg/plugin/clientmgmt/restartable_backup_item_action.go rename to pkg/plugin/clientmgmt/backupitemaction/v1/restartable_backup_item_action.go index c8e96ab805..6cbca3f525 100644 --- a/pkg/plugin/clientmgmt/restartable_backup_item_action.go +++ b/pkg/plugin/clientmgmt/backupitemaction/v1/restartable_backup_item_action.go @@ -14,44 +14,65 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clientmgmt +package v1 import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + biav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v1" ) -// restartableBackupItemAction is a backup item action for a given implementation (such as "pod"). It is associated with +// AdaptedBackupItemAction is a backup item action adapted to the v1 BackupItemAction API +type AdaptedBackupItemAction struct { + Kind common.PluginKind + + // Get returns a restartable BackupItemAction for the given name and process, wrapping if necessary + GetRestartable func(name string, restartableProcess process.RestartableProcess) biav1.BackupItemAction +} + +func AdaptedBackupItemActions() []AdaptedBackupItemAction { + return []AdaptedBackupItemAction{ + { + Kind: common.PluginKindBackupItemAction, + GetRestartable: func(name string, restartableProcess process.RestartableProcess) biav1.BackupItemAction { + return NewRestartableBackupItemAction(name, restartableProcess) + }, + }, + } +} + +// RestartableBackupItemAction is a backup item action for a given implementation (such as "pod"). It is associated with // a restartableProcess, which may be shared and used to run multiple plugins. At the beginning of each method // call, the restartableBackupItemAction asks its restartableProcess to restart itself if needed (e.g. if the // process terminated for any reason), then it proceeds with the actual call. -type restartableBackupItemAction struct { - key kindAndName - sharedPluginProcess RestartableProcess +type RestartableBackupItemAction struct { + Key process.KindAndName + SharedPluginProcess process.RestartableProcess } -// newRestartableBackupItemAction returns a new restartableBackupItemAction. -func newRestartableBackupItemAction(name string, sharedPluginProcess RestartableProcess) *restartableBackupItemAction { - r := &restartableBackupItemAction{ - key: kindAndName{kind: framework.PluginKindBackupItemAction, name: name}, - sharedPluginProcess: sharedPluginProcess, +// NewRestartableBackupItemAction returns a new RestartableBackupItemAction. +func NewRestartableBackupItemAction(name string, sharedPluginProcess process.RestartableProcess) *RestartableBackupItemAction { + r := &RestartableBackupItemAction{ + Key: process.KindAndName{Kind: common.PluginKindBackupItemAction, Name: name}, + SharedPluginProcess: sharedPluginProcess, } return r } // getBackupItemAction returns the backup item action for this restartableBackupItemAction. It does *not* restart the // plugin process. 
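// Illustrative sketch (commentary added for review, not part of this change):
// the AdaptedBackupItemActions slice above is the extension point described in
// the plugin-versioning design. A hypothetical later API version would be
// wired in by appending another adapter entry, for example:
//
//	AdaptedBackupItemAction{
//		Kind: common.PluginKindBackupItemActionV2, // hypothetical kind, not defined in this diff
//		GetRestartable: func(name string, p process.RestartableProcess) biav1.BackupItemAction {
//			return newAdaptedV2BackupItemAction(name, p) // hypothetical wrapper, assumption only
//		},
//	}
//
// so newer plugin versions can still be handed back to callers through the v1
// BackupItemAction interface they already consume.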
-func (r *restartableBackupItemAction) getBackupItemAction() (velero.BackupItemAction, error) { - plugin, err := r.sharedPluginProcess.getByKindAndName(r.key) +func (r *RestartableBackupItemAction) getBackupItemAction() (biav1.BackupItemAction, error) { + plugin, err := r.SharedPluginProcess.GetByKindAndName(r.Key) if err != nil { return nil, err } - backupItemAction, ok := plugin.(velero.BackupItemAction) + backupItemAction, ok := plugin.(biav1.BackupItemAction) if !ok { return nil, errors.Errorf("%T is not a BackupItemAction!", plugin) } @@ -60,8 +81,8 @@ func (r *restartableBackupItemAction) getBackupItemAction() (velero.BackupItemAc } // getDelegate restarts the plugin process (if needed) and returns the backup item action for this restartableBackupItemAction. -func (r *restartableBackupItemAction) getDelegate() (velero.BackupItemAction, error) { - if err := r.sharedPluginProcess.resetIfNeeded(); err != nil { +func (r *RestartableBackupItemAction) getDelegate() (biav1.BackupItemAction, error) { + if err := r.SharedPluginProcess.ResetIfNeeded(); err != nil { return nil, err } @@ -69,7 +90,7 @@ func (r *restartableBackupItemAction) getDelegate() (velero.BackupItemAction, er } // AppliesTo restarts the plugin's process if needed, then delegates the call. -func (r *restartableBackupItemAction) AppliesTo() (velero.ResourceSelector, error) { +func (r *RestartableBackupItemAction) AppliesTo() (velero.ResourceSelector, error) { delegate, err := r.getDelegate() if err != nil { return velero.ResourceSelector{}, err @@ -79,7 +100,7 @@ func (r *restartableBackupItemAction) AppliesTo() (velero.ResourceSelector, erro } // Execute restarts the plugin's process if needed, then delegates the call. -func (r *restartableBackupItemAction) Execute(item runtime.Unstructured, backup *api.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { +func (r *RestartableBackupItemAction) Execute(item runtime.Unstructured, backup *api.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { delegate, err := r.getDelegate() if err != nil { return nil, nil, err diff --git a/pkg/plugin/clientmgmt/restartable_backup_item_action_test.go b/pkg/plugin/clientmgmt/backupitemaction/v1/restartable_backup_item_action_test.go similarity index 60% rename from pkg/plugin/clientmgmt/restartable_backup_item_action_test.go rename to pkg/plugin/clientmgmt/backupitemaction/v1/restartable_backup_item_action_test.go index 7f528e73a7..53bf0e4de8 100644 --- a/pkg/plugin/clientmgmt/restartable_backup_item_action_test.go +++ b/pkg/plugin/clientmgmt/backupitemaction/v1/restartable_backup_item_action_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clientmgmt +package v1 import ( "testing" @@ -25,10 +25,12 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/vmware-tanzu/velero/internal/restartabletest" v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/backup/mocks" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + mocks "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks/backupitemaction/v1" ) func TestRestartableGetBackupItemAction(t *testing.T) { @@ -50,20 +52,20 @@ func TestRestartableGetBackupItemAction(t *testing.T) { }, { name: "happy path", - plugin: new(mocks.ItemAction), + plugin: new(mocks.BackupItemAction), }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) defer p.AssertExpectations(t) name := "pod" - key := kindAndName{kind: framework.PluginKindBackupItemAction, name: name} - p.On("getByKindAndName", key).Return(tc.plugin, tc.getError) + key := process.KindAndName{Kind: common.PluginKindBackupItemAction, Name: name} + p.On("GetByKindAndName", key).Return(tc.plugin, tc.getError) - r := newRestartableBackupItemAction(name, p) + r := NewRestartableBackupItemAction(name, p) a, err := r.getBackupItemAction() if tc.expectedError != "" { assert.EqualError(t, err, tc.expectedError) @@ -77,22 +79,22 @@ func TestRestartableGetBackupItemAction(t *testing.T) { } func TestRestartableBackupItemActionGetDelegate(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) defer p.AssertExpectations(t) // Reset error - p.On("resetIfNeeded").Return(errors.Errorf("reset error")).Once() + p.On("ResetIfNeeded").Return(errors.Errorf("reset error")).Once() name := "pod" - r := newRestartableBackupItemAction(name, p) + r := NewRestartableBackupItemAction(name, p) a, err := r.getDelegate() assert.Nil(t, a) assert.EqualError(t, err, "reset error") // Happy path - p.On("resetIfNeeded").Return(nil) - expected := new(mocks.ItemAction) - key := kindAndName{kind: framework.PluginKindBackupItemAction, name: name} - p.On("getByKindAndName", key).Return(expected, nil) + p.On("ResetIfNeeded").Return(nil) + expected := new(mocks.BackupItemAction) + key := process.KindAndName{Kind: common.PluginKindBackupItemAction, Name: name} + p.On("GetByKindAndName", key).Return(expected, nil) a, err = r.getDelegate() assert.NoError(t, err) @@ -120,29 +122,29 @@ func TestRestartableBackupItemActionDelegatedFunctions(t *testing.T) { }, } - runRestartableDelegateTests( + restartabletest.RunRestartableDelegateTests( t, - framework.PluginKindBackupItemAction, - func(key kindAndName, p RestartableProcess) interface{} { - return &restartableBackupItemAction{ - key: key, - sharedPluginProcess: p, + common.PluginKindBackupItemAction, + func(key process.KindAndName, p process.RestartableProcess) interface{} { + return &RestartableBackupItemAction{ + Key: key, + SharedPluginProcess: p, } }, - func() mockable { - return new(mocks.ItemAction) + func() restartabletest.Mockable { + return new(mocks.BackupItemAction) }, - restartableDelegateTest{ - function: "AppliesTo", - inputs: []interface{}{}, - expectedErrorOutputs: []interface{}{velero.ResourceSelector{}, errors.Errorf("reset error")}, - expectedDelegateOutputs: 
[]interface{}{velero.ResourceSelector{IncludedNamespaces: []string{"a"}}, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "AppliesTo", + Inputs: []interface{}{}, + ExpectedErrorOutputs: []interface{}{velero.ResourceSelector{}, errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{velero.ResourceSelector{IncludedNamespaces: []string{"a"}}, errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "Execute", - inputs: []interface{}{pv, b}, - expectedErrorOutputs: []interface{}{nil, ([]velero.ResourceIdentifier)(nil), errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{pvToReturn, additionalItems, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "Execute", + Inputs: []interface{}{pv, b}, + ExpectedErrorOutputs: []interface{}{nil, ([]velero.ResourceIdentifier)(nil), errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{pvToReturn, additionalItems, errors.Errorf("delegate error")}, }, ) } diff --git a/pkg/plugin/clientmgmt/manager.go b/pkg/plugin/clientmgmt/manager.go index 9dc2ff3274..b94d986fa6 100644 --- a/pkg/plugin/clientmgmt/manager.go +++ b/pkg/plugin/clientmgmt/manager.go @@ -17,15 +17,23 @@ limitations under the License. package clientmgmt import ( + "errors" + "fmt" "strings" "sync" - v1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" - "github.com/sirupsen/logrus" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + biav1cli "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/backupitemaction/v1" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + riav1cli "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/restoreitemaction/v1" + vsv1cli "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/volumesnapshotter/v1" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + biav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v1" + isv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" + vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" ) // Manager manages the lifecycles of plugins. @@ -34,19 +42,19 @@ type Manager interface { GetObjectStore(name string) (velero.ObjectStore, error) // GetVolumeSnapshotter returns the VolumeSnapshotter plugin for name. - GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) + GetVolumeSnapshotter(name string) (vsv1.VolumeSnapshotter, error) - // GetBackupItemActions returns all backup item action plugins. - GetBackupItemActions() ([]velero.BackupItemAction, error) + // GetBackupItemActions returns all v1 backup item action plugins. + GetBackupItemActions() ([]biav1.BackupItemAction, error) // GetBackupItemAction returns the backup item action plugin for name. - GetBackupItemAction(name string) (velero.BackupItemAction, error) + GetBackupItemAction(name string) (biav1.BackupItemAction, error) // GetRestoreItemActions returns all restore item action plugins. - GetRestoreItemActions() ([]velero.RestoreItemAction, error) + GetRestoreItemActions() ([]riav1.RestoreItemAction, error) // GetRestoreItemAction returns the restore item action plugin for name. 
- GetRestoreItemAction(name string) (velero.RestoreItemAction, error) + GetRestoreItemAction(name string) (riav1.RestoreItemAction, error) // GetDeleteItemActions returns all delete item action plugins. GetDeleteItemActions() ([]velero.DeleteItemAction, error) @@ -55,38 +63,41 @@ type Manager interface { GetDeleteItemAction(name string) (velero.DeleteItemAction, error) // GetItemSnapshotter returns the item snapshotter plugin for name - GetItemSnapshotter(name string) (v1.ItemSnapshotter, error) + GetItemSnapshotter(name string) (isv1.ItemSnapshotter, error) // GetItemSnapshotters returns all item snapshotter plugins - GetItemSnapshotters() ([]v1.ItemSnapshotter, error) + GetItemSnapshotters() ([]isv1.ItemSnapshotter, error) // CleanupClients terminates all of the Manager's running plugin processes. CleanupClients() } +// Used checking for adapted plugin versions +var pluginNotFoundErrType = &process.PluginNotFoundError{} + // manager implements Manager. type manager struct { logger logrus.FieldLogger logLevel logrus.Level - registry Registry + registry process.Registry - restartableProcessFactory RestartableProcessFactory + restartableProcessFactory process.RestartableProcessFactory // lock guards restartableProcesses lock sync.Mutex - restartableProcesses map[string]RestartableProcess + restartableProcesses map[string]process.RestartableProcess } // NewManager constructs a manager for getting plugins. -func NewManager(logger logrus.FieldLogger, level logrus.Level, registry Registry) Manager { +func NewManager(logger logrus.FieldLogger, level logrus.Level, registry process.Registry) Manager { return &manager{ logger: logger, logLevel: level, registry: registry, - restartableProcessFactory: newRestartableProcessFactory(), + restartableProcessFactory: process.NewRestartableProcessFactory(), - restartableProcesses: make(map[string]RestartableProcess), + restartableProcesses: make(map[string]process.RestartableProcess), } } @@ -94,7 +105,7 @@ func (m *manager) CleanupClients() { m.lock.Lock() for _, restartableProcess := range m.restartableProcesses { - restartableProcess.stop() + restartableProcess.Stop() } m.lock.Unlock() @@ -102,7 +113,7 @@ func (m *manager) CleanupClients() { // getRestartableProcess returns a restartableProcess for a plugin identified by kind and name, creating a // restartableProcess if it is the first time it has been requested. 
-func (m *manager) getRestartableProcess(kind framework.PluginKind, name string) (RestartableProcess, error) { +func (m *manager) getRestartableProcess(kind common.PluginKind, name string) (process.RestartableProcess, error) { m.lock.Lock() defer m.lock.Unlock() @@ -127,7 +138,7 @@ func (m *manager) getRestartableProcess(kind framework.PluginKind, name string) logger.Debug("creating new restartable plugin process") - restartableProcess, err = m.restartableProcessFactory.newRestartableProcess(info.Command, m.logger, m.logLevel) + restartableProcess, err = m.restartableProcessFactory.NewRestartableProcess(info.Command, m.logger, m.logLevel) if err != nil { return nil, err } @@ -141,35 +152,39 @@ func (m *manager) getRestartableProcess(kind framework.PluginKind, name string) func (m *manager) GetObjectStore(name string) (velero.ObjectStore, error) { name = sanitizeName(name) - restartableProcess, err := m.getRestartableProcess(framework.PluginKindObjectStore, name) + restartableProcess, err := m.getRestartableProcess(common.PluginKindObjectStore, name) if err != nil { return nil, err } - r := newRestartableObjectStore(name, restartableProcess) + r := NewRestartableObjectStore(name, restartableProcess) return r, nil } // GetVolumeSnapshotter returns a restartableVolumeSnapshotter for name. -func (m *manager) GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) { +func (m *manager) GetVolumeSnapshotter(name string) (vsv1.VolumeSnapshotter, error) { name = sanitizeName(name) - restartableProcess, err := m.getRestartableProcess(framework.PluginKindVolumeSnapshotter, name) - if err != nil { - return nil, err + for _, adaptedVolumeSnapshotter := range vsv1cli.AdaptedVolumeSnapshotters() { + restartableProcess, err := m.getRestartableProcess(adaptedVolumeSnapshotter.Kind, name) + // Check if plugin was not found + if errors.As(err, &pluginNotFoundErrType) { + continue + } + if err != nil { + return nil, err + } + return adaptedVolumeSnapshotter.GetRestartable(name, restartableProcess), nil } - - r := newRestartableVolumeSnapshotter(name, restartableProcess) - - return r, nil + return nil, fmt.Errorf("unable to get valid VolumeSnapshotter for %q", name) } // GetBackupItemActions returns all backup item actions as restartableBackupItemActions. -func (m *manager) GetBackupItemActions() ([]velero.BackupItemAction, error) { - list := m.registry.List(framework.PluginKindBackupItemAction) +func (m *manager) GetBackupItemActions() ([]biav1.BackupItemAction, error) { + list := m.registry.List(common.PluginKindBackupItemAction) - actions := make([]velero.BackupItemAction, 0, len(list)) + actions := make([]biav1.BackupItemAction, 0, len(list)) for i := range list { id := list[i] @@ -186,23 +201,28 @@ func (m *manager) GetBackupItemActions() ([]velero.BackupItemAction, error) { } // GetBackupItemAction returns a restartableBackupItemAction for name. 
-func (m *manager) GetBackupItemAction(name string) (velero.BackupItemAction, error) { +func (m *manager) GetBackupItemAction(name string) (biav1.BackupItemAction, error) { name = sanitizeName(name) - restartableProcess, err := m.getRestartableProcess(framework.PluginKindBackupItemAction, name) - if err != nil { - return nil, err + for _, adaptedBackupItemAction := range biav1cli.AdaptedBackupItemActions() { + restartableProcess, err := m.getRestartableProcess(adaptedBackupItemAction.Kind, name) + // Check if plugin was not found + if errors.As(err, &pluginNotFoundErrType) { + continue + } + if err != nil { + return nil, err + } + return adaptedBackupItemAction.GetRestartable(name, restartableProcess), nil } - - r := newRestartableBackupItemAction(name, restartableProcess) - return r, nil + return nil, fmt.Errorf("unable to get valid BackupItemAction for %q", name) } // GetRestoreItemActions returns all restore item actions as restartableRestoreItemActions. -func (m *manager) GetRestoreItemActions() ([]velero.RestoreItemAction, error) { - list := m.registry.List(framework.PluginKindRestoreItemAction) +func (m *manager) GetRestoreItemActions() ([]riav1.RestoreItemAction, error) { + list := m.registry.List(common.PluginKindRestoreItemAction) - actions := make([]velero.RestoreItemAction, 0, len(list)) + actions := make([]riav1.RestoreItemAction, 0, len(list)) for i := range list { id := list[i] @@ -219,21 +239,26 @@ func (m *manager) GetRestoreItemActions() ([]velero.RestoreItemAction, error) { } // GetRestoreItemAction returns a restartableRestoreItemAction for name. -func (m *manager) GetRestoreItemAction(name string) (velero.RestoreItemAction, error) { +func (m *manager) GetRestoreItemAction(name string) (riav1.RestoreItemAction, error) { name = sanitizeName(name) - restartableProcess, err := m.getRestartableProcess(framework.PluginKindRestoreItemAction, name) - if err != nil { - return nil, err + for _, adaptedRestoreItemAction := range riav1cli.AdaptedRestoreItemActions() { + restartableProcess, err := m.getRestartableProcess(adaptedRestoreItemAction.Kind, name) + // Check if plugin was not found + if errors.As(err, &pluginNotFoundErrType) { + continue + } + if err != nil { + return nil, err + } + return adaptedRestoreItemAction.GetRestartable(name, restartableProcess), nil } - - r := newRestartableRestoreItemAction(name, restartableProcess) - return r, nil + return nil, fmt.Errorf("unable to get valid RestoreItemAction for %q", name) } // GetDeleteItemActions returns all delete item actions as restartableDeleteItemActions. 
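// Illustrative note (commentary added for review, not part of this change):
// GetVolumeSnapshotter, GetBackupItemAction, and GetRestoreItemAction above
// all follow the same adapted-version lookup, roughly:
//
//	for _, adapted := range adaptedPlugins() { // stand-in for the per-kind Adapted*() helpers
//		p, err := m.getRestartableProcess(adapted.Kind, name)
//		if errors.As(err, &pluginNotFoundErrType) {
//			continue // this kind is not registered; try the next adapted version
//		}
//		if err != nil {
//			return nil, err
//		}
//		return adapted.GetRestartable(name, p), nil
//	}
//	return nil, fmt.Errorf("unable to get valid <plugin kind> for %q", name)
//
// errors.As is used rather than a direct comparison so that a wrapped
// process.PluginNotFoundError still selects the "try the next version" path.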
func (m *manager) GetDeleteItemActions() ([]velero.DeleteItemAction, error) { - list := m.registry.List(framework.PluginKindDeleteItemAction) + list := m.registry.List(common.PluginKindDeleteItemAction) actions := make([]velero.DeleteItemAction, 0, len(list)) @@ -255,31 +280,31 @@ func (m *manager) GetDeleteItemActions() ([]velero.DeleteItemAction, error) { func (m *manager) GetDeleteItemAction(name string) (velero.DeleteItemAction, error) { name = sanitizeName(name) - restartableProcess, err := m.getRestartableProcess(framework.PluginKindDeleteItemAction, name) + restartableProcess, err := m.getRestartableProcess(common.PluginKindDeleteItemAction, name) if err != nil { return nil, err } - r := newRestartableDeleteItemAction(name, restartableProcess) + r := NewRestartableDeleteItemAction(name, restartableProcess) return r, nil } -func (m *manager) GetItemSnapshotter(name string) (v1.ItemSnapshotter, error) { +func (m *manager) GetItemSnapshotter(name string) (isv1.ItemSnapshotter, error) { name = sanitizeName(name) - restartableProcess, err := m.getRestartableProcess(framework.PluginKindItemSnapshotter, name) + restartableProcess, err := m.getRestartableProcess(common.PluginKindItemSnapshotter, name) if err != nil { return nil, err } - r := newRestartableItemSnapshotter(name, restartableProcess) + r := NewRestartableItemSnapshotter(name, restartableProcess) return r, nil } -func (m *manager) GetItemSnapshotters() ([]v1.ItemSnapshotter, error) { - list := m.registry.List(framework.PluginKindItemSnapshotter) +func (m *manager) GetItemSnapshotters() ([]isv1.ItemSnapshotter, error) { + list := m.registry.List(common.PluginKindItemSnapshotter) - actions := make([]v1.ItemSnapshotter, 0, len(list)) + actions := make([]isv1.ItemSnapshotter, 0, len(list)) for i := range list { id := list[i] diff --git a/pkg/plugin/clientmgmt/manager_test.go b/pkg/plugin/clientmgmt/manager_test.go index cc36bc6531..17b51a52b2 100644 --- a/pkg/plugin/clientmgmt/manager_test.go +++ b/pkg/plugin/clientmgmt/manager_test.go @@ -26,7 +26,13 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/vmware-tanzu/velero/internal/restartabletest" + biav1cli "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/backupitemaction/v1" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + riav1cli "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/restoreitemaction/v1" + vsv1cli "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/volumesnapshotter/v1" "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/test" ) @@ -39,12 +45,12 @@ func (r *mockRegistry) DiscoverPlugins() error { return args.Error(0) } -func (r *mockRegistry) List(kind framework.PluginKind) []framework.PluginIdentifier { +func (r *mockRegistry) List(kind common.PluginKind) []framework.PluginIdentifier { args := r.Called(kind) return args.Get(0).([]framework.PluginIdentifier) } -func (r *mockRegistry) Get(kind framework.PluginKind, name string) (framework.PluginIdentifier, error) { +func (r *mockRegistry) Get(kind common.PluginKind, name string) (framework.PluginIdentifier, error) { args := r.Called(kind, name) var id framework.PluginIdentifier if args.Get(0) != nil { @@ -72,42 +78,15 @@ type mockRestartableProcessFactory struct { mock.Mock } -func (f *mockRestartableProcessFactory) newRestartableProcess(command string, logger logrus.FieldLogger, logLevel logrus.Level) (RestartableProcess, error) { +func (f 
*mockRestartableProcessFactory) NewRestartableProcess(command string, logger logrus.FieldLogger, logLevel logrus.Level) (process.RestartableProcess, error) { args := f.Called(command, logger, logLevel) - var rp RestartableProcess + var rp process.RestartableProcess if args.Get(0) != nil { - rp = args.Get(0).(RestartableProcess) + rp = args.Get(0).(process.RestartableProcess) } return rp, args.Error(1) } -type mockRestartableProcess struct { - mock.Mock -} - -func (rp *mockRestartableProcess) addReinitializer(key kindAndName, r reinitializer) { - rp.Called(key, r) -} - -func (rp *mockRestartableProcess) reset() error { - args := rp.Called() - return args.Error(0) -} - -func (rp *mockRestartableProcess) resetIfNeeded() error { - args := rp.Called() - return args.Error(0) -} - -func (rp *mockRestartableProcess) getByKindAndName(key kindAndName) (interface{}, error) { - args := rp.Called(key) - return args.Get(0), args.Error(1) -} - -func (rp *mockRestartableProcess) stop() { - rp.Called() -} - func TestGetRestartableProcess(t *testing.T) { logger := test.NewLogger() logLevel := logrus.InfoLevel @@ -121,7 +100,7 @@ func TestGetRestartableProcess(t *testing.T) { m.restartableProcessFactory = factory // Test 1: registry error - pluginKind := framework.PluginKindBackupItemAction + pluginKind := common.PluginKindBackupItemAction pluginName := "pod" registry.On("Get", pluginKind, pluginName).Return(nil, errors.Errorf("registry")).Once() rp, err := m.getRestartableProcess(pluginKind, pluginName) @@ -135,15 +114,15 @@ func TestGetRestartableProcess(t *testing.T) { Name: pluginName, } registry.On("Get", pluginKind, pluginName).Return(podID, nil) - factory.On("newRestartableProcess", podID.Command, logger, logLevel).Return(nil, errors.Errorf("factory")).Once() + factory.On("NewRestartableProcess", podID.Command, logger, logLevel).Return(nil, errors.Errorf("factory")).Once() rp, err = m.getRestartableProcess(pluginKind, pluginName) assert.Nil(t, rp) assert.EqualError(t, err, "factory") // Test 3: registry ok, factory ok - restartableProcess := &mockRestartableProcess{} + restartableProcess := &restartabletest.MockRestartableProcess{} defer restartableProcess.AssertExpectations(t) - factory.On("newRestartableProcess", podID.Command, logger, logLevel).Return(restartableProcess, nil).Once() + factory.On("NewRestartableProcess", podID.Command, logger, logLevel).Return(restartableProcess, nil).Once() rp, err = m.getRestartableProcess(pluginKind, pluginName) require.NoError(t, err) assert.Equal(t, restartableProcess, rp) @@ -164,9 +143,9 @@ func TestCleanupClients(t *testing.T) { m := NewManager(logger, logLevel, registry).(*manager) for i := 0; i < 5; i++ { - rp := &mockRestartableProcess{} + rp := &restartabletest.MockRestartableProcess{} defer rp.AssertExpectations(t) - rp.On("stop") + rp.On("Stop") m.restartableProcesses[fmt.Sprintf("rp%d", i)] = rp } @@ -175,14 +154,14 @@ func TestCleanupClients(t *testing.T) { func TestGetObjectStore(t *testing.T) { getPluginTest(t, - framework.PluginKindObjectStore, + common.PluginKindObjectStore, "velero.io/aws", func(m Manager, name string) (interface{}, error) { return m.GetObjectStore(name) }, - func(name string, sharedPluginProcess RestartableProcess) interface{} { + func(name string, sharedPluginProcess process.RestartableProcess) interface{} { return &restartableObjectStore{ - key: kindAndName{kind: framework.PluginKindObjectStore, name: name}, + key: process.KindAndName{Kind: common.PluginKindObjectStore, Name: name}, sharedPluginProcess: sharedPluginProcess, } 
}, @@ -192,15 +171,15 @@ func TestGetObjectStore(t *testing.T) { func TestGetVolumeSnapshotter(t *testing.T) { getPluginTest(t, - framework.PluginKindVolumeSnapshotter, + common.PluginKindVolumeSnapshotter, "velero.io/aws", func(m Manager, name string) (interface{}, error) { return m.GetVolumeSnapshotter(name) }, - func(name string, sharedPluginProcess RestartableProcess) interface{} { - return &restartableVolumeSnapshotter{ - key: kindAndName{kind: framework.PluginKindVolumeSnapshotter, name: name}, - sharedPluginProcess: sharedPluginProcess, + func(name string, sharedPluginProcess process.RestartableProcess) interface{} { + return &vsv1cli.RestartableVolumeSnapshotter{ + Key: process.KindAndName{Kind: common.PluginKindVolumeSnapshotter, Name: name}, + SharedPluginProcess: sharedPluginProcess, } }, true, @@ -209,15 +188,15 @@ func TestGetVolumeSnapshotter(t *testing.T) { func TestGetBackupItemAction(t *testing.T) { getPluginTest(t, - framework.PluginKindBackupItemAction, + common.PluginKindBackupItemAction, "velero.io/pod", func(m Manager, name string) (interface{}, error) { return m.GetBackupItemAction(name) }, - func(name string, sharedPluginProcess RestartableProcess) interface{} { - return &restartableBackupItemAction{ - key: kindAndName{kind: framework.PluginKindBackupItemAction, name: name}, - sharedPluginProcess: sharedPluginProcess, + func(name string, sharedPluginProcess process.RestartableProcess) interface{} { + return &biav1cli.RestartableBackupItemAction{ + Key: process.KindAndName{Kind: common.PluginKindBackupItemAction, Name: name}, + SharedPluginProcess: sharedPluginProcess, } }, false, @@ -226,15 +205,15 @@ func TestGetBackupItemAction(t *testing.T) { func TestGetRestoreItemAction(t *testing.T) { getPluginTest(t, - framework.PluginKindRestoreItemAction, + common.PluginKindRestoreItemAction, "velero.io/pod", func(m Manager, name string) (interface{}, error) { return m.GetRestoreItemAction(name) }, - func(name string, sharedPluginProcess RestartableProcess) interface{} { - return &restartableRestoreItemAction{ - key: kindAndName{kind: framework.PluginKindRestoreItemAction, name: name}, - sharedPluginProcess: sharedPluginProcess, + func(name string, sharedPluginProcess process.RestartableProcess) interface{} { + return &riav1cli.RestartableRestoreItemAction{ + Key: process.KindAndName{Kind: common.PluginKindRestoreItemAction, Name: name}, + SharedPluginProcess: sharedPluginProcess, } }, false, @@ -243,10 +222,10 @@ func TestGetRestoreItemAction(t *testing.T) { func getPluginTest( t *testing.T, - kind framework.PluginKind, + kind common.PluginKind, name string, getPluginFunc func(m Manager, name string) (interface{}, error), - expectedResultFunc func(name string, sharedPluginProcess RestartableProcess) interface{}, + expectedResultFunc func(name string, sharedPluginProcess process.RestartableProcess) interface{}, reinitializable bool, ) { logger := test.NewLogger() @@ -269,22 +248,22 @@ func getPluginTest( } registry.On("Get", pluginKind, pluginName).Return(pluginID, nil) - restartableProcess := &mockRestartableProcess{} + restartableProcess := &restartabletest.MockRestartableProcess{} defer restartableProcess.AssertExpectations(t) // Test 1: error getting restartable process - factory.On("newRestartableProcess", pluginID.Command, logger, logLevel).Return(nil, errors.Errorf("newRestartableProcess")).Once() + factory.On("NewRestartableProcess", pluginID.Command, logger, logLevel).Return(nil, errors.Errorf("NewRestartableProcess")).Once() actual, err := getPluginFunc(m, 
pluginName) assert.Nil(t, actual) - assert.EqualError(t, err, "newRestartableProcess") + assert.EqualError(t, err, "NewRestartableProcess") // Test 2: happy path - factory.On("newRestartableProcess", pluginID.Command, logger, logLevel).Return(restartableProcess, nil).Once() + factory.On("NewRestartableProcess", pluginID.Command, logger, logLevel).Return(restartableProcess, nil).Once() expected := expectedResultFunc(name, restartableProcess) if reinitializable { - key := kindAndName{kind: pluginID.Kind, name: pluginID.Name} - restartableProcess.On("addReinitializer", key, expected) + key := process.KindAndName{Kind: pluginID.Kind, Name: pluginID.Name} + restartableProcess.On("AddReinitializer", key, expected) } actual, err = getPluginFunc(m, pluginName) @@ -306,8 +285,8 @@ func TestGetBackupItemActions(t *testing.T) { { name: "Error getting restartable process", names: []string{"velero.io/a", "velero.io/b", "velero.io/c"}, - newRestartableProcessError: errors.Errorf("newRestartableProcess"), - expectedError: "newRestartableProcess", + newRestartableProcessError: errors.Errorf("NewRestartableProcess"), + expectedError: "NewRestartableProcess", }, { name: "Happy path", @@ -327,7 +306,7 @@ func TestGetBackupItemActions(t *testing.T) { defer factory.AssertExpectations(t) m.restartableProcessFactory = factory - pluginKind := framework.PluginKindBackupItemAction + pluginKind := common.PluginKindBackupItemAction var pluginIDs []framework.PluginIdentifier for i := range tc.names { pluginID := framework.PluginIdentifier{ @@ -346,23 +325,23 @@ func TestGetBackupItemActions(t *testing.T) { registry.On("Get", pluginKind, pluginName).Return(pluginID, nil) - restartableProcess := &mockRestartableProcess{} + restartableProcess := &restartabletest.MockRestartableProcess{} defer restartableProcess.AssertExpectations(t) - expected := &restartableBackupItemAction{ - key: kindAndName{kind: pluginKind, name: pluginName}, - sharedPluginProcess: restartableProcess, + expected := &biav1cli.RestartableBackupItemAction{ + Key: process.KindAndName{Kind: pluginKind, Name: pluginName}, + SharedPluginProcess: restartableProcess, } if tc.newRestartableProcessError != nil { // Test 1: error getting restartable process - factory.On("newRestartableProcess", pluginID.Command, logger, logLevel).Return(nil, errors.Errorf("newRestartableProcess")).Once() + factory.On("NewRestartableProcess", pluginID.Command, logger, logLevel).Return(nil, errors.Errorf("NewRestartableProcess")).Once() break } // Test 2: happy path if i == 0 { - factory.On("newRestartableProcess", pluginID.Command, logger, logLevel).Return(restartableProcess, nil).Once() + factory.On("NewRestartableProcess", pluginID.Command, logger, logLevel).Return(restartableProcess, nil).Once() } expectedActions = append(expectedActions, expected) @@ -371,7 +350,7 @@ func TestGetBackupItemActions(t *testing.T) { backupItemActions, err := m.GetBackupItemActions() if tc.newRestartableProcessError != nil { assert.Nil(t, backupItemActions) - assert.EqualError(t, err, "newRestartableProcess") + assert.EqualError(t, err, "NewRestartableProcess") } else { require.NoError(t, err) var actual []interface{} @@ -398,8 +377,8 @@ func TestGetRestoreItemActions(t *testing.T) { { name: "Error getting restartable process", names: []string{"velero.io/a", "velero.io/b", "velero.io/c"}, - newRestartableProcessError: errors.Errorf("newRestartableProcess"), - expectedError: "newRestartableProcess", + newRestartableProcessError: errors.Errorf("NewRestartableProcess"), + expectedError: 
"NewRestartableProcess", }, { name: "Happy path", @@ -419,7 +398,7 @@ func TestGetRestoreItemActions(t *testing.T) { defer factory.AssertExpectations(t) m.restartableProcessFactory = factory - pluginKind := framework.PluginKindRestoreItemAction + pluginKind := common.PluginKindRestoreItemAction var pluginIDs []framework.PluginIdentifier for i := range tc.names { pluginID := framework.PluginIdentifier{ @@ -438,23 +417,23 @@ func TestGetRestoreItemActions(t *testing.T) { registry.On("Get", pluginKind, pluginName).Return(pluginID, nil) - restartableProcess := &mockRestartableProcess{} + restartableProcess := &restartabletest.MockRestartableProcess{} defer restartableProcess.AssertExpectations(t) - expected := &restartableRestoreItemAction{ - key: kindAndName{kind: pluginKind, name: pluginName}, - sharedPluginProcess: restartableProcess, + expected := &riav1cli.RestartableRestoreItemAction{ + Key: process.KindAndName{Kind: pluginKind, Name: pluginName}, + SharedPluginProcess: restartableProcess, } if tc.newRestartableProcessError != nil { // Test 1: error getting restartable process - factory.On("newRestartableProcess", pluginID.Command, logger, logLevel).Return(nil, errors.Errorf("newRestartableProcess")).Once() + factory.On("NewRestartableProcess", pluginID.Command, logger, logLevel).Return(nil, errors.Errorf("NewRestartableProcess")).Once() break } // Test 2: happy path if i == 0 { - factory.On("newRestartableProcess", pluginID.Command, logger, logLevel).Return(restartableProcess, nil).Once() + factory.On("NewRestartableProcess", pluginID.Command, logger, logLevel).Return(restartableProcess, nil).Once() } expectedActions = append(expectedActions, expected) @@ -463,7 +442,7 @@ func TestGetRestoreItemActions(t *testing.T) { restoreItemActions, err := m.GetRestoreItemActions() if tc.newRestartableProcessError != nil { assert.Nil(t, restoreItemActions) - assert.EqualError(t, err, "newRestartableProcess") + assert.EqualError(t, err, "NewRestartableProcess") } else { require.NoError(t, err) var actual []interface{} @@ -478,14 +457,14 @@ func TestGetRestoreItemActions(t *testing.T) { func TestGetDeleteItemAction(t *testing.T) { getPluginTest(t, - framework.PluginKindDeleteItemAction, + common.PluginKindDeleteItemAction, "velero.io/deleter", func(m Manager, name string) (interface{}, error) { return m.GetDeleteItemAction(name) }, - func(name string, sharedPluginProcess RestartableProcess) interface{} { + func(name string, sharedPluginProcess process.RestartableProcess) interface{} { return &restartableDeleteItemAction{ - key: kindAndName{kind: framework.PluginKindDeleteItemAction, name: name}, + key: process.KindAndName{Kind: common.PluginKindDeleteItemAction, Name: name}, sharedPluginProcess: sharedPluginProcess, } }, @@ -518,7 +497,7 @@ func TestGetDeleteItemActions(t *testing.T) { defer factory.AssertExpectations(t) m.restartableProcessFactory = factory - pluginKind := framework.PluginKindDeleteItemAction + pluginKind := common.PluginKindDeleteItemAction var pluginIDs []framework.PluginIdentifier for i := range tc.names { pluginID := framework.PluginIdentifier{ @@ -537,23 +516,23 @@ func TestGetDeleteItemActions(t *testing.T) { registry.On("Get", pluginKind, pluginName).Return(pluginID, nil) - restartableProcess := &mockRestartableProcess{} + restartableProcess := &restartabletest.MockRestartableProcess{} defer restartableProcess.AssertExpectations(t) - expected := &restartableRestoreItemAction{ - key: kindAndName{kind: pluginKind, name: pluginName}, - sharedPluginProcess: restartableProcess, + 
expected := &riav1cli.RestartableRestoreItemAction{ + Key: process.KindAndName{Kind: pluginKind, Name: pluginName}, + SharedPluginProcess: restartableProcess, } if tc.newRestartableProcessError != nil { // Test 1: error getting restartable process - factory.On("newRestartableProcess", pluginID.Command, logger, logLevel).Return(nil, errors.Errorf("newRestartableProcess")).Once() + factory.On("NewRestartableProcess", pluginID.Command, logger, logLevel).Return(nil, errors.Errorf("NewRestartableProcess")).Once() break } // Test 2: happy path if i == 0 { - factory.On("newRestartableProcess", pluginID.Command, logger, logLevel).Return(restartableProcess, nil).Once() + factory.On("NewRestartableProcess", pluginID.Command, logger, logLevel).Return(restartableProcess, nil).Once() } expectedActions = append(expectedActions, expected) @@ -562,7 +541,7 @@ func TestGetDeleteItemActions(t *testing.T) { deleteItemActions, err := m.GetDeleteItemActions() if tc.newRestartableProcessError != nil { assert.Nil(t, deleteItemActions) - assert.EqualError(t, err, "newRestartableProcess") + assert.EqualError(t, err, "NewRestartableProcess") } else { require.NoError(t, err) var actual []interface{} diff --git a/pkg/plugin/clientmgmt/client_builder.go b/pkg/plugin/clientmgmt/process/client_builder.go similarity index 74% rename from pkg/plugin/clientmgmt/client_builder.go rename to pkg/plugin/clientmgmt/process/client_builder.go index 76e3b1985d..4b6c27731f 100644 --- a/pkg/plugin/clientmgmt/client_builder.go +++ b/pkg/plugin/clientmgmt/process/client_builder.go @@ -15,7 +15,7 @@ limitations under the License. */ // Package clientmgmt contains the plugin client for Velero. -package clientmgmt +package process import ( "os" @@ -27,6 +27,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/features" "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" ) // clientBuilder builds go-plugin Clients. 
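// Illustrative note (commentary added for review, not part of this change):
// the Plugins map built in clientConfig below is keyed by the string form of
// each common.PluginKind; the dispense step in the relocated
// pkg/plugin/clientmgmt/process/process.go resolves a plugin with the same
// key, roughly:
//
//	dispensed, err := r.protocolClient.Dispense(key.Kind.String())
//
// so the kind constants that moved into plugin/framework/common have to stay
// in sync with the keys registered here.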
@@ -67,13 +68,13 @@ func (b *clientBuilder) clientConfig() *hcplugin.ClientConfig { HandshakeConfig: framework.Handshake(), AllowedProtocols: []hcplugin.Protocol{hcplugin.ProtocolGRPC}, Plugins: map[string]hcplugin.Plugin{ - string(framework.PluginKindBackupItemAction): framework.NewBackupItemActionPlugin(framework.ClientLogger(b.clientLogger)), - string(framework.PluginKindVolumeSnapshotter): framework.NewVolumeSnapshotterPlugin(framework.ClientLogger(b.clientLogger)), - string(framework.PluginKindObjectStore): framework.NewObjectStorePlugin(framework.ClientLogger(b.clientLogger)), - string(framework.PluginKindPluginLister): &framework.PluginListerPlugin{}, - string(framework.PluginKindRestoreItemAction): framework.NewRestoreItemActionPlugin(framework.ClientLogger(b.clientLogger)), - string(framework.PluginKindDeleteItemAction): framework.NewDeleteItemActionPlugin(framework.ClientLogger(b.clientLogger)), - string(framework.PluginKindItemSnapshotter): framework.NewItemSnapshotterPlugin(framework.ClientLogger(b.clientLogger)), + string(common.PluginKindBackupItemAction): framework.NewBackupItemActionPlugin(common.ClientLogger(b.clientLogger)), + string(common.PluginKindVolumeSnapshotter): framework.NewVolumeSnapshotterPlugin(common.ClientLogger(b.clientLogger)), + string(common.PluginKindObjectStore): framework.NewObjectStorePlugin(common.ClientLogger(b.clientLogger)), + string(common.PluginKindPluginLister): &framework.PluginListerPlugin{}, + string(common.PluginKindRestoreItemAction): framework.NewRestoreItemActionPlugin(common.ClientLogger(b.clientLogger)), + string(common.PluginKindDeleteItemAction): framework.NewDeleteItemActionPlugin(common.ClientLogger(b.clientLogger)), + string(common.PluginKindItemSnapshotter): framework.NewItemSnapshotterPlugin(common.ClientLogger(b.clientLogger)), }, Logger: b.pluginLogger, Cmd: exec.Command(b.commandName, b.commandArgs...), diff --git a/pkg/plugin/clientmgmt/client_builder_test.go b/pkg/plugin/clientmgmt/process/client_builder_test.go similarity index 73% rename from pkg/plugin/clientmgmt/client_builder_test.go rename to pkg/plugin/clientmgmt/process/client_builder_test.go index e922c3d002..2a3f6df615 100644 --- a/pkg/plugin/clientmgmt/client_builder_test.go +++ b/pkg/plugin/clientmgmt/process/client_builder_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clientmgmt +package process import ( "os" @@ -27,6 +27,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/features" "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/test" ) @@ -60,13 +61,13 @@ func TestClientConfig(t *testing.T) { HandshakeConfig: framework.Handshake(), AllowedProtocols: []hcplugin.Protocol{hcplugin.ProtocolGRPC}, Plugins: map[string]hcplugin.Plugin{ - string(framework.PluginKindBackupItemAction): framework.NewBackupItemActionPlugin(framework.ClientLogger(logger)), - string(framework.PluginKindVolumeSnapshotter): framework.NewVolumeSnapshotterPlugin(framework.ClientLogger(logger)), - string(framework.PluginKindObjectStore): framework.NewObjectStorePlugin(framework.ClientLogger(logger)), - string(framework.PluginKindPluginLister): &framework.PluginListerPlugin{}, - string(framework.PluginKindRestoreItemAction): framework.NewRestoreItemActionPlugin(framework.ClientLogger(logger)), - string(framework.PluginKindDeleteItemAction): framework.NewDeleteItemActionPlugin(framework.ClientLogger(logger)), - string(framework.PluginKindItemSnapshotter): framework.NewItemSnapshotterPlugin(framework.ClientLogger(logger)), + string(common.PluginKindBackupItemAction): framework.NewBackupItemActionPlugin(common.ClientLogger(logger)), + string(common.PluginKindVolumeSnapshotter): framework.NewVolumeSnapshotterPlugin(common.ClientLogger(logger)), + string(common.PluginKindObjectStore): framework.NewObjectStorePlugin(common.ClientLogger(logger)), + string(common.PluginKindPluginLister): &framework.PluginListerPlugin{}, + string(common.PluginKindRestoreItemAction): framework.NewRestoreItemActionPlugin(common.ClientLogger(logger)), + string(common.PluginKindDeleteItemAction): framework.NewDeleteItemActionPlugin(common.ClientLogger(logger)), + string(common.PluginKindItemSnapshotter): framework.NewItemSnapshotterPlugin(common.ClientLogger(logger)), }, Logger: cb.pluginLogger, Cmd: exec.Command(cb.commandName, cb.commandArgs...), diff --git a/pkg/plugin/clientmgmt/logrus_adapter.go b/pkg/plugin/clientmgmt/process/logrus_adapter.go similarity index 99% rename from pkg/plugin/clientmgmt/logrus_adapter.go rename to pkg/plugin/clientmgmt/process/logrus_adapter.go index 3bb55d4647..cb73da789c 100644 --- a/pkg/plugin/clientmgmt/logrus_adapter.go +++ b/pkg/plugin/clientmgmt/process/logrus_adapter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clientmgmt +package process import ( "fmt" diff --git a/pkg/plugin/clientmgmt/logrus_adapter_test.go b/pkg/plugin/clientmgmt/process/logrus_adapter_test.go similarity index 99% rename from pkg/plugin/clientmgmt/logrus_adapter_test.go rename to pkg/plugin/clientmgmt/process/logrus_adapter_test.go index ccfdfa8286..dae7f8ce2f 100644 --- a/pkg/plugin/clientmgmt/logrus_adapter_test.go +++ b/pkg/plugin/clientmgmt/process/logrus_adapter_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -package clientmgmt +package process import ( "testing" diff --git a/pkg/plugin/clientmgmt/process.go b/pkg/plugin/clientmgmt/process/process.go similarity index 90% rename from pkg/plugin/clientmgmt/process.go rename to pkg/plugin/clientmgmt/process/process.go index f2fc8ae367..b6be276871 100644 --- a/pkg/plugin/clientmgmt/process.go +++ b/pkg/plugin/clientmgmt/process/process.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clientmgmt +package process import ( "strings" @@ -23,7 +23,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" ) type ProcessFactory interface { @@ -42,7 +42,7 @@ func (pf *processFactory) newProcess(command string, logger logrus.FieldLogger, } type Process interface { - dispense(key kindAndName) (interface{}, error) + dispense(key KindAndName) (interface{}, error) exited() bool kill() } @@ -124,21 +124,21 @@ func removeFeaturesFlag(args []string) []string { return commandArgs } -func (r *process) dispense(key kindAndName) (interface{}, error) { +func (r *process) dispense(key KindAndName) (interface{}, error) { // This calls GRPCClient(clientConn) on the plugin instance registered for key.name. - dispensed, err := r.protocolClient.Dispense(key.kind.String()) + dispensed, err := r.protocolClient.Dispense(key.Kind.String()) if err != nil { return nil, errors.WithStack(err) } // Currently all plugins except for PluginLister dispense clientDispenser instances. - if clientDispenser, ok := dispensed.(framework.ClientDispenser); ok { - if key.name == "" { - return nil, errors.Errorf("%s plugin requested but name is missing", key.kind.String()) + if clientDispenser, ok := dispensed.(common.ClientDispenser); ok { + if key.Name == "" { + return nil, errors.Errorf("%s plugin requested but name is missing", key.Kind.String()) } // Get the instance that implements our plugin interface (e.g. ObjectStore) that is a gRPC-based // client - dispensed = clientDispenser.ClientFor(key.name) + dispensed = clientDispenser.ClientFor(key.Name) } return dispensed, nil diff --git a/pkg/plugin/clientmgmt/process_test.go b/pkg/plugin/clientmgmt/process/process_test.go similarity index 91% rename from pkg/plugin/clientmgmt/process_test.go rename to pkg/plugin/clientmgmt/process/process_test.go index ac82ade87a..ce7c7c07b9 100644 --- a/pkg/plugin/clientmgmt/process_test.go +++ b/pkg/plugin/clientmgmt/process/process_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -package clientmgmt +package process import ( "testing" @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" ) type mockClientProtocol struct { @@ -94,20 +95,20 @@ func TestDispense(t *testing.T) { var client interface{} - key := kindAndName{} + key := KindAndName{} if tc.clientDispenser { - key.kind = framework.PluginKindObjectStore - protocolClient.On("Dispense", key.kind.String()).Return(clientDispenser, tc.dispenseError) + key.Kind = common.PluginKindObjectStore + protocolClient.On("Dispense", key.Kind.String()).Return(clientDispenser, tc.dispenseError) if !tc.missingKeyName { - key.name = "aws" + key.Name = "aws" client = &framework.BackupItemActionGRPCClient{} - clientDispenser.On("ClientFor", key.name).Return(client) + clientDispenser.On("ClientFor", key.Name).Return(client) } } else { - key.kind = framework.PluginKindPluginLister + key.Kind = common.PluginKindPluginLister client = &framework.PluginListerGRPCClient{} - protocolClient.On("Dispense", key.kind.String()).Return(client, tc.dispenseError) + protocolClient.On("Dispense", key.Kind.String()).Return(client, tc.dispenseError) } dispensed, err := p.dispense(key) diff --git a/pkg/plugin/clientmgmt/registry.go b/pkg/plugin/clientmgmt/process/registry.go similarity index 78% rename from pkg/plugin/clientmgmt/registry.go rename to pkg/plugin/clientmgmt/process/registry.go index fe8a7f4b44..11a6bf43ed 100644 --- a/pkg/plugin/clientmgmt/registry.go +++ b/pkg/plugin/clientmgmt/process/registry.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clientmgmt +package process import ( "fmt" @@ -25,6 +25,7 @@ import ( "github.com/sirupsen/logrus" "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) @@ -33,15 +34,15 @@ type Registry interface { // DiscoverPlugins discovers all available plugins. DiscoverPlugins() error // List returns all PluginIdentifiers for kind. - List(kind framework.PluginKind) []framework.PluginIdentifier + List(kind common.PluginKind) []framework.PluginIdentifier // Get returns the PluginIdentifier for kind and name. - Get(kind framework.PluginKind, name string) (framework.PluginIdentifier, error) + Get(kind common.PluginKind, name string) (framework.PluginIdentifier, error) } -// kindAndName is a convenience struct that combines a PluginKind and a name. -type kindAndName struct { - kind framework.PluginKind - name string +// KindAndName is a convenience struct that combines a PluginKind and a name. +type KindAndName struct { + Kind common.PluginKind + Name string } // registry implements Registry. @@ -53,8 +54,8 @@ type registry struct { processFactory ProcessFactory fs filesystem.Interface - pluginsByID map[kindAndName]framework.PluginIdentifier - pluginsByKind map[framework.PluginKind][]framework.PluginIdentifier + pluginsByID map[KindAndName]framework.PluginIdentifier + pluginsByKind map[common.PluginKind][]framework.PluginIdentifier } // NewRegistry returns a new registry. 
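With kindAndName promoted to the exported process.KindAndName and the Registry interface re-keyed on common.PluginKind, plugin lookups can now be driven from other packages. A small sketch, assuming an illustrative plugin directory and plugin name:

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"

	"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process"
	"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
)

func main() {
	logger := logrus.New()

	// The directory and plugin name below are illustrative.
	registry := process.NewRegistry("/plugins", logger, logrus.InfoLevel)
	if err := registry.DiscoverPlugins(); err != nil {
		logger.WithError(err).Fatal("discovering plugins")
	}

	// Get is keyed by the (kind, name) pair; internally the registry stores
	// identifiers in a map keyed by the exported process.KindAndName struct.
	id, err := registry.Get(common.PluginKindObjectStore, "velero.io/aws")
	if err != nil {
		logger.WithError(err).Fatal("looking up plugin")
	}
	fmt.Printf("found %s served by %s\n", id.Name, id.Command)

	// The same key type is what the restartable wrappers pass to a RestartableProcess.
	key := process.KindAndName{Kind: common.PluginKindObjectStore, Name: id.Name}
	_ = key
}
```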
@@ -66,8 +67,8 @@ func NewRegistry(dir string, logger logrus.FieldLogger, logLevel logrus.Level) R processFactory: newProcessFactory(), fs: filesystem.NewFileSystem(), - pluginsByID: make(map[kindAndName]framework.PluginIdentifier), - pluginsByKind: make(map[framework.PluginKind][]framework.PluginIdentifier), + pluginsByID: make(map[KindAndName]framework.PluginIdentifier), + pluginsByKind: make(map[common.PluginKind][]framework.PluginIdentifier), } } @@ -110,14 +111,14 @@ func (r *registry) discoverPlugins(commands []string) error { // List returns info about all plugin binaries that implement the given // PluginKind. -func (r *registry) List(kind framework.PluginKind) []framework.PluginIdentifier { +func (r *registry) List(kind common.PluginKind) []framework.PluginIdentifier { return r.pluginsByKind[kind] } // Get returns info about a plugin with the given name and kind, or an // error if one cannot be found. -func (r *registry) Get(kind framework.PluginKind, name string) (framework.PluginIdentifier, error) { - p, found := r.pluginsByID[kindAndName{kind: kind, name: name}] +func (r *registry) Get(kind common.PluginKind, name string) (framework.PluginIdentifier, error) { + p, found := r.pluginsByID[KindAndName{Kind: kind, Name: name}] if !found { return framework.PluginIdentifier{}, newPluginNotFoundError(kind, name) } @@ -182,7 +183,7 @@ func (r *registry) listPlugins(command string) ([]framework.PluginIdentifier, er } defer process.kill() - plugin, err := process.dispense(kindAndName{kind: framework.PluginKindPluginLister}) + plugin, err := process.dispense(KindAndName{Kind: common.PluginKindPluginLister}) if err != nil { return nil, err } @@ -197,37 +198,46 @@ func (r *registry) listPlugins(command string) ([]framework.PluginIdentifier, er // register registers a PluginIdentifier with the registry. func (r *registry) register(id framework.PluginIdentifier) error { - key := kindAndName{kind: id.Kind, name: id.Name} + key := KindAndName{Kind: id.Kind, Name: id.Name} if existing, found := r.pluginsByID[key]; found { return newDuplicatePluginRegistrationError(existing, id) } // no need to pass list of existing plugins since the check if this exists was done above - if err := framework.ValidatePluginName(id.Name, nil); err != nil { + if err := common.ValidatePluginName(id.Name, nil); err != nil { return errors.Errorf("invalid plugin name %q: %s", id.Name, err) } r.pluginsByID[key] = id r.pluginsByKind[id.Kind] = append(r.pluginsByKind[id.Kind], id) + // if id.Kind is adaptable to newer plugin versions, list it under the other versions as well + // If BackupItemAction is adaptable to BackupItemActionV2, then it would be listed under both + // kinds + if kinds, ok := common.PluginKindsAdaptableTo[id.Kind]; ok { + for _, kind := range kinds { + r.pluginsByKind[kind] = append(r.pluginsByKind[kind], id) + } + } + return nil } // pluginNotFoundError indicates a plugin could not be located for kind and name. -type pluginNotFoundError struct { - kind framework.PluginKind +type PluginNotFoundError struct { + kind common.PluginKind name string } // newPluginNotFoundError returns a new pluginNotFoundError for kind and name. 
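The new block in register() also files each plugin under every kind its registered kind is adaptable to (per common.PluginKindsAdaptableTo), so listing by a newer API version surfaces plugins that only registered the older kind. A sketch of observing that effect, reusing the illustrative plugin directory from above; the concrete entries of PluginKindsAdaptableTo are defined elsewhere and are not shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"

	"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process"
	"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
)

func main() {
	logger := logrus.New()
	registry := process.NewRegistry("/plugins", logger, logrus.InfoLevel) // illustrative path
	if err := registry.DiscoverPlugins(); err != nil {
		logger.WithError(err).Fatal("discovering plugins")
	}

	// A plugin registered under an older kind is now also returned when listing
	// by each newer kind that older kind is adaptable to.
	for older, newerKinds := range common.PluginKindsAdaptableTo {
		for _, id := range registry.List(older) {
			for _, newer := range newerKinds {
				fmt.Printf("%s registered as %s is also listed under %s\n", id.Name, older, newer)
			}
		}
	}
}
```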
-func newPluginNotFoundError(kind framework.PluginKind, name string) *pluginNotFoundError { - return &pluginNotFoundError{ +func newPluginNotFoundError(kind common.PluginKind, name string) *PluginNotFoundError { + return &PluginNotFoundError{ kind: kind, name: name, } } -func (e *pluginNotFoundError) Error() string { +func (e *PluginNotFoundError) Error() string { return fmt.Sprintf("unable to locate %v plugin named %s", e.kind, e.name) } diff --git a/pkg/plugin/clientmgmt/registry_test.go b/pkg/plugin/clientmgmt/process/registry_test.go similarity index 99% rename from pkg/plugin/clientmgmt/registry_test.go rename to pkg/plugin/clientmgmt/process/registry_test.go index 45bbcbb899..cd0614f857 100644 --- a/pkg/plugin/clientmgmt/registry_test.go +++ b/pkg/plugin/clientmgmt/process/registry_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package clientmgmt +package process import ( "os" diff --git a/pkg/plugin/clientmgmt/restartable_process.go b/pkg/plugin/clientmgmt/process/restartable_process.go similarity index 75% rename from pkg/plugin/clientmgmt/restartable_process.go rename to pkg/plugin/clientmgmt/process/restartable_process.go index 54211d8d1f..21ed810225 100644 --- a/pkg/plugin/clientmgmt/restartable_process.go +++ b/pkg/plugin/clientmgmt/process/restartable_process.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clientmgmt +package process import ( "sync" @@ -24,26 +24,26 @@ import ( ) type RestartableProcessFactory interface { - newRestartableProcess(command string, logger logrus.FieldLogger, logLevel logrus.Level) (RestartableProcess, error) + NewRestartableProcess(command string, logger logrus.FieldLogger, logLevel logrus.Level) (RestartableProcess, error) } type restartableProcessFactory struct { } -func newRestartableProcessFactory() RestartableProcessFactory { +func NewRestartableProcessFactory() RestartableProcessFactory { return &restartableProcessFactory{} } -func (rpf *restartableProcessFactory) newRestartableProcess(command string, logger logrus.FieldLogger, logLevel logrus.Level) (RestartableProcess, error) { +func (rpf *restartableProcessFactory) NewRestartableProcess(command string, logger logrus.FieldLogger, logLevel logrus.Level) (RestartableProcess, error) { return newRestartableProcess(command, logger, logLevel) } type RestartableProcess interface { - addReinitializer(key kindAndName, r reinitializer) - reset() error - resetIfNeeded() error - getByKindAndName(key kindAndName) (interface{}, error) - stop() + AddReinitializer(key KindAndName, r Reinitializer) + Reset() error + ResetIfNeeded() error + GetByKindAndName(key KindAndName) (interface{}, error) + Stop() } // restartableProcess encapsulates the lifecycle for all plugins contained in a single executable file. It is able @@ -57,15 +57,15 @@ type restartableProcess struct { // lock guards all of the fields below lock sync.RWMutex process Process - plugins map[kindAndName]interface{} - reinitializers map[kindAndName]reinitializer + plugins map[KindAndName]interface{} + reinitializers map[KindAndName]Reinitializer resetFailures int } // reinitializer is capable of reinitializing a restartable plugin instance using the newly dispensed plugin. 
-type reinitializer interface { +type Reinitializer interface { // reinitialize reinitializes a restartable plugin instance using the newly dispensed plugin. - reinitialize(dispensed interface{}) error + Reinitialize(dispensed interface{}) error } // newRestartableProcess creates a new restartableProcess for the given command and options. @@ -74,26 +74,26 @@ func newRestartableProcess(command string, logger logrus.FieldLogger, logLevel l command: command, logger: logger, logLevel: logLevel, - plugins: make(map[kindAndName]interface{}), - reinitializers: make(map[kindAndName]reinitializer), + plugins: make(map[KindAndName]interface{}), + reinitializers: make(map[KindAndName]Reinitializer), } // This launches the process - err := p.reset() + err := p.Reset() return p, err } -// addReinitializer registers the reinitializer r for key. -func (p *restartableProcess) addReinitializer(key kindAndName, r reinitializer) { +// AddReinitializer registers the reinitializer r for key. +func (p *restartableProcess) AddReinitializer(key KindAndName, r Reinitializer) { p.lock.Lock() defer p.lock.Unlock() p.reinitializers[key] = r } -// reset acquires the lock and calls resetLH. -func (p *restartableProcess) reset() error { +// Reset acquires the lock and calls resetLH. +func (p *restartableProcess) Reset() error { p.lock.Lock() defer p.lock.Unlock() @@ -118,7 +118,7 @@ func (p *restartableProcess) resetLH() error { // Redispense any previously dispensed plugins, reinitializing if necessary. // Start by creating a new map to hold the newly dispensed plugins. - newPlugins := make(map[kindAndName]interface{}) + newPlugins := make(map[KindAndName]interface{}) for key := range p.plugins { // Re-dispense dispensed, err := p.process.dispense(key) @@ -131,7 +131,7 @@ func (p *restartableProcess) resetLH() error { // Reinitialize if r, found := p.reinitializers[key]; found { - if err := r.reinitialize(dispensed); err != nil { + if err := r.Reinitialize(dispensed); err != nil { p.resetFailures++ return err } @@ -146,8 +146,8 @@ func (p *restartableProcess) resetLH() error { return nil } -// resetIfNeeded checks if the plugin process has exited and resets p if it has. -func (p *restartableProcess) resetIfNeeded() error { +// ResetIfNeeded checks if the plugin process has exited and resets p if it has. +func (p *restartableProcess) ResetIfNeeded() error { p.lock.Lock() defer p.lock.Unlock() @@ -159,8 +159,8 @@ func (p *restartableProcess) resetIfNeeded() error { return nil } -// getByKindAndName acquires the lock and calls getByKindAndNameLH. -func (p *restartableProcess) getByKindAndName(key kindAndName) (interface{}, error) { +// GetByKindAndName acquires the lock and calls getByKindAndNameLH. +func (p *restartableProcess) GetByKindAndName(key KindAndName) (interface{}, error) { p.lock.Lock() defer p.lock.Unlock() @@ -169,7 +169,7 @@ func (p *restartableProcess) getByKindAndName(key kindAndName) (interface{}, err // getByKindAndNameLH returns the dispensed plugin for key. If the plugin hasn't been dispensed before, it dispenses a // new one. -func (p *restartableProcess) getByKindAndNameLH(key kindAndName) (interface{}, error) { +func (p *restartableProcess) getByKindAndNameLH(key KindAndName) (interface{}, error) { dispensed, found := p.plugins[key] if found { return dispensed, nil @@ -184,7 +184,7 @@ func (p *restartableProcess) getByKindAndNameLH(key kindAndName) (interface{}, e } // stop terminates the plugin process. 
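Taken together, the exported RestartableProcess methods above form the per-call lifecycle every restartable wrapper in this change follows: ResetIfNeeded to relaunch the shared process if it exited, then GetByKindAndName to dispense (or reuse) the named plugin. A compressed sketch of that sequence, using an illustrative helper name and an object store plugin as the example kind:

```go
package example

import (
	"github.com/pkg/errors"

	"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process"
	"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
)

// objectStoreFor is a hypothetical helper showing the sequence the restartable
// wrappers run before delegating each call to the underlying plugin.
func objectStoreFor(rp process.RestartableProcess, name string) (velero.ObjectStore, error) {
	key := process.KindAndName{Kind: common.PluginKindObjectStore, Name: name}

	// Restart the shared plugin process if it has exited since the last call.
	if err := rp.ResetIfNeeded(); err != nil {
		return nil, err
	}

	// Dispense the named plugin (or reuse the previously dispensed instance).
	plugin, err := rp.GetByKindAndName(key)
	if err != nil {
		return nil, err
	}

	store, ok := plugin.(velero.ObjectStore)
	if !ok {
		return nil, errors.Errorf("%T is not an ObjectStore", plugin)
	}
	return store, nil
}
```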
-func (p *restartableProcess) stop() { +func (p *restartableProcess) Stop() { p.lock.Lock() p.process.kill() p.lock.Unlock() diff --git a/pkg/plugin/clientmgmt/restartable_delete_item_action.go b/pkg/plugin/clientmgmt/restartable_delete_item_action.go index 266500c7a8..ead2b9b09f 100644 --- a/pkg/plugin/clientmgmt/restartable_delete_item_action.go +++ b/pkg/plugin/clientmgmt/restartable_delete_item_action.go @@ -19,7 +19,8 @@ package clientmgmt import ( "github.com/pkg/errors" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" ) @@ -28,15 +29,15 @@ import ( // call, the restartableDeleteItemAction asks its restartableProcess to restart itself if needed (e.g. if the // process terminated for any reason), then it proceeds with the actual call. type restartableDeleteItemAction struct { - key kindAndName - sharedPluginProcess RestartableProcess + key process.KindAndName + sharedPluginProcess process.RestartableProcess config map[string]string } -// newRestartableDeleteItemAction returns a new restartableDeleteItemAction. -func newRestartableDeleteItemAction(name string, sharedPluginProcess RestartableProcess) *restartableDeleteItemAction { +// NewRestartableDeleteItemAction returns a new restartableDeleteItemAction. +func NewRestartableDeleteItemAction(name string, sharedPluginProcess process.RestartableProcess) *restartableDeleteItemAction { r := &restartableDeleteItemAction{ - key: kindAndName{kind: framework.PluginKindDeleteItemAction, name: name}, + key: process.KindAndName{Kind: common.PluginKindDeleteItemAction, Name: name}, sharedPluginProcess: sharedPluginProcess, } return r @@ -45,7 +46,7 @@ func newRestartableDeleteItemAction(name string, sharedPluginProcess Restartable // getDeleteItemAction returns the delete item action for this restartableDeleteItemAction. It does *not* restart the // plugin process. func (r *restartableDeleteItemAction) getDeleteItemAction() (velero.DeleteItemAction, error) { - plugin, err := r.sharedPluginProcess.getByKindAndName(r.key) + plugin, err := r.sharedPluginProcess.GetByKindAndName(r.key) if err != nil { return nil, err } @@ -60,7 +61,7 @@ func (r *restartableDeleteItemAction) getDeleteItemAction() (velero.DeleteItemAc // getDelegate restarts the plugin process (if needed) and returns the delete item action for this restartableDeleteItemAction. 
func (r *restartableDeleteItemAction) getDelegate() (velero.DeleteItemAction, error) { - if err := r.sharedPluginProcess.resetIfNeeded(); err != nil { + if err := r.sharedPluginProcess.ResetIfNeeded(); err != nil { return nil, err } diff --git a/pkg/plugin/clientmgmt/restartable_delete_item_action_test.go b/pkg/plugin/clientmgmt/restartable_delete_item_action_test.go index 641959e17f..fae5444aca 100644 --- a/pkg/plugin/clientmgmt/restartable_delete_item_action_test.go +++ b/pkg/plugin/clientmgmt/restartable_delete_item_action_test.go @@ -24,8 +24,10 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "github.com/vmware-tanzu/velero/internal/restartabletest" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks" ) @@ -55,14 +57,14 @@ func TestRestartableGetDeleteItemAction(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) defer p.AssertExpectations(t) name := "pod" - key := kindAndName{kind: framework.PluginKindDeleteItemAction, name: name} - p.On("getByKindAndName", key).Return(tc.plugin, tc.getError) + key := process.KindAndName{Kind: common.PluginKindDeleteItemAction, Name: name} + p.On("GetByKindAndName", key).Return(tc.plugin, tc.getError) - r := newRestartableDeleteItemAction(name, p) + r := NewRestartableDeleteItemAction(name, p) a, err := r.getDeleteItemAction() if tc.expectedError != "" { assert.EqualError(t, err, tc.expectedError) @@ -76,23 +78,23 @@ func TestRestartableGetDeleteItemAction(t *testing.T) { } func TestRestartableDeleteItemActionGetDelegate(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) defer p.AssertExpectations(t) // Reset error - p.On("resetIfNeeded").Return(errors.Errorf("reset error")).Once() + p.On("ResetIfNeeded").Return(errors.Errorf("reset error")).Once() name := "pod" - r := newRestartableDeleteItemAction(name, p) + r := NewRestartableDeleteItemAction(name, p) a, err := r.getDelegate() assert.Nil(t, a) assert.EqualError(t, err, "reset error") // Happy path // Currently broken since this mocks out the restore item action interface - p.On("resetIfNeeded").Return(nil) + p.On("ResetIfNeeded").Return(nil) expected := new(mocks.DeleteItemAction) - key := kindAndName{kind: framework.PluginKindDeleteItemAction, name: name} - p.On("getByKindAndName", key).Return(expected, nil) + key := process.KindAndName{Kind: common.PluginKindDeleteItemAction, Name: name} + p.On("GetByKindAndName", key).Return(expected, nil) a, err = r.getDelegate() assert.NoError(t, err) @@ -113,30 +115,30 @@ func TestRestartableDeleteItemActionDelegatedFunctions(t *testing.T) { Backup: backup, } - runRestartableDelegateTests( + restartabletest.RunRestartableDelegateTests( t, - framework.PluginKindDeleteItemAction, - func(key kindAndName, p RestartableProcess) interface{} { + common.PluginKindDeleteItemAction, + func(key process.KindAndName, p process.RestartableProcess) interface{} { return &restartableDeleteItemAction{ key: key, sharedPluginProcess: p, } }, - func() mockable { + func() restartabletest.Mockable { // Currently broken because this mocks the restore item action interface 
return new(mocks.DeleteItemAction) }, - restartableDelegateTest{ - function: "AppliesTo", - inputs: []interface{}{}, - expectedErrorOutputs: []interface{}{velero.ResourceSelector{}, errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{velero.ResourceSelector{IncludedNamespaces: []string{"a"}}, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "AppliesTo", + Inputs: []interface{}{}, + ExpectedErrorOutputs: []interface{}{velero.ResourceSelector{}, errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{velero.ResourceSelector{IncludedNamespaces: []string{"a"}}, errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "Execute", - inputs: []interface{}{input}, - expectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "Execute", + Inputs: []interface{}{input}, + ExpectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, }, ) } diff --git a/pkg/plugin/clientmgmt/restartable_item_snapshotter.go b/pkg/plugin/clientmgmt/restartable_item_snapshotter.go index e211bcf28e..204d6ca229 100644 --- a/pkg/plugin/clientmgmt/restartable_item_snapshotter.go +++ b/pkg/plugin/clientmgmt/restartable_item_snapshotter.go @@ -21,21 +21,21 @@ import ( "github.com/pkg/errors" - isv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" - - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + isv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" ) type restartableItemSnapshotter struct { - key kindAndName - sharedPluginProcess RestartableProcess + key process.KindAndName + sharedPluginProcess process.RestartableProcess } -// newRestartableItemSnapshotter returns a new newRestartableItemSnapshotter. -func newRestartableItemSnapshotter(name string, sharedPluginProcess RestartableProcess) *restartableItemSnapshotter { +// NewRestartableItemSnapshotter returns a new restartableItemSnapshotter. +func NewRestartableItemSnapshotter(name string, sharedPluginProcess process.RestartableProcess) *restartableItemSnapshotter { r := &restartableItemSnapshotter{ - key: kindAndName{kind: framework.PluginKindItemSnapshotter, name: name}, + key: process.KindAndName{Kind: common.PluginKindItemSnapshotter, Name: name}, sharedPluginProcess: sharedPluginProcess, } return r @@ -44,7 +44,7 @@ func newRestartableItemSnapshotter(name string, sharedPluginProcess RestartableP // getItemSnapshotter returns the item snapshotter for this restartableItemSnapshotter. It does *not* restart the // plugin process. func (r *restartableItemSnapshotter) getItemSnapshotter() (isv1.ItemSnapshotter, error) { - plugin, err := r.sharedPluginProcess.getByKindAndName(r.key) + plugin, err := r.sharedPluginProcess.GetByKindAndName(r.key) if err != nil { return nil, err } @@ -59,7 +59,7 @@ func (r *restartableItemSnapshotter) getItemSnapshotter() (isv1.ItemSnapshotter, // getDelegate restarts the plugin process (if needed) and returns the item snapshotter for this restartableItemSnapshotter. 
func (r *restartableItemSnapshotter) getDelegate() (isv1.ItemSnapshotter, error) { - if err := r.sharedPluginProcess.resetIfNeeded(); err != nil { + if err := r.sharedPluginProcess.ResetIfNeeded(); err != nil { return nil, err } diff --git a/pkg/plugin/clientmgmt/restartable_item_snapshotter_test.go b/pkg/plugin/clientmgmt/restartable_item_snapshotter_test.go index 30b2aab437..29805bc424 100644 --- a/pkg/plugin/clientmgmt/restartable_item_snapshotter_test.go +++ b/pkg/plugin/clientmgmt/restartable_item_snapshotter_test.go @@ -21,19 +21,20 @@ import ( "testing" "time" - isv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/vmware-tanzu/velero/internal/restartabletest" v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1/mocks" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + isv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" ) func TestRestartableGetItemSnapshotter(t *testing.T) { @@ -61,14 +62,14 @@ func TestRestartableGetItemSnapshotter(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) defer p.AssertExpectations(t) name := "pvc" - key := kindAndName{kind: framework.PluginKindItemSnapshotter, name: name} - p.On("getByKindAndName", key).Return(tc.plugin, tc.getError) + key := process.KindAndName{Kind: common.PluginKindItemSnapshotter, Name: name} + p.On("GetByKindAndName", key).Return(tc.plugin, tc.getError) - r := newRestartableItemSnapshotter(name, p) + r := NewRestartableItemSnapshotter(name, p) a, err := r.getItemSnapshotter() if tc.expectedError != "" { assert.EqualError(t, err, tc.expectedError) @@ -82,22 +83,22 @@ func TestRestartableGetItemSnapshotter(t *testing.T) { } func TestRestartableItemSnapshotterGetDelegate(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) defer p.AssertExpectations(t) // Reset error - p.On("resetIfNeeded").Return(errors.Errorf("reset error")).Once() + p.On("ResetIfNeeded").Return(errors.Errorf("reset error")).Once() name := "pvc" - r := newRestartableItemSnapshotter(name, p) + r := NewRestartableItemSnapshotter(name, p) a, err := r.getDelegate() assert.Nil(t, a) assert.EqualError(t, err, "reset error") // Happy path - p.On("resetIfNeeded").Return(nil) + p.On("ResetIfNeeded").Return(nil) expected := new(mocks.ItemSnapshotter) - key := kindAndName{kind: framework.PluginKindItemSnapshotter, name: name} - p.On("getByKindAndName", key).Return(expected, nil) + key := process.KindAndName{Kind: common.PluginKindItemSnapshotter, Name: name} + p.On("GetByKindAndName", key).Return(expected, nil) a, err = r.getDelegate() assert.NoError(t, err) @@ -175,59 +176,59 @@ func TestRestartableItemSnasphotterDelegatedFunctions(t *testing.T) { SnapshotMetadata: nil, Params: nil, } - runRestartableDelegateTests( + restartabletest.RunRestartableDelegateTests( t, - framework.PluginKindItemSnapshotter, - func(key kindAndName, p RestartableProcess) interface{} { + common.PluginKindItemSnapshotter, + func(key 
process.KindAndName, p process.RestartableProcess) interface{} { return &restartableItemSnapshotter{ key: key, sharedPluginProcess: p, } }, - func() mockable { + func() restartabletest.Mockable { return new(mocks.ItemSnapshotter) }, - restartableDelegateTest{ - function: "Init", - inputs: []interface{}{map[string]string{}}, - expectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "Init", + Inputs: []interface{}{map[string]string{}}, + ExpectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "AppliesTo", - inputs: []interface{}{}, - expectedErrorOutputs: []interface{}{velero.ResourceSelector{}, errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{velero.ResourceSelector{IncludedNamespaces: []string{"a"}}, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "AppliesTo", + Inputs: []interface{}{}, + ExpectedErrorOutputs: []interface{}{velero.ResourceSelector{}, errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{velero.ResourceSelector{IncludedNamespaces: []string{"a"}}, errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "AlsoHandles", - inputs: []interface{}{&isv1.AlsoHandlesInput{}}, - expectedErrorOutputs: []interface{}{[]velero.ResourceIdentifier([]velero.ResourceIdentifier(nil)), errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{[]velero.ResourceIdentifier([]velero.ResourceIdentifier(nil)), errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "AlsoHandles", + Inputs: []interface{}{&isv1.AlsoHandlesInput{}}, + ExpectedErrorOutputs: []interface{}{[]velero.ResourceIdentifier([]velero.ResourceIdentifier(nil)), errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{[]velero.ResourceIdentifier([]velero.ResourceIdentifier(nil)), errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "SnapshotItem", - inputs: []interface{}{ctx, sii}, - expectedErrorOutputs: []interface{}{nil, errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{sio, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "SnapshotItem", + Inputs: []interface{}{ctx, sii}, + ExpectedErrorOutputs: []interface{}{nil, errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{sio, errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "CreateItemFromSnapshot", - inputs: []interface{}{ctx, cii}, - expectedErrorOutputs: []interface{}{nil, errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{cio, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "CreateItemFromSnapshot", + Inputs: []interface{}{ctx, cii}, + ExpectedErrorOutputs: []interface{}{nil, errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{cio, errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "Progress", - inputs: []interface{}{pi}, - expectedErrorOutputs: []interface{}{nil, errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{po, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "Progress", + Inputs: []interface{}{pi}, + ExpectedErrorOutputs: []interface{}{nil, 
errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{po, errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "DeleteSnapshot", - inputs: []interface{}{ctx, dsi}, - expectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "DeleteSnapshot", + Inputs: []interface{}{ctx, dsi}, + ExpectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, }, ) } diff --git a/pkg/plugin/clientmgmt/restartable_object_store.go b/pkg/plugin/clientmgmt/restartable_object_store.go index 95fbc75c5f..a3552bd0ab 100644 --- a/pkg/plugin/clientmgmt/restartable_object_store.go +++ b/pkg/plugin/clientmgmt/restartable_object_store.go @@ -22,7 +22,8 @@ import ( "github.com/pkg/errors" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" ) @@ -31,29 +32,29 @@ import ( // call, the restartableObjectStore asks its restartableProcess to restart itself if needed (e.g. if the // process terminated for any reason), then it proceeds with the actual call. type restartableObjectStore struct { - key kindAndName - sharedPluginProcess RestartableProcess + key process.KindAndName + sharedPluginProcess process.RestartableProcess // config contains the data used to initialize the plugin. It is used to reinitialize the plugin in the event its // sharedPluginProcess gets restarted. config map[string]string } -// newRestartableObjectStore returns a new restartableObjectStore. -func newRestartableObjectStore(name string, sharedPluginProcess RestartableProcess) *restartableObjectStore { - key := kindAndName{kind: framework.PluginKindObjectStore, name: name} +// NewRestartableObjectStore returns a new restartableObjectStore. +func NewRestartableObjectStore(name string, sharedPluginProcess process.RestartableProcess) *restartableObjectStore { + key := process.KindAndName{Kind: common.PluginKindObjectStore, Name: name} r := &restartableObjectStore{ key: key, sharedPluginProcess: sharedPluginProcess, } // Register our reinitializer so we can reinitialize after a restart with r.config. - sharedPluginProcess.addReinitializer(key, r) + sharedPluginProcess.AddReinitializer(key, r) return r } // reinitialize reinitializes a re-dispensed plugin using the initial data passed to Init(). -func (r *restartableObjectStore) reinitialize(dispensed interface{}) error { +func (r *restartableObjectStore) Reinitialize(dispensed interface{}) error { objectStore, ok := dispensed.(velero.ObjectStore) if !ok { return errors.Errorf("%T is not a ObjectStore!", dispensed) @@ -65,7 +66,7 @@ func (r *restartableObjectStore) reinitialize(dispensed interface{}) error { // getObjectStore returns the object store for this restartableObjectStore. It does *not* restart the // plugin process. func (r *restartableObjectStore) getObjectStore() (velero.ObjectStore, error) { - plugin, err := r.sharedPluginProcess.getByKindAndName(r.key) + plugin, err := r.sharedPluginProcess.GetByKindAndName(r.key) if err != nil { return nil, err } @@ -80,7 +81,7 @@ func (r *restartableObjectStore) getObjectStore() (velero.ObjectStore, error) { // getDelegate restarts the plugin process (if needed) and returns the object store for this restartableObjectStore. 
func (r *restartableObjectStore) getDelegate() (velero.ObjectStore, error) { - if err := r.sharedPluginProcess.resetIfNeeded(); err != nil { + if err := r.sharedPluginProcess.ResetIfNeeded(); err != nil { return nil, err } diff --git a/pkg/plugin/clientmgmt/restartable_object_store_test.go b/pkg/plugin/clientmgmt/restartable_object_store_test.go index 57a64e8b2a..2c6bc0fdce 100644 --- a/pkg/plugin/clientmgmt/restartable_object_store_test.go +++ b/pkg/plugin/clientmgmt/restartable_object_store_test.go @@ -26,7 +26,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/internal/restartabletest" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" providermocks "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks" ) @@ -55,13 +57,13 @@ func TestRestartableGetObjectStore(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) p.Test(t) defer p.AssertExpectations(t) name := "aws" - key := kindAndName{kind: framework.PluginKindObjectStore, name: name} - p.On("getByKindAndName", key).Return(tc.plugin, tc.getError) + key := process.KindAndName{Kind: common.PluginKindObjectStore, Name: name} + p.On("GetByKindAndName", key).Return(tc.plugin, tc.getError) r := &restartableObjectStore{ key: key, @@ -80,12 +82,12 @@ func TestRestartableGetObjectStore(t *testing.T) { } func TestRestartableObjectStoreReinitialize(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) p.Test(t) defer p.AssertExpectations(t) name := "aws" - key := kindAndName{kind: framework.PluginKindObjectStore, name: name} + key := process.KindAndName{Kind: common.PluginKindObjectStore, Name: name} r := &restartableObjectStore{ key: key, sharedPluginProcess: p, @@ -94,7 +96,7 @@ func TestRestartableObjectStoreReinitialize(t *testing.T) { }, } - err := r.reinitialize(3) + err := r.Reinitialize(3) assert.EqualError(t, err, "int is not a ObjectStore!") objectStore := new(providermocks.ObjectStore) @@ -102,23 +104,23 @@ func TestRestartableObjectStoreReinitialize(t *testing.T) { defer objectStore.AssertExpectations(t) objectStore.On("Init", r.config).Return(errors.Errorf("init error")).Once() - err = r.reinitialize(objectStore) + err = r.Reinitialize(objectStore) assert.EqualError(t, err, "init error") objectStore.On("Init", r.config).Return(nil) - err = r.reinitialize(objectStore) + err = r.Reinitialize(objectStore) assert.NoError(t, err) } func TestRestartableObjectStoreGetDelegate(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) p.Test(t) defer p.AssertExpectations(t) // Reset error - p.On("resetIfNeeded").Return(errors.Errorf("reset error")).Once() + p.On("ResetIfNeeded").Return(errors.Errorf("reset error")).Once() name := "aws" - key := kindAndName{kind: framework.PluginKindObjectStore, name: name} + key := process.KindAndName{Kind: common.PluginKindObjectStore, Name: name} r := &restartableObjectStore{ key: key, sharedPluginProcess: p, @@ -128,11 +130,11 @@ func TestRestartableObjectStoreGetDelegate(t *testing.T) { assert.EqualError(t, err, "reset error") // Happy path - p.On("resetIfNeeded").Return(nil) + p.On("ResetIfNeeded").Return(nil) objectStore := new(providermocks.ObjectStore) objectStore.Test(t) defer 
objectStore.AssertExpectations(t) - p.On("getByKindAndName", key).Return(objectStore, nil) + p.On("GetByKindAndName", key).Return(objectStore, nil) a, err = r.getDelegate() assert.NoError(t, err) @@ -140,30 +142,30 @@ func TestRestartableObjectStoreGetDelegate(t *testing.T) { } func TestRestartableObjectStoreInit(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) p.Test(t) defer p.AssertExpectations(t) // getObjectStore error name := "aws" - key := kindAndName{kind: framework.PluginKindObjectStore, name: name} + key := process.KindAndName{Kind: common.PluginKindObjectStore, Name: name} r := &restartableObjectStore{ key: key, sharedPluginProcess: p, } - p.On("getByKindAndName", key).Return(nil, errors.Errorf("getByKindAndName error")).Once() + p.On("GetByKindAndName", key).Return(nil, errors.Errorf("GetByKindAndName error")).Once() config := map[string]string{ "color": "blue", } err := r.Init(config) - assert.EqualError(t, err, "getByKindAndName error") + assert.EqualError(t, err, "GetByKindAndName error") // Delegate returns error objectStore := new(providermocks.ObjectStore) objectStore.Test(t) defer objectStore.AssertExpectations(t) - p.On("getByKindAndName", key).Return(objectStore, nil) + p.On("GetByKindAndName", key).Return(objectStore, nil) objectStore.On("Init", config).Return(errors.Errorf("Init error")).Once() err = r.Init(config) @@ -184,53 +186,53 @@ func TestRestartableObjectStoreInit(t *testing.T) { } func TestRestartableObjectStoreDelegatedFunctions(t *testing.T) { - runRestartableDelegateTests( + restartabletest.RunRestartableDelegateTests( t, - framework.PluginKindObjectStore, - func(key kindAndName, p RestartableProcess) interface{} { + common.PluginKindObjectStore, + func(key process.KindAndName, p process.RestartableProcess) interface{} { return &restartableObjectStore{ key: key, sharedPluginProcess: p, } }, - func() mockable { + func() restartabletest.Mockable { return new(providermocks.ObjectStore) }, - restartableDelegateTest{ - function: "PutObject", - inputs: []interface{}{"bucket", "key", strings.NewReader("body")}, - expectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "PutObject", + Inputs: []interface{}{"bucket", "key", strings.NewReader("body")}, + ExpectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "GetObject", - inputs: []interface{}{"bucket", "key"}, - expectedErrorOutputs: []interface{}{nil, errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{ioutil.NopCloser(strings.NewReader("object")), errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "GetObject", + Inputs: []interface{}{"bucket", "key"}, + ExpectedErrorOutputs: []interface{}{nil, errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{ioutil.NopCloser(strings.NewReader("object")), errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "ListCommonPrefixes", - inputs: []interface{}{"bucket", "prefix", "delimiter"}, - expectedErrorOutputs: []interface{}{([]string)(nil), errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{[]string{"a", "b"}, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "ListCommonPrefixes", + Inputs: 
[]interface{}{"bucket", "prefix", "delimiter"}, + ExpectedErrorOutputs: []interface{}{([]string)(nil), errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{[]string{"a", "b"}, errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "ListObjects", - inputs: []interface{}{"bucket", "prefix"}, - expectedErrorOutputs: []interface{}{([]string)(nil), errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{[]string{"a", "b"}, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "ListObjects", + Inputs: []interface{}{"bucket", "prefix"}, + ExpectedErrorOutputs: []interface{}{([]string)(nil), errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{[]string{"a", "b"}, errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "DeleteObject", - inputs: []interface{}{"bucket", "key"}, - expectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "DeleteObject", + Inputs: []interface{}{"bucket", "key"}, + ExpectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "CreateSignedURL", - inputs: []interface{}{"bucket", "key", 30 * time.Minute}, - expectedErrorOutputs: []interface{}{"", errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{"signedURL", errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "CreateSignedURL", + Inputs: []interface{}{"bucket", "key", 30 * time.Minute}, + ExpectedErrorOutputs: []interface{}{"", errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{"signedURL", errors.Errorf("delegate error")}, }, ) } diff --git a/pkg/plugin/clientmgmt/restartable_restore_item_action.go b/pkg/plugin/clientmgmt/restartable_restore_item_action.go deleted file mode 100644 index 0f55ebfdec..0000000000 --- a/pkg/plugin/clientmgmt/restartable_restore_item_action.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2018 the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientmgmt - -import ( - "github.com/pkg/errors" - - "github.com/vmware-tanzu/velero/pkg/plugin/framework" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" -) - -// restartableRestoreItemAction is a restore item action for a given implementation (such as "pod"). It is associated with -// a restartableProcess, which may be shared and used to run multiple plugins. At the beginning of each method -// call, the restartableRestoreItemAction asks its restartableProcess to restart itself if needed (e.g. if the -// process terminated for any reason), then it proceeds with the actual call. 
-type restartableRestoreItemAction struct { - key kindAndName - sharedPluginProcess RestartableProcess - config map[string]string -} - -// newRestartableRestoreItemAction returns a new restartableRestoreItemAction. -func newRestartableRestoreItemAction(name string, sharedPluginProcess RestartableProcess) *restartableRestoreItemAction { - r := &restartableRestoreItemAction{ - key: kindAndName{kind: framework.PluginKindRestoreItemAction, name: name}, - sharedPluginProcess: sharedPluginProcess, - } - return r -} - -// getRestoreItemAction returns the restore item action for this restartableRestoreItemAction. It does *not* restart the -// plugin process. -func (r *restartableRestoreItemAction) getRestoreItemAction() (velero.RestoreItemAction, error) { - plugin, err := r.sharedPluginProcess.getByKindAndName(r.key) - if err != nil { - return nil, err - } - - restoreItemAction, ok := plugin.(velero.RestoreItemAction) - if !ok { - return nil, errors.Errorf("%T is not a RestoreItemAction!", plugin) - } - - return restoreItemAction, nil -} - -// getDelegate restarts the plugin process (if needed) and returns the restore item action for this restartableRestoreItemAction. -func (r *restartableRestoreItemAction) getDelegate() (velero.RestoreItemAction, error) { - if err := r.sharedPluginProcess.resetIfNeeded(); err != nil { - return nil, err - } - - return r.getRestoreItemAction() -} - -// AppliesTo restarts the plugin's process if needed, then delegates the call. -func (r *restartableRestoreItemAction) AppliesTo() (velero.ResourceSelector, error) { - delegate, err := r.getDelegate() - if err != nil { - return velero.ResourceSelector{}, err - } - - return delegate.AppliesTo() -} - -// Execute restarts the plugin's process if needed, then delegates the call. -func (r *restartableRestoreItemAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { - delegate, err := r.getDelegate() - if err != nil { - return nil, err - } - - return delegate.Execute(input) -} diff --git a/pkg/plugin/clientmgmt/restoreitemaction/v1/restartable_restore_item_action.go b/pkg/plugin/clientmgmt/restoreitemaction/v1/restartable_restore_item_action.go new file mode 100644 index 0000000000..a6b595544d --- /dev/null +++ b/pkg/plugin/clientmgmt/restoreitemaction/v1/restartable_restore_item_action.go @@ -0,0 +1,109 @@ +/* +Copyright 2018 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "github.com/pkg/errors" + + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" + "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" +) + +// AdaptedRestoreItemAction is a restore item action adapted to the v1 RestoreItemAction API +type AdaptedRestoreItemAction struct { + Kind common.PluginKind + + // Get returns a restartable RestoreItemAction for the given name and process, wrapping if necessary + GetRestartable func(name string, restartableProcess process.RestartableProcess) riav1.RestoreItemAction +} + +func AdaptedRestoreItemActions() []AdaptedRestoreItemAction { + return []AdaptedRestoreItemAction{ + { + Kind: common.PluginKindRestoreItemAction, + GetRestartable: func(name string, restartableProcess process.RestartableProcess) riav1.RestoreItemAction { + return NewRestartableRestoreItemAction(name, restartableProcess) + }, + }, + } +} + +// RestartableRestoreItemAction is a restore item action for a given implementation (such as "pod"). It is associated with +// a restartableProcess, which may be shared and used to run multiple plugins. At the beginning of each method +// call, the RestartableRestoreItemAction asks its restartableProcess to restart itself if needed (e.g. if the +// process terminated for any reason), then it proceeds with the actual call. +type RestartableRestoreItemAction struct { + Key process.KindAndName + SharedPluginProcess process.RestartableProcess + config map[string]string +} + +// NewRestartableRestoreItemAction returns a new RestartableRestoreItemAction. +func NewRestartableRestoreItemAction(name string, sharedPluginProcess process.RestartableProcess) *RestartableRestoreItemAction { + r := &RestartableRestoreItemAction{ + Key: process.KindAndName{Kind: common.PluginKindRestoreItemAction, Name: name}, + SharedPluginProcess: sharedPluginProcess, + } + return r +} + +// getRestoreItemAction returns the restore item action for this RestartableRestoreItemAction. It does *not* restart the +// plugin process. +func (r *RestartableRestoreItemAction) getRestoreItemAction() (riav1.RestoreItemAction, error) { + plugin, err := r.SharedPluginProcess.GetByKindAndName(r.Key) + if err != nil { + return nil, err + } + + restoreItemAction, ok := plugin.(riav1.RestoreItemAction) + if !ok { + return nil, errors.Errorf("%T is not a RestoreItemAction!", plugin) + } + + return restoreItemAction, nil +} + +// getDelegate restarts the plugin process (if needed) and returns the restore item action for this RestartableRestoreItemAction. +func (r *RestartableRestoreItemAction) getDelegate() (riav1.RestoreItemAction, error) { + if err := r.SharedPluginProcess.ResetIfNeeded(); err != nil { + return nil, err + } + + return r.getRestoreItemAction() +} + +// AppliesTo restarts the plugin's process if needed, then delegates the call. +func (r RestartableRestoreItemAction) AppliesTo() (velero.ResourceSelector, error) { + delegate, err := r.getDelegate() + if err != nil { + return velero.ResourceSelector{}, err + } + + return delegate.AppliesTo() +} + +// Execute restarts the plugin's process if needed, then delegates the call. 
+func (r *RestartableRestoreItemAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { + delegate, err := r.getDelegate() + if err != nil { + return nil, err + } + + return delegate.Execute(input) +} diff --git a/pkg/plugin/clientmgmt/restartable_restore_item_action_test.go b/pkg/plugin/clientmgmt/restoreitemaction/v1/restartable_restore_item_action_test.go similarity index 56% rename from pkg/plugin/clientmgmt/restartable_restore_item_action_test.go rename to pkg/plugin/clientmgmt/restoreitemaction/v1/restartable_restore_item_action_test.go index 8a37cdb4a9..55a0db2781 100644 --- a/pkg/plugin/clientmgmt/restartable_restore_item_action_test.go +++ b/pkg/plugin/clientmgmt/restoreitemaction/v1/restartable_restore_item_action_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clientmgmt +package v1 import ( "testing" @@ -24,10 +24,13 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "github.com/vmware-tanzu/velero/internal/restartabletest" v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" - "github.com/vmware-tanzu/velero/pkg/restore/mocks" + mocks "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks/restoreitemaction/v1" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) func TestRestartableGetRestoreItemAction(t *testing.T) { @@ -49,20 +52,20 @@ func TestRestartableGetRestoreItemAction(t *testing.T) { }, { name: "happy path", - plugin: new(mocks.ItemAction), + plugin: new(mocks.RestoreItemAction), }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) defer p.AssertExpectations(t) name := "pod" - key := kindAndName{kind: framework.PluginKindRestoreItemAction, name: name} - p.On("getByKindAndName", key).Return(tc.plugin, tc.getError) + key := process.KindAndName{Kind: common.PluginKindRestoreItemAction, Name: name} + p.On("GetByKindAndName", key).Return(tc.plugin, tc.getError) - r := newRestartableRestoreItemAction(name, p) + r := NewRestartableRestoreItemAction(name, p) a, err := r.getRestoreItemAction() if tc.expectedError != "" { assert.EqualError(t, err, tc.expectedError) @@ -76,22 +79,22 @@ func TestRestartableGetRestoreItemAction(t *testing.T) { } func TestRestartableRestoreItemActionGetDelegate(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) defer p.AssertExpectations(t) // Reset error - p.On("resetIfNeeded").Return(errors.Errorf("reset error")).Once() + p.On("ResetIfNeeded").Return(errors.Errorf("reset error")).Once() name := "pod" - r := newRestartableRestoreItemAction(name, p) + r := NewRestartableRestoreItemAction(name, p) a, err := r.getDelegate() assert.Nil(t, a) assert.EqualError(t, err, "reset error") // Happy path - p.On("resetIfNeeded").Return(nil) - expected := new(mocks.ItemAction) - key := kindAndName{kind: framework.PluginKindRestoreItemAction, name: name} - p.On("getByKindAndName", key).Return(expected, nil) + p.On("ResetIfNeeded").Return(nil) + expected := new(mocks.RestoreItemAction) + key := process.KindAndName{Kind: common.PluginKindRestoreItemAction, Name: 
name} + p.On("GetByKindAndName", key).Return(expected, nil) a, err = r.getDelegate() assert.NoError(t, err) @@ -105,13 +108,13 @@ func TestRestartableRestoreItemActionDelegatedFunctions(t *testing.T) { }, } - input := &velero.RestoreItemActionExecuteInput{ + input := &riav1.RestoreItemActionExecuteInput{ Item: pv, ItemFromBackup: pv, Restore: new(v1.Restore), } - output := &velero.RestoreItemActionExecuteOutput{ + output := &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: &unstructured.Unstructured{ Object: map[string]interface{}{ "color": "green", @@ -119,29 +122,29 @@ func TestRestartableRestoreItemActionDelegatedFunctions(t *testing.T) { }, } - runRestartableDelegateTests( + restartabletest.RunRestartableDelegateTests( t, - framework.PluginKindRestoreItemAction, - func(key kindAndName, p RestartableProcess) interface{} { - return &restartableRestoreItemAction{ - key: key, - sharedPluginProcess: p, + common.PluginKindRestoreItemAction, + func(key process.KindAndName, p process.RestartableProcess) interface{} { + return &RestartableRestoreItemAction{ + Key: key, + SharedPluginProcess: p, } }, - func() mockable { - return new(mocks.ItemAction) + func() restartabletest.Mockable { + return new(mocks.RestoreItemAction) }, - restartableDelegateTest{ - function: "AppliesTo", - inputs: []interface{}{}, - expectedErrorOutputs: []interface{}{velero.ResourceSelector{}, errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{velero.ResourceSelector{IncludedNamespaces: []string{"a"}}, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "AppliesTo", + Inputs: []interface{}{}, + ExpectedErrorOutputs: []interface{}{velero.ResourceSelector{}, errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{velero.ResourceSelector{IncludedNamespaces: []string{"a"}}, errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "Execute", - inputs: []interface{}{input}, - expectedErrorOutputs: []interface{}{nil, errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{output, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "Execute", + Inputs: []interface{}{input}, + ExpectedErrorOutputs: []interface{}{nil, errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{output, errors.Errorf("delegate error")}, }, ) } diff --git a/pkg/plugin/clientmgmt/restartable_volume_snapshotter.go b/pkg/plugin/clientmgmt/volumesnapshotter/v1/restartable_volume_snapshotter.go similarity index 60% rename from pkg/plugin/clientmgmt/restartable_volume_snapshotter.go rename to pkg/plugin/clientmgmt/volumesnapshotter/v1/restartable_volume_snapshotter.go index 5e46bf2299..00a6742347 100644 --- a/pkg/plugin/clientmgmt/restartable_volume_snapshotter.go +++ b/pkg/plugin/clientmgmt/volumesnapshotter/v1/restartable_volume_snapshotter.go @@ -1,5 +1,5 @@ /* -Copyright 2018 the Velero contributors. +Copyright the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,43 +14,63 @@ See the License for the specific language governing permissions and limitations under the License. 
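For orientation, the following is a minimal sketch of a plugin-side implementation written against the relocated riav1 types exercised by the tests above; the NoOpRestoreItemAction name and its pass-through behavior are illustrative assumptions, not code from this change.

package example

import (
	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
	riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1"
)

// NoOpRestoreItemAction is a hypothetical RestoreItemAction used only to show the v1 method set.
type NoOpRestoreItemAction struct{}

// AppliesTo narrows the action to pods; the selector fields are the velero.ResourceSelector fields used above.
func (a *NoOpRestoreItemAction) AppliesTo() (velero.ResourceSelector, error) {
	return velero.ResourceSelector{IncludedResources: []string{"pods"}}, nil
}

// Execute returns the item unmodified, mirroring the signature that RestartableRestoreItemAction.Execute delegates to.
func (a *NoOpRestoreItemAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) {
	return &riav1.RestoreItemActionExecuteOutput{UpdatedItem: input.Item}, nil
}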
*/ -package clientmgmt +package v1 import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" + vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" ) -// restartableVolumeSnapshotter is a volume snapshotter for a given implementation (such as "aws"). It is associated with +// AdaptedVolumeSnapshotter is a volume snapshotter adapted to the v1 VolumeSnapshotter API +type AdaptedVolumeSnapshotter struct { + Kind common.PluginKind + + // Get returns a restartable VolumeSnapshotter for the given name and process, wrapping if necessary + GetRestartable func(name string, restartableProcess process.RestartableProcess) vsv1.VolumeSnapshotter +} + +func AdaptedVolumeSnapshotters() []AdaptedVolumeSnapshotter { + return []AdaptedVolumeSnapshotter{ + { + Kind: common.PluginKindVolumeSnapshotter, + GetRestartable: func(name string, restartableProcess process.RestartableProcess) vsv1.VolumeSnapshotter { + return NewRestartableVolumeSnapshotter(name, restartableProcess) + }, + }, + } +} + +// RestartableVolumeSnapshotter is a volume snapshotter for a given implementation (such as "aws"). It is associated with // a restartableProcess, which may be shared and used to run multiple plugins. At the beginning of each method // call, the restartableVolumeSnapshotter asks its restartableProcess to restart itself if needed (e.g. if the // process terminated for any reason), then it proceeds with the actual call. -type restartableVolumeSnapshotter struct { - key kindAndName - sharedPluginProcess RestartableProcess +type RestartableVolumeSnapshotter struct { + Key process.KindAndName + SharedPluginProcess process.RestartableProcess config map[string]string } -// newRestartableVolumeSnapshotter returns a new restartableVolumeSnapshotter. -func newRestartableVolumeSnapshotter(name string, sharedPluginProcess RestartableProcess) *restartableVolumeSnapshotter { - key := kindAndName{kind: framework.PluginKindVolumeSnapshotter, name: name} - r := &restartableVolumeSnapshotter{ - key: key, - sharedPluginProcess: sharedPluginProcess, +// NewRestartableVolumeSnapshotter returns a new restartableVolumeSnapshotter. +func NewRestartableVolumeSnapshotter(name string, sharedPluginProcess process.RestartableProcess) *RestartableVolumeSnapshotter { + key := process.KindAndName{Kind: common.PluginKindVolumeSnapshotter, Name: name} + r := &RestartableVolumeSnapshotter{ + Key: key, + SharedPluginProcess: sharedPluginProcess, } // Register our reinitializer so we can reinitialize after a restart with r.config. - sharedPluginProcess.addReinitializer(key, r) + sharedPluginProcess.AddReinitializer(key, r) return r } // reinitialize reinitializes a re-dispensed plugin using the initial data passed to Init(). -func (r *restartableVolumeSnapshotter) reinitialize(dispensed interface{}) error { - volumeSnapshotter, ok := dispensed.(velero.VolumeSnapshotter) +func (r *RestartableVolumeSnapshotter) Reinitialize(dispensed interface{}) error { + volumeSnapshotter, ok := dispensed.(vsv1.VolumeSnapshotter) if !ok { return errors.Errorf("%T is not a VolumeSnapshotter!", dispensed) } @@ -59,13 +79,13 @@ func (r *restartableVolumeSnapshotter) reinitialize(dispensed interface{}) error // getVolumeSnapshotter returns the volume snapshotter for this restartableVolumeSnapshotter. 
It does *not* restart the // plugin process. -func (r *restartableVolumeSnapshotter) getVolumeSnapshotter() (velero.VolumeSnapshotter, error) { - plugin, err := r.sharedPluginProcess.getByKindAndName(r.key) +func (r *RestartableVolumeSnapshotter) getVolumeSnapshotter() (vsv1.VolumeSnapshotter, error) { + plugin, err := r.SharedPluginProcess.GetByKindAndName(r.Key) if err != nil { return nil, err } - volumeSnapshotter, ok := plugin.(velero.VolumeSnapshotter) + volumeSnapshotter, ok := plugin.(vsv1.VolumeSnapshotter) if !ok { return nil, errors.Errorf("%T is not a VolumeSnapshotter!", plugin) } @@ -73,9 +93,9 @@ func (r *restartableVolumeSnapshotter) getVolumeSnapshotter() (velero.VolumeSnap return volumeSnapshotter, nil } -// getDelegate restarts the plugin process (if needed) and returns the volume snapshotter for this restartableVolumeSnapshotter. -func (r *restartableVolumeSnapshotter) getDelegate() (velero.VolumeSnapshotter, error) { - if err := r.sharedPluginProcess.resetIfNeeded(); err != nil { +// getDelegate restarts the plugin process (if needed) and returns the volume snapshotter for this RestartableVolumeSnapshotter. +func (r *RestartableVolumeSnapshotter) getDelegate() (vsv1.VolumeSnapshotter, error) { + if err := r.SharedPluginProcess.ResetIfNeeded(); err != nil { return nil, err } @@ -84,7 +104,7 @@ func (r *restartableVolumeSnapshotter) getDelegate() (velero.VolumeSnapshotter, // Init initializes the volume snapshotter instance using config. If this is the first invocation, r stores config for future // reinitialization needs. Init does NOT restart the shared plugin process. Init may only be called once. -func (r *restartableVolumeSnapshotter) Init(config map[string]string) error { +func (r *RestartableVolumeSnapshotter) Init(config map[string]string) error { if r.config != nil { return errors.Errorf("already initialized") } @@ -102,12 +122,12 @@ func (r *restartableVolumeSnapshotter) Init(config map[string]string) error { // init calls Init on volumeSnapshotter with config. This is split out from Init() so that both Init() and reinitialize() may // call it using a specific VolumeSnapshotter. -func (r *restartableVolumeSnapshotter) init(volumeSnapshotter velero.VolumeSnapshotter, config map[string]string) error { +func (r *RestartableVolumeSnapshotter) init(volumeSnapshotter vsv1.VolumeSnapshotter, config map[string]string) error { return volumeSnapshotter.Init(config) } // CreateVolumeFromSnapshot restarts the plugin's process if needed, then delegates the call. -func (r *restartableVolumeSnapshotter) CreateVolumeFromSnapshot(snapshotID string, volumeType string, volumeAZ string, iops *int64) (volumeID string, err error) { +func (r *RestartableVolumeSnapshotter) CreateVolumeFromSnapshot(snapshotID string, volumeType string, volumeAZ string, iops *int64) (volumeID string, err error) { delegate, err := r.getDelegate() if err != nil { return "", err @@ -116,7 +136,7 @@ func (r *restartableVolumeSnapshotter) CreateVolumeFromSnapshot(snapshotID strin } // GetVolumeID restarts the plugin's process if needed, then delegates the call. -func (r *restartableVolumeSnapshotter) GetVolumeID(pv runtime.Unstructured) (string, error) { +func (r *RestartableVolumeSnapshotter) GetVolumeID(pv runtime.Unstructured) (string, error) { delegate, err := r.getDelegate() if err != nil { return "", err @@ -125,7 +145,7 @@ func (r *restartableVolumeSnapshotter) GetVolumeID(pv runtime.Unstructured) (str } // SetVolumeID restarts the plugin's process if needed, then delegates the call. 
-func (r *restartableVolumeSnapshotter) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) { +func (r *RestartableVolumeSnapshotter) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) { delegate, err := r.getDelegate() if err != nil { return nil, err @@ -134,7 +154,7 @@ func (r *restartableVolumeSnapshotter) SetVolumeID(pv runtime.Unstructured, volu } // GetVolumeInfo restarts the plugin's process if needed, then delegates the call. -func (r *restartableVolumeSnapshotter) GetVolumeInfo(volumeID string, volumeAZ string) (string, *int64, error) { +func (r *RestartableVolumeSnapshotter) GetVolumeInfo(volumeID string, volumeAZ string) (string, *int64, error) { delegate, err := r.getDelegate() if err != nil { return "", nil, err @@ -143,7 +163,7 @@ func (r *restartableVolumeSnapshotter) GetVolumeInfo(volumeID string, volumeAZ s } // CreateSnapshot restarts the plugin's process if needed, then delegates the call. -func (r *restartableVolumeSnapshotter) CreateSnapshot(volumeID string, volumeAZ string, tags map[string]string) (snapshotID string, err error) { +func (r *RestartableVolumeSnapshotter) CreateSnapshot(volumeID string, volumeAZ string, tags map[string]string) (snapshotID string, err error) { delegate, err := r.getDelegate() if err != nil { return "", err @@ -152,7 +172,7 @@ func (r *restartableVolumeSnapshotter) CreateSnapshot(volumeID string, volumeAZ } // DeleteSnapshot restarts the plugin's process if needed, then delegates the call. -func (r *restartableVolumeSnapshotter) DeleteSnapshot(snapshotID string) error { +func (r *RestartableVolumeSnapshotter) DeleteSnapshot(snapshotID string) error { delegate, err := r.getDelegate() if err != nil { return err diff --git a/pkg/plugin/clientmgmt/restartable_volume_snapshotter_test.go b/pkg/plugin/clientmgmt/volumesnapshotter/v1/restartable_volume_snapshotter_test.go similarity index 54% rename from pkg/plugin/clientmgmt/restartable_volume_snapshotter_test.go rename to pkg/plugin/clientmgmt/volumesnapshotter/v1/restartable_volume_snapshotter_test.go index 762153e054..787191ec05 100644 --- a/pkg/plugin/clientmgmt/restartable_volume_snapshotter_test.go +++ b/pkg/plugin/clientmgmt/volumesnapshotter/v1/restartable_volume_snapshotter_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 the Velero contributors. +Copyright the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
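As a rough usage sketch only, the new AdaptedVolumeSnapshotters registry above can be walked to obtain a restartable v1 snapshotter for a named plugin; the real consumer is the client manager wiring elsewhere in this change, and the helper below is an assumption for illustration.

package example

import (
	"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process"
	vsclientv1 "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/volumesnapshotter/v1"
	"github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
	vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1"
)

// snapshotterFor is a hypothetical helper: it walks AdaptedVolumeSnapshotters and uses
// GetRestartable to wrap the shared plugin process for the matching plugin kind.
func snapshotterFor(name string, p process.RestartableProcess) vsv1.VolumeSnapshotter {
	for _, adapted := range vsclientv1.AdaptedVolumeSnapshotters() {
		if adapted.Kind == common.PluginKindVolumeSnapshotter {
			return adapted.GetRestartable(name, p)
		}
	}
	return nil
}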
*/ -package clientmgmt +package v1 import ( "testing" @@ -25,8 +25,10 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" - providermocks "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks" + "github.com/vmware-tanzu/velero/internal/restartabletest" + "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" + providermocks "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks/volumesnapshotter/v1" ) func TestRestartableGetVolumeSnapshotter(t *testing.T) { @@ -54,17 +56,17 @@ func TestRestartableGetVolumeSnapshotter(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) p.Test(t) defer p.AssertExpectations(t) name := "aws" - key := kindAndName{kind: framework.PluginKindVolumeSnapshotter, name: name} - p.On("getByKindAndName", key).Return(tc.plugin, tc.getError) + key := process.KindAndName{Kind: common.PluginKindVolumeSnapshotter, Name: name} + p.On("GetByKindAndName", key).Return(tc.plugin, tc.getError) - r := &restartableVolumeSnapshotter{ - key: key, - sharedPluginProcess: p, + r := &RestartableVolumeSnapshotter{ + Key: key, + SharedPluginProcess: p, } a, err := r.getVolumeSnapshotter() if tc.expectedError != "" { @@ -79,21 +81,21 @@ func TestRestartableGetVolumeSnapshotter(t *testing.T) { } func TestRestartableVolumeSnapshotterReinitialize(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) p.Test(t) defer p.AssertExpectations(t) name := "aws" - key := kindAndName{kind: framework.PluginKindVolumeSnapshotter, name: name} - r := &restartableVolumeSnapshotter{ - key: key, - sharedPluginProcess: p, + key := process.KindAndName{Kind: common.PluginKindVolumeSnapshotter, Name: name} + r := &RestartableVolumeSnapshotter{ + Key: key, + SharedPluginProcess: p, config: map[string]string{ "color": "blue", }, } - err := r.reinitialize(3) + err := r.Reinitialize(3) assert.EqualError(t, err, "int is not a VolumeSnapshotter!") volumeSnapshotter := new(providermocks.VolumeSnapshotter) @@ -101,37 +103,37 @@ func TestRestartableVolumeSnapshotterReinitialize(t *testing.T) { defer volumeSnapshotter.AssertExpectations(t) volumeSnapshotter.On("Init", r.config).Return(errors.Errorf("init error")).Once() - err = r.reinitialize(volumeSnapshotter) + err = r.Reinitialize(volumeSnapshotter) assert.EqualError(t, err, "init error") volumeSnapshotter.On("Init", r.config).Return(nil) - err = r.reinitialize(volumeSnapshotter) + err = r.Reinitialize(volumeSnapshotter) assert.NoError(t, err) } func TestRestartableVolumeSnapshotterGetDelegate(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) p.Test(t) defer p.AssertExpectations(t) // Reset error - p.On("resetIfNeeded").Return(errors.Errorf("reset error")).Once() + p.On("ResetIfNeeded").Return(errors.Errorf("reset error")).Once() name := "aws" - key := kindAndName{kind: framework.PluginKindVolumeSnapshotter, name: name} - r := &restartableVolumeSnapshotter{ - key: key, - sharedPluginProcess: p, + key := process.KindAndName{Kind: common.PluginKindVolumeSnapshotter, Name: name} + r := &RestartableVolumeSnapshotter{ + Key: key, + SharedPluginProcess: p, } a, err := r.getDelegate() assert.Nil(t, a) assert.EqualError(t, err, "reset error") // Happy path - p.On("resetIfNeeded").Return(nil) + 
p.On("ResetIfNeeded").Return(nil) volumeSnapshotter := new(providermocks.VolumeSnapshotter) volumeSnapshotter.Test(t) defer volumeSnapshotter.AssertExpectations(t) - p.On("getByKindAndName", key).Return(volumeSnapshotter, nil) + p.On("GetByKindAndName", key).Return(volumeSnapshotter, nil) a, err = r.getDelegate() assert.NoError(t, err) @@ -139,30 +141,30 @@ func TestRestartableVolumeSnapshotterGetDelegate(t *testing.T) { } func TestRestartableVolumeSnapshotterInit(t *testing.T) { - p := new(mockRestartableProcess) + p := new(restartabletest.MockRestartableProcess) p.Test(t) defer p.AssertExpectations(t) // getVolumeSnapshottererror name := "aws" - key := kindAndName{kind: framework.PluginKindVolumeSnapshotter, name: name} - r := &restartableVolumeSnapshotter{ - key: key, - sharedPluginProcess: p, + key := process.KindAndName{Kind: common.PluginKindVolumeSnapshotter, Name: name} + r := &RestartableVolumeSnapshotter{ + Key: key, + SharedPluginProcess: p, } - p.On("getByKindAndName", key).Return(nil, errors.Errorf("getByKindAndName error")).Once() + p.On("GetByKindAndName", key).Return(nil, errors.Errorf("GetByKindAndName error")).Once() config := map[string]string{ "color": "blue", } err := r.Init(config) - assert.EqualError(t, err, "getByKindAndName error") + assert.EqualError(t, err, "GetByKindAndName error") // Delegate returns error volumeSnapshotter := new(providermocks.VolumeSnapshotter) volumeSnapshotter.Test(t) defer volumeSnapshotter.AssertExpectations(t) - p.On("getByKindAndName", key).Return(volumeSnapshotter, nil) + p.On("GetByKindAndName", key).Return(volumeSnapshotter, nil) volumeSnapshotter.On("Init", config).Return(errors.Errorf("Init error")).Once() err = r.Init(config) @@ -195,53 +197,53 @@ func TestRestartableVolumeSnapshotterDelegatedFunctions(t *testing.T) { }, } - runRestartableDelegateTests( + restartabletest.RunRestartableDelegateTests( t, - framework.PluginKindVolumeSnapshotter, - func(key kindAndName, p RestartableProcess) interface{} { - return &restartableVolumeSnapshotter{ - key: key, - sharedPluginProcess: p, + common.PluginKindVolumeSnapshotter, + func(key process.KindAndName, p process.RestartableProcess) interface{} { + return &RestartableVolumeSnapshotter{ + Key: key, + SharedPluginProcess: p, } }, - func() mockable { + func() restartabletest.Mockable { return new(providermocks.VolumeSnapshotter) }, - restartableDelegateTest{ - function: "CreateVolumeFromSnapshot", - inputs: []interface{}{"snapshotID", "volumeID", "volumeAZ", to.Int64Ptr(10000)}, - expectedErrorOutputs: []interface{}{"", errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{"volumeID", errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "CreateVolumeFromSnapshot", + Inputs: []interface{}{"snapshotID", "volumeID", "volumeAZ", to.Int64Ptr(10000)}, + ExpectedErrorOutputs: []interface{}{"", errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{"volumeID", errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "GetVolumeID", - inputs: []interface{}{pv}, - expectedErrorOutputs: []interface{}{"", errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{"volumeID", errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "GetVolumeID", + Inputs: []interface{}{pv}, + ExpectedErrorOutputs: []interface{}{"", errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{"volumeID", errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - 
function: "SetVolumeID", - inputs: []interface{}{pv, "volumeID"}, - expectedErrorOutputs: []interface{}{nil, errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{pvToReturn, errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "SetVolumeID", + Inputs: []interface{}{pv, "volumeID"}, + ExpectedErrorOutputs: []interface{}{nil, errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{pvToReturn, errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "GetVolumeInfo", - inputs: []interface{}{"volumeID", "volumeAZ"}, - expectedErrorOutputs: []interface{}{"", (*int64)(nil), errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{"volumeType", to.Int64Ptr(10000), errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "GetVolumeInfo", + Inputs: []interface{}{"volumeID", "volumeAZ"}, + ExpectedErrorOutputs: []interface{}{"", (*int64)(nil), errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{"volumeType", to.Int64Ptr(10000), errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "CreateSnapshot", - inputs: []interface{}{"volumeID", "volumeAZ", map[string]string{"a": "b"}}, - expectedErrorOutputs: []interface{}{"", errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{"snapshotID", errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "CreateSnapshot", + Inputs: []interface{}{"volumeID", "volumeAZ", map[string]string{"a": "b"}}, + ExpectedErrorOutputs: []interface{}{"", errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{"snapshotID", errors.Errorf("delegate error")}, }, - restartableDelegateTest{ - function: "DeleteSnapshot", - inputs: []interface{}{"snapshotID"}, - expectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, - expectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, + restartabletest.RestartableDelegateTest{ + Function: "DeleteSnapshot", + Inputs: []interface{}{"snapshotID"}, + ExpectedErrorOutputs: []interface{}{errors.Errorf("reset error")}, + ExpectedDelegateOutputs: []interface{}{errors.Errorf("delegate error")}, }, ) } diff --git a/pkg/plugin/framework/action_resolver.go b/pkg/plugin/framework/action_resolver.go index 9797ba5269..5664d08c2c 100644 --- a/pkg/plugin/framework/action_resolver.go +++ b/pkg/plugin/framework/action_resolver.go @@ -22,10 +22,11 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + biav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v1" isv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" - - "github.com/vmware-tanzu/velero/pkg/discovery" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" "github.com/vmware-tanzu/velero/pkg/util/collections" ) @@ -98,17 +99,17 @@ func resolveAction(helper discovery.Helper, action velero.Applicable) (resources } type BackupItemResolvedAction struct { - velero.BackupItemAction + biav1.BackupItemAction resolvedAction } -func NewBackupItemActionResolver(actions []velero.BackupItemAction) BackupItemActionResolver { +func NewBackupItemActionResolver(actions []biav1.BackupItemAction) BackupItemActionResolver { return BackupItemActionResolver{ actions: actions, } } -func NewRestoreItemActionResolver(actions []velero.RestoreItemAction) 
RestoreItemActionResolver { +func NewRestoreItemActionResolver(actions []riav1.RestoreItemAction) RestoreItemActionResolver { return RestoreItemActionResolver{ actions: actions, } @@ -127,14 +128,14 @@ func NewItemSnapshotterResolver(actions []isv1.ItemSnapshotter) ItemSnapshotterR } type ActionResolver interface { - ResolveAction(helper discovery.Helper, action velero.Applicable) (ResolvedAction, error) + ResolveAction(helper discovery.Helper, action velero.Applicable, log logrus.FieldLogger) (ResolvedAction, error) } type BackupItemActionResolver struct { - actions []velero.BackupItemAction + actions []biav1.BackupItemAction } -func (recv BackupItemActionResolver) ResolveActions(helper discovery.Helper) ([]BackupItemResolvedAction, error) { +func (recv BackupItemActionResolver) ResolveActions(helper discovery.Helper, log logrus.FieldLogger) ([]BackupItemResolvedAction, error) { var resolved []BackupItemResolvedAction for _, action := range recv.actions { resources, namespaces, selector, err := resolveAction(helper, action) @@ -155,15 +156,15 @@ func (recv BackupItemActionResolver) ResolveActions(helper discovery.Helper) ([] } type RestoreItemResolvedAction struct { - velero.RestoreItemAction + riav1.RestoreItemAction resolvedAction } type RestoreItemActionResolver struct { - actions []velero.RestoreItemAction + actions []riav1.RestoreItemAction } -func (recv RestoreItemActionResolver) ResolveActions(helper discovery.Helper) ([]RestoreItemResolvedAction, error) { +func (recv RestoreItemActionResolver) ResolveActions(helper discovery.Helper, log logrus.FieldLogger) ([]RestoreItemResolvedAction, error) { var resolved []RestoreItemResolvedAction for _, action := range recv.actions { resources, namespaces, selector, err := resolveAction(helper, action) @@ -192,7 +193,7 @@ type DeleteItemActionResolver struct { actions []velero.DeleteItemAction } -func (recv DeleteItemActionResolver) ResolveActions(helper discovery.Helper) ([]DeleteItemResolvedAction, error) { +func (recv DeleteItemActionResolver) ResolveActions(helper discovery.Helper, log logrus.FieldLogger) ([]DeleteItemResolvedAction, error) { var resolved []DeleteItemResolvedAction for _, action := range recv.actions { resources, namespaces, selector, err := resolveAction(helper, action) @@ -221,7 +222,7 @@ type ItemSnapshotterResolver struct { actions []isv1.ItemSnapshotter } -func (recv ItemSnapshotterResolver) ResolveActions(helper discovery.Helper) ([]ItemSnapshotterResolvedAction, error) { +func (recv ItemSnapshotterResolver) ResolveActions(helper discovery.Helper, log logrus.FieldLogger) ([]ItemSnapshotterResolvedAction, error) { var resolved []ItemSnapshotterResolvedAction for _, action := range recv.actions { resources, namespaces, selector, err := resolveAction(helper, action) diff --git a/pkg/plugin/framework/backup_item_action.go b/pkg/plugin/framework/backup_item_action.go index 1dc7bf3e10..8f6bc7554a 100644 --- a/pkg/plugin/framework/backup_item_action.go +++ b/pkg/plugin/framework/backup_item_action.go @@ -21,7 +21,8 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" - proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" + protobiav1 "github.com/vmware-tanzu/velero/pkg/plugin/generated" ) // BackupItemActionPlugin is an implementation of go-plugin's Plugin @@ -29,16 +30,16 @@ import ( // interface. 
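With the resolver entry points now taking a logger, a call-shape sketch (the surrounding helper is assumed, not shown in this hunk) looks like the following.

package example

import (
	"github.com/sirupsen/logrus"

	"github.com/vmware-tanzu/velero/pkg/discovery"
	"github.com/vmware-tanzu/velero/pkg/plugin/framework"
	riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1"
)

// resolveRestoreActions shows the call shape after the logger parameter was added to ResolveActions.
func resolveRestoreActions(helper discovery.Helper, actions []riav1.RestoreItemAction, log logrus.FieldLogger) ([]framework.RestoreItemResolvedAction, error) {
	resolver := framework.NewRestoreItemActionResolver(actions)
	return resolver.ResolveActions(helper, log)
}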
type BackupItemActionPlugin struct { plugin.NetRPCUnsupportedPlugin - *pluginBase + *common.PluginBase } // GRPCClient returns a clientDispenser for BackupItemAction gRPC clients. func (p *BackupItemActionPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, clientConn *grpc.ClientConn) (interface{}, error) { - return newClientDispenser(p.clientLogger, clientConn, newBackupItemActionGRPCClient), nil + return common.NewClientDispenser(p.ClientLogger, clientConn, newBackupItemActionGRPCClient), nil } // GRPCServer registers a BackupItemAction gRPC server. func (p *BackupItemActionPlugin) GRPCServer(_ *plugin.GRPCBroker, server *grpc.Server) error { - proto.RegisterBackupItemActionServer(server, &BackupItemActionGRPCServer{mux: p.serverMux}) + protobiav1.RegisterBackupItemActionServer(server, &BackupItemActionGRPCServer{mux: p.ServerMux}) return nil } diff --git a/pkg/plugin/framework/backup_item_action_client.go b/pkg/plugin/framework/backup_item_action_client.go index b48de39289..50101975f9 100644 --- a/pkg/plugin/framework/backup_item_action_client.go +++ b/pkg/plugin/framework/backup_item_action_client.go @@ -27,39 +27,40 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" + protobiav1 "github.com/vmware-tanzu/velero/pkg/plugin/generated" "github.com/vmware-tanzu/velero/pkg/plugin/velero" ) // NewBackupItemActionPlugin constructs a BackupItemActionPlugin. -func NewBackupItemActionPlugin(options ...PluginOption) *BackupItemActionPlugin { +func NewBackupItemActionPlugin(options ...common.PluginOption) *BackupItemActionPlugin { return &BackupItemActionPlugin{ - pluginBase: newPluginBase(options...), + PluginBase: common.NewPluginBase(options...), } } // BackupItemActionGRPCClient implements the backup/ItemAction interface and uses a // gRPC client to make calls to the plugin server. 
type BackupItemActionGRPCClient struct { - *clientBase - grpcClient proto.BackupItemActionClient + *common.ClientBase + grpcClient protobiav1.BackupItemActionClient } -func newBackupItemActionGRPCClient(base *clientBase, clientConn *grpc.ClientConn) interface{} { +func newBackupItemActionGRPCClient(base *common.ClientBase, clientConn *grpc.ClientConn) interface{} { return &BackupItemActionGRPCClient{ - clientBase: base, - grpcClient: proto.NewBackupItemActionClient(clientConn), + ClientBase: base, + grpcClient: protobiav1.NewBackupItemActionClient(clientConn), } } func (c *BackupItemActionGRPCClient) AppliesTo() (velero.ResourceSelector, error) { - req := &proto.BackupItemActionAppliesToRequest{ - Plugin: c.plugin, + req := &protobiav1.BackupItemActionAppliesToRequest{ + Plugin: c.Plugin, } res, err := c.grpcClient.AppliesTo(context.Background(), req) if err != nil { - return velero.ResourceSelector{}, fromGRPCError(err) + return velero.ResourceSelector{}, common.FromGRPCError(err) } if res.ResourceSelector == nil { @@ -86,15 +87,15 @@ func (c *BackupItemActionGRPCClient) Execute(item runtime.Unstructured, backup * return nil, nil, errors.WithStack(err) } - req := &proto.ExecuteRequest{ - Plugin: c.plugin, + req := &protobiav1.ExecuteRequest{ + Plugin: c.Plugin, Item: itemJSON, Backup: backupJSON, } res, err := c.grpcClient.Execute(context.Background(), req) if err != nil { - return nil, nil, fromGRPCError(err) + return nil, nil, common.FromGRPCError(err) } var updatedItem unstructured.Unstructured diff --git a/pkg/plugin/framework/backup_item_action_server.go b/pkg/plugin/framework/backup_item_action_server.go index fb4b8a6af6..bde6c2148a 100644 --- a/pkg/plugin/framework/backup_item_action_server.go +++ b/pkg/plugin/framework/backup_item_action_server.go @@ -24,23 +24,26 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" + protobiav1 "github.com/vmware-tanzu/velero/pkg/plugin/generated" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + biav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v1" ) // BackupItemActionGRPCServer implements the proto-generated BackupItemAction interface, and accepts // gRPC calls and forwards them to an implementation of the pluggable interface. 
type BackupItemActionGRPCServer struct { - mux *serverMux + mux *common.ServerMux } -func (s *BackupItemActionGRPCServer) getImpl(name string) (velero.BackupItemAction, error) { - impl, err := s.mux.getHandler(name) +func (s *BackupItemActionGRPCServer) getImpl(name string) (biav1.BackupItemAction, error) { + impl, err := s.mux.GetHandler(name) if err != nil { return nil, err } - itemAction, ok := impl.(velero.BackupItemAction) + itemAction, ok := impl.(biav1.BackupItemAction) if !ok { return nil, errors.Errorf("%T is not a backup item action", impl) } @@ -48,25 +51,27 @@ func (s *BackupItemActionGRPCServer) getImpl(name string) (velero.BackupItemActi return itemAction, nil } -func (s *BackupItemActionGRPCServer) AppliesTo(ctx context.Context, req *proto.BackupItemActionAppliesToRequest) (response *proto.BackupItemActionAppliesToResponse, err error) { +func (s *BackupItemActionGRPCServer) AppliesTo( + ctx context.Context, req *protobiav1.BackupItemActionAppliesToRequest) ( + response *protobiav1.BackupItemActionAppliesToResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } resourceSelector, err := impl.AppliesTo() if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } - return &proto.BackupItemActionAppliesToResponse{ - &proto.ResourceSelector{ + return &protobiav1.BackupItemActionAppliesToResponse{ + ResourceSelector: &proto.ResourceSelector{ IncludedNamespaces: resourceSelector.IncludedNamespaces, ExcludedNamespaces: resourceSelector.ExcludedNamespaces, IncludedResources: resourceSelector.IncludedResources, @@ -76,31 +81,32 @@ func (s *BackupItemActionGRPCServer) AppliesTo(ctx context.Context, req *proto.B }, nil } -func (s *BackupItemActionGRPCServer) Execute(ctx context.Context, req *proto.ExecuteRequest) (response *proto.ExecuteResponse, err error) { +func (s *BackupItemActionGRPCServer) Execute( + ctx context.Context, req *protobiav1.ExecuteRequest) (response *protobiav1.ExecuteResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } var item unstructured.Unstructured var backup api.Backup if err := json.Unmarshal(req.Item, &item); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } if err := json.Unmarshal(req.Backup, &backup); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } updatedItem, additionalItems, err := impl.Execute(&item, &backup) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } // If the plugin implementation returned a nil updatedItem (meaning no modifications), reset updatedItem to the @@ -111,11 +117,11 @@ func (s *BackupItemActionGRPCServer) Execute(ctx context.Context, req *proto.Exe } else { updatedItemJSON, err = json.Marshal(updatedItem.UnstructuredContent()) if err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } } - res := &proto.ExecuteResponse{ 
+ res := &protobiav1.ExecuteResponse{ Item: updatedItemJSON, } diff --git a/pkg/plugin/framework/backup_item_action_test.go b/pkg/plugin/framework/backup_item_action_test.go index 7d41e0e781..a88aaf853b 100644 --- a/pkg/plugin/framework/backup_item_action_test.go +++ b/pkg/plugin/framework/backup_item_action_test.go @@ -29,9 +29,11 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/backup/mocks" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" + protobiav1 "github.com/vmware-tanzu/velero/pkg/plugin/generated" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + mocks "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks/backupitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -146,21 +148,21 @@ func TestBackupItemActionGRPCServerExecute(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - itemAction := &mocks.ItemAction{} + itemAction := &mocks.BackupItemAction{} defer itemAction.AssertExpectations(t) if !test.skipMock { itemAction.On("Execute", &validItemObject, &validBackupObject).Return(test.implUpdatedItem, test.implAdditionalItems, test.implError) } - s := &BackupItemActionGRPCServer{mux: &serverMux{ - serverLog: velerotest.NewLogger(), - handlers: map[string]interface{}{ + s := &BackupItemActionGRPCServer{mux: &common.ServerMux{ + ServerLog: velerotest.NewLogger(), + Handlers: map[string]interface{}{ "xyz": itemAction, }, }} - req := &proto.ExecuteRequest{ + req := &protobiav1.ExecuteRequest{ Plugin: "xyz", Item: test.item, Backup: test.backup, diff --git a/pkg/plugin/framework/client_dispenser.go b/pkg/plugin/framework/common/client_dispenser.go similarity index 86% rename from pkg/plugin/framework/client_dispenser.go rename to pkg/plugin/framework/common/client_dispenser.go index 83e934d725..1abb11c2df 100644 --- a/pkg/plugin/framework/client_dispenser.go +++ b/pkg/plugin/framework/common/client_dispenser.go @@ -14,17 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ -package framework +package common import ( "github.com/sirupsen/logrus" "google.golang.org/grpc" ) -// clientBase implements client and contains shared fields common to all clients. -type clientBase struct { - plugin string - logger logrus.FieldLogger +// ClientBase implements client and contains shared fields common to all clients. +type ClientBase struct { + Plugin string + Logger logrus.FieldLogger } type ClientDispenser interface { @@ -44,10 +44,10 @@ type clientDispenser struct { clients map[string]interface{} } -type clientInitFunc func(base *clientBase, clientConn *grpc.ClientConn) interface{} +type clientInitFunc func(base *ClientBase, clientConn *grpc.ClientConn) interface{} // newClientDispenser creates a new clientDispenser. -func newClientDispenser(logger logrus.FieldLogger, clientConn *grpc.ClientConn, initFunc clientInitFunc) *clientDispenser { +func NewClientDispenser(logger logrus.FieldLogger, clientConn *grpc.ClientConn, initFunc clientInitFunc) *clientDispenser { return &clientDispenser{ clientConn: clientConn, logger: logger, @@ -63,9 +63,9 @@ func (cd *clientDispenser) ClientFor(name string) interface{} { return client } - base := &clientBase{ - plugin: name, - logger: cd.logger, + base := &ClientBase{ + Plugin: name, + Logger: cd.logger, } // Initialize the plugin (e.g. 
newBackupItemActionGRPCClient()) client := cd.initFunc(base, cd.clientConn) diff --git a/pkg/plugin/framework/client_dispenser_test.go b/pkg/plugin/framework/common/client_dispenser_test.go similarity index 83% rename from pkg/plugin/framework/client_dispenser_test.go rename to pkg/plugin/framework/common/client_dispenser_test.go index 9ec5266691..bd1461658a 100644 --- a/pkg/plugin/framework/client_dispenser_test.go +++ b/pkg/plugin/framework/common/client_dispenser_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package framework +package common import ( "testing" @@ -26,7 +26,7 @@ import ( ) type fakeClient struct { - base *clientBase + base *ClientBase clientConn *grpc.ClientConn } @@ -36,11 +36,11 @@ func TestNewClientDispenser(t *testing.T) { clientConn := new(grpc.ClientConn) c := 3 - initFunc := func(base *clientBase, clientConn *grpc.ClientConn) interface{} { + initFunc := func(base *ClientBase, clientConn *grpc.ClientConn) interface{} { return c } - cd := newClientDispenser(logger, clientConn, initFunc) + cd := NewClientDispenser(logger, clientConn, initFunc) assert.Equal(t, clientConn, cd.clientConn) assert.NotNil(t, cd.clients) assert.Empty(t, cd.clients) @@ -52,23 +52,23 @@ func TestClientFor(t *testing.T) { c := new(fakeClient) count := 0 - initFunc := func(base *clientBase, clientConn *grpc.ClientConn) interface{} { + initFunc := func(base *ClientBase, clientConn *grpc.ClientConn) interface{} { c.base = base c.clientConn = clientConn count++ return c } - cd := newClientDispenser(logger, clientConn, initFunc) + cd := NewClientDispenser(logger, clientConn, initFunc) actual := cd.ClientFor("pod") require.IsType(t, &fakeClient{}, actual) typed := actual.(*fakeClient) assert.Equal(t, 1, count) assert.Equal(t, &typed, &c) - expectedBase := &clientBase{ - plugin: "pod", - logger: logger, + expectedBase := &ClientBase{ + Plugin: "pod", + Logger: logger, } assert.Equal(t, expectedBase, typed.base) assert.Equal(t, clientConn, typed.clientConn) diff --git a/pkg/plugin/framework/client_errors.go b/pkg/plugin/framework/common/client_errors.go similarity index 84% rename from pkg/plugin/framework/client_errors.go rename to pkg/plugin/framework/common/client_errors.go index adad199dbe..377851aa8f 100644 --- a/pkg/plugin/framework/client_errors.go +++ b/pkg/plugin/framework/common/client_errors.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package framework +package common import ( "google.golang.org/grpc/status" @@ -22,7 +22,7 @@ import ( proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" ) -// fromGRPCError takes a gRPC status error, extracts a stack trace +// FromGRPCError takes a gRPC status error, extracts a stack trace // from the details if it exists, and returns an error that can // provide information about where it was created. // @@ -30,7 +30,7 @@ import ( // all errors returned from the plugin server before they're passed back to // the rest of the Velero codebase. This will enable them to display location // information when they're logged. 
-func fromGRPCError(err error) error { +func FromGRPCError(err error) error { statusErr, ok := status.FromError(err) if !ok { return statusErr.Err() @@ -39,7 +39,7 @@ func fromGRPCError(err error) error { for _, detail := range statusErr.Details() { switch t := detail.(type) { case *proto.Stack: - return &protoStackError{ + return &ProtoStackError{ error: err, stack: t, } @@ -49,12 +49,12 @@ func fromGRPCError(err error) error { return err } -type protoStackError struct { +type ProtoStackError struct { error stack *proto.Stack } -func (e *protoStackError) File() string { +func (e *ProtoStackError) File() string { if e.stack == nil || len(e.stack.Frames) < 1 { return "" } @@ -62,7 +62,7 @@ func (e *protoStackError) File() string { return e.stack.Frames[0].File } -func (e *protoStackError) Line() int32 { +func (e *ProtoStackError) Line() int32 { if e.stack == nil || len(e.stack.Frames) < 1 { return 0 } @@ -70,7 +70,7 @@ func (e *protoStackError) Line() int32 { return e.stack.Frames[0].Line } -func (e *protoStackError) Function() string { +func (e *ProtoStackError) Function() string { if e.stack == nil || len(e.stack.Frames) < 1 { return "" } diff --git a/pkg/plugin/framework/handle_panic.go b/pkg/plugin/framework/common/handle_panic.go similarity index 84% rename from pkg/plugin/framework/handle_panic.go rename to pkg/plugin/framework/common/handle_panic.go index 4ea0ec2b52..e4324898dd 100644 --- a/pkg/plugin/framework/handle_panic.go +++ b/pkg/plugin/framework/common/handle_panic.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package framework +package common import ( "runtime/debug" @@ -23,8 +23,8 @@ import ( "google.golang.org/grpc/codes" ) -// handlePanic is a panic handler for the server half of velero plugins. -func handlePanic(p interface{}) error { +// HandlePanic is a panic handler for the server half of velero plugins. +func HandlePanic(p interface{}) error { if p == nil { return nil } @@ -37,7 +37,7 @@ func handlePanic(p interface{}) error { if panicErr, ok := p.(error); !ok { err = errors.Errorf("plugin panicked: %v", p) } else { - if _, ok := panicErr.(stackTracer); ok { + if _, ok := panicErr.(StackTracer); ok { err = panicErr } else { errWithStacktrace := errors.Errorf("%v, stack trace: %s", panicErr, debug.Stack()) @@ -45,5 +45,5 @@ func handlePanic(p interface{}) error { } } - return newGRPCErrorWithCode(err, codes.Aborted) + return NewGRPCErrorWithCode(err, codes.Aborted) } diff --git a/pkg/plugin/framework/plugin_base.go b/pkg/plugin/framework/common/plugin_base.go similarity index 65% rename from pkg/plugin/framework/plugin_base.go rename to pkg/plugin/framework/common/plugin_base.go index 4a97fa47b9..12e444b749 100644 --- a/pkg/plugin/framework/plugin_base.go +++ b/pkg/plugin/framework/common/plugin_base.go @@ -14,35 +14,35 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package framework +package common import ( "github.com/sirupsen/logrus" ) -type pluginBase struct { - clientLogger logrus.FieldLogger - *serverMux +type PluginBase struct { + ClientLogger logrus.FieldLogger + *ServerMux } -func newPluginBase(options ...PluginOption) *pluginBase { - base := new(pluginBase) +func NewPluginBase(options ...PluginOption) *PluginBase { + base := new(PluginBase) for _, option := range options { option(base) } return base } -type PluginOption func(base *pluginBase) +type PluginOption func(base *PluginBase) func ClientLogger(logger logrus.FieldLogger) PluginOption { - return func(base *pluginBase) { - base.clientLogger = logger + return func(base *PluginBase) { + base.ClientLogger = logger } } -func serverLogger(logger logrus.FieldLogger) PluginOption { - return func(base *pluginBase) { - base.serverMux = newServerMux(logger) +func ServerLogger(logger logrus.FieldLogger) PluginOption { + return func(base *PluginBase) { + base.ServerMux = NewServerMux(logger) } } diff --git a/pkg/plugin/framework/plugin_base_test.go b/pkg/plugin/framework/common/plugin_base_test.go similarity index 84% rename from pkg/plugin/framework/plugin_base_test.go rename to pkg/plugin/framework/common/plugin_base_test.go index 3d25732586..04e066d0c6 100644 --- a/pkg/plugin/framework/plugin_base_test.go +++ b/pkg/plugin/framework/common/plugin_base_test.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package framework +package common import ( "testing" @@ -24,17 +24,17 @@ import ( ) func TestClientLogger(t *testing.T) { - base := &pluginBase{} + base := &PluginBase{} logger := test.NewLogger() f := ClientLogger(logger) f(base) - assert.Equal(t, logger, base.clientLogger) + assert.Equal(t, logger, base.ClientLogger) } func TestServerLogger(t *testing.T) { - base := &pluginBase{} + base := &PluginBase{} logger := test.NewLogger() - f := serverLogger(logger) + f := ServerLogger(logger) f(base) - assert.Equal(t, newServerMux(logger), base.serverMux) + assert.Equal(t, NewServerMux(logger), base.ServerMux) } diff --git a/pkg/plugin/framework/plugin_kinds.go b/pkg/plugin/framework/common/plugin_kinds.go similarity index 89% rename from pkg/plugin/framework/plugin_kinds.go rename to pkg/plugin/framework/common/plugin_kinds.go index f48f7f4983..2e0119e2fd 100644 --- a/pkg/plugin/framework/plugin_kinds.go +++ b/pkg/plugin/framework/common/plugin_kinds.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package framework +package common // PluginKind is a type alias for a string that describes // the kind of a Velero-supported plugin. @@ -48,6 +48,11 @@ const ( PluginKindPluginLister PluginKind = "PluginLister" ) +// If there are plugin kinds that are adaptable to newer API versions, list them here. +// The older (adaptable) version is the key, and the value is the full list of newer +// plugin kinds that are capable of adapting it. +var PluginKindsAdaptableTo = map[PluginKind][]PluginKind{} + // AllPluginKinds contains all the valid plugin kinds that Velero supports, excluding PluginLister because that is not a // kind that a developer would ever need to implement (it's handled by Velero and the Velero plugin library code). 
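The PluginKindsAdaptableTo map introduced above is intentionally empty in this change; purely as a hypothetical illustration of its shape (no v2 plugin kind exists here), an entry mapping an adaptable older kind to the newer kinds able to adapt it would look like:

// Hypothetical shape only; "BackupItemActionV2" is not a kind introduced by this change.
var PluginKindsAdaptableTo = map[PluginKind][]PluginKind{
	PluginKindBackupItemAction: {PluginKind("BackupItemActionV2")},
}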
func AllPluginKinds() map[string]PluginKind { diff --git a/pkg/plugin/framework/server_errors.go b/pkg/plugin/framework/common/server_errors.go similarity index 78% rename from pkg/plugin/framework/server_errors.go rename to pkg/plugin/framework/common/server_errors.go index 55f8859fde..7763d4e9ec 100644 --- a/pkg/plugin/framework/server_errors.go +++ b/pkg/plugin/framework/common/server_errors.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package framework +package common import ( goproto "github.com/golang/protobuf/proto" @@ -26,13 +26,13 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/logging" ) -// newGRPCErrorWithCode wraps err in a gRPC status error with the error's stack trace +// NewGRPCErrorWithCode wraps err in a gRPC status error with the error's stack trace // included in the details if it exists. This provides an easy way to send // stack traces from plugin servers across the wire to the plugin client. // // This function should be used in the internal plugin server code to wrap // all errors before they're returned. -func newGRPCErrorWithCode(err error, code codes.Code, details ...goproto.Message) error { +func NewGRPCErrorWithCode(err error, code codes.Code, details ...goproto.Message) error { // if it's already a gRPC status error, use it; otherwise, create a new one statusErr, ok := status.FromError(err) if !ok { @@ -40,7 +40,7 @@ func newGRPCErrorWithCode(err error, code codes.Code, details ...goproto.Message } // get a Stack for the error and add it to details - if stack := errorStack(err); stack != nil { + if stack := ErrorStack(err); stack != nil { details = append(details, stack) } @@ -52,16 +52,16 @@ func newGRPCErrorWithCode(err error, code codes.Code, details ...goproto.Message return statusErr.Err() } -// newGRPCError is a convenience function for creating a new gRPC error +// NewGRPCError is a convenience function for creating a new gRPC error // with code = codes.Unknown -func newGRPCError(err error, details ...goproto.Message) error { - return newGRPCErrorWithCode(err, codes.Unknown, details...) +func NewGRPCError(err error, details ...goproto.Message) error { + return NewGRPCErrorWithCode(err, codes.Unknown, details...) } -// errorStack gets a stack trace, if it exists, from the provided error, and +// ErrorStack gets a stack trace, if it exists, from the provided error, and // returns it as a *proto.Stack. -func errorStack(err error) *proto.Stack { - stackTracer, ok := err.(stackTracer) +func ErrorStack(err error) *proto.Stack { + stackTracer, ok := err.(StackTracer) if !ok { return nil } @@ -80,6 +80,6 @@ func errorStack(err error) *proto.Stack { return stackTrace } -type stackTracer interface { +type StackTracer interface { StackTrace() errors.StackTrace } diff --git a/pkg/plugin/framework/server_mux.go b/pkg/plugin/framework/common/server_mux.go similarity index 78% rename from pkg/plugin/framework/server_mux.go rename to pkg/plugin/framework/common/server_mux.go index 4babea3841..960975c7e6 100644 --- a/pkg/plugin/framework/server_mux.go +++ b/pkg/plugin/framework/common/server_mux.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package framework +package common import ( "strings" @@ -29,42 +29,42 @@ import ( // (ObjectStore, VolumeSnapshotter, BackupItemAction, RestoreItemAction). 
type HandlerInitializer func(logger logrus.FieldLogger) (interface{}, error) -// serverMux manages multiple implementations of a single plugin kind, such as pod and pvc BackupItemActions. -type serverMux struct { +// ServerMux manages multiple implementations of a single plugin kind, such as pod and pvc BackupItemActions. +type ServerMux struct { kind PluginKind initializers map[string]HandlerInitializer - handlers map[string]interface{} - serverLog logrus.FieldLogger + Handlers map[string]interface{} + ServerLog logrus.FieldLogger } -// newServerMux returns a new serverMux. -func newServerMux(logger logrus.FieldLogger) *serverMux { - return &serverMux{ +// NewServerMux returns a new ServerMux. +func NewServerMux(logger logrus.FieldLogger) *ServerMux { + return &ServerMux{ initializers: make(map[string]HandlerInitializer), - handlers: make(map[string]interface{}), - serverLog: logger, + Handlers: make(map[string]interface{}), + ServerLog: logger, } } // register validates the plugin name and registers the // initializer for the given name. -func (m *serverMux) register(name string, f HandlerInitializer) { - if err := ValidatePluginName(name, m.names()); err != nil { - m.serverLog.Errorf("invalid plugin name %q: %s", name, err) +func (m *ServerMux) Register(name string, f HandlerInitializer) { + if err := ValidatePluginName(name, m.Names()); err != nil { + m.ServerLog.Errorf("invalid plugin name %q: %s", name, err) return } m.initializers[name] = f } // names returns a list of all registered implementations. -func (m *serverMux) names() []string { +func (m *ServerMux) Names() []string { return sets.StringKeySet(m.initializers).List() } -// getHandler returns the instance for a plugin with the given name. If an instance has already been initialized, +// GetHandler returns the instance for a plugin with the given name. If an instance has already been initialized, // that is returned. Otherwise, the instance is initialized by calling its initialization function. -func (m *serverMux) getHandler(name string) (interface{}, error) { - if instance, found := m.handlers[name]; found { +func (m *ServerMux) GetHandler(name string) (interface{}, error) { + if instance, found := m.Handlers[name]; found { return instance, nil } @@ -73,14 +73,14 @@ func (m *serverMux) getHandler(name string) (interface{}, error) { return nil, errors.Errorf("%v plugin: %s was not found or has an invalid name format", m.kind, name) } - instance, err := initializer(m.serverLog) + instance, err := initializer(m.ServerLog) if err != nil { return nil, err } - m.handlers[name] = instance + m.Handlers[name] = instance - return m.handlers[name], nil + return m.Handlers[name], nil } // ValidatePluginName checks if the given name: diff --git a/pkg/plugin/framework/server_mux_test.go b/pkg/plugin/framework/common/server_mux_test.go similarity index 99% rename from pkg/plugin/framework/server_mux_test.go rename to pkg/plugin/framework/common/server_mux_test.go index 1a9be9a68b..1e84555f3a 100644 --- a/pkg/plugin/framework/server_mux_test.go +++ b/pkg/plugin/framework/common/server_mux_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package framework +package common import ( "strings" diff --git a/pkg/plugin/framework/delete_item_action.go b/pkg/plugin/framework/delete_item_action.go index fc9be502c7..7d70938ca7 100644 --- a/pkg/plugin/framework/delete_item_action.go +++ b/pkg/plugin/framework/delete_item_action.go @@ -21,6 +21,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" ) @@ -29,16 +30,16 @@ import ( // interface. type DeleteItemActionPlugin struct { plugin.NetRPCUnsupportedPlugin - *pluginBase + *common.PluginBase } // GRPCClient returns a RestoreItemAction gRPC client. func (p *DeleteItemActionPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, clientConn *grpc.ClientConn) (interface{}, error) { - return newClientDispenser(p.clientLogger, clientConn, newDeleteItemActionGRPCClient), nil + return common.NewClientDispenser(p.ClientLogger, clientConn, newDeleteItemActionGRPCClient), nil } // GRPCServer registers a DeleteItemAction gRPC server. func (p *DeleteItemActionPlugin) GRPCServer(_ *plugin.GRPCBroker, server *grpc.Server) error { - proto.RegisterDeleteItemActionServer(server, &DeleteItemActionGRPCServer{mux: p.serverMux}) + proto.RegisterDeleteItemActionServer(server, &DeleteItemActionGRPCServer{mux: p.ServerMux}) return nil } diff --git a/pkg/plugin/framework/delete_item_action_client.go b/pkg/plugin/framework/delete_item_action_client.go index e9adae6d01..4988d5f085 100644 --- a/pkg/plugin/framework/delete_item_action_client.go +++ b/pkg/plugin/framework/delete_item_action_client.go @@ -23,6 +23,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" "github.com/vmware-tanzu/velero/pkg/plugin/velero" ) @@ -30,30 +31,30 @@ import ( var _ velero.DeleteItemAction = &DeleteItemActionGRPCClient{} // NewDeleteItemActionPlugin constructs a DeleteItemActionPlugin. -func NewDeleteItemActionPlugin(options ...PluginOption) *DeleteItemActionPlugin { +func NewDeleteItemActionPlugin(options ...common.PluginOption) *DeleteItemActionPlugin { return &DeleteItemActionPlugin{ - pluginBase: newPluginBase(options...), + PluginBase: common.NewPluginBase(options...), } } // DeleteItemActionGRPCClient implements the backup/ItemAction interface and uses a // gRPC client to make calls to the plugin server. 
type DeleteItemActionGRPCClient struct { - *clientBase + *common.ClientBase grpcClient proto.DeleteItemActionClient } -func newDeleteItemActionGRPCClient(base *clientBase, clientConn *grpc.ClientConn) interface{} { +func newDeleteItemActionGRPCClient(base *common.ClientBase, clientConn *grpc.ClientConn) interface{} { return &DeleteItemActionGRPCClient{ - clientBase: base, + ClientBase: base, grpcClient: proto.NewDeleteItemActionClient(clientConn), } } func (c *DeleteItemActionGRPCClient) AppliesTo() (velero.ResourceSelector, error) { - res, err := c.grpcClient.AppliesTo(context.Background(), &proto.DeleteItemActionAppliesToRequest{Plugin: c.plugin}) + res, err := c.grpcClient.AppliesTo(context.Background(), &proto.DeleteItemActionAppliesToRequest{Plugin: c.Plugin}) if err != nil { - return velero.ResourceSelector{}, fromGRPCError(err) + return velero.ResourceSelector{}, common.FromGRPCError(err) } if res.ResourceSelector == nil { @@ -81,14 +82,14 @@ func (c *DeleteItemActionGRPCClient) Execute(input *velero.DeleteItemActionExecu } req := &proto.DeleteItemActionExecuteRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, Item: itemJSON, Backup: backupJSON, } // First return item is just an empty struct no matter what. if _, err = c.grpcClient.Execute(context.Background(), req); err != nil { - return fromGRPCError(err) + return common.FromGRPCError(err) } return nil diff --git a/pkg/plugin/framework/delete_item_action_server.go b/pkg/plugin/framework/delete_item_action_server.go index 3c6be3b547..e298969d13 100644 --- a/pkg/plugin/framework/delete_item_action_server.go +++ b/pkg/plugin/framework/delete_item_action_server.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" "github.com/vmware-tanzu/velero/pkg/plugin/velero" ) @@ -31,11 +32,11 @@ import ( // DeleteItemActionGRPCServer implements the proto-generated DeleteItemActionServer interface, and accepts // gRPC calls and forwards them to an implementation of the pluggable interface. 
type DeleteItemActionGRPCServer struct { - mux *serverMux + mux *common.ServerMux } func (s *DeleteItemActionGRPCServer) getImpl(name string) (velero.DeleteItemAction, error) { - impl, err := s.mux.getHandler(name) + impl, err := s.mux.GetHandler(name) if err != nil { return nil, err } @@ -50,23 +51,23 @@ func (s *DeleteItemActionGRPCServer) getImpl(name string) (velero.DeleteItemActi func (s *DeleteItemActionGRPCServer) AppliesTo(ctx context.Context, req *proto.DeleteItemActionAppliesToRequest) (response *proto.DeleteItemActionAppliesToResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } resourceSelector, err := impl.AppliesTo() if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.DeleteItemActionAppliesToResponse{ - &proto.ResourceSelector{ + ResourceSelector: &proto.ResourceSelector{ IncludedNamespaces: resourceSelector.IncludedNamespaces, ExcludedNamespaces: resourceSelector.ExcludedNamespaces, IncludedResources: resourceSelector.IncludedResources, @@ -78,14 +79,14 @@ func (s *DeleteItemActionGRPCServer) AppliesTo(ctx context.Context, req *proto.D func (s *DeleteItemActionGRPCServer) Execute(ctx context.Context, req *proto.DeleteItemActionExecuteRequest) (_ *proto.Empty, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } var ( @@ -94,18 +95,18 @@ func (s *DeleteItemActionGRPCServer) Execute(ctx context.Context, req *proto.Del ) if err := json.Unmarshal(req.Item, &item); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } if err = json.Unmarshal(req.Backup, &backup); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } if err := impl.Execute(&velero.DeleteItemActionExecuteInput{ Item: &item, Backup: &backup, }); err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.Empty{}, nil diff --git a/pkg/plugin/framework/interface.go b/pkg/plugin/framework/interface.go index 1394e8e826..4eeb3049b5 100644 --- a/pkg/plugin/framework/interface.go +++ b/pkg/plugin/framework/interface.go @@ -24,5 +24,5 @@ type Interface interface { // names returns a list of all the registered implementations for this plugin (such as "pod" and "pvc" for // BackupItemAction). - names() []string + Names() []string } diff --git a/pkg/plugin/framework/item_snapshotter.go b/pkg/plugin/framework/item_snapshotter.go index 6e6f91439f..f9af50e68c 100644 --- a/pkg/plugin/framework/item_snapshotter.go +++ b/pkg/plugin/framework/item_snapshotter.go @@ -21,6 +21,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" ) @@ -29,16 +30,16 @@ import ( // interface. type ItemSnapshotterPlugin struct { plugin.NetRPCUnsupportedPlugin - *pluginBase + *common.PluginBase } // GRPCClient returns a clientDispenser for ItemSnapshotter gRPC clients. 
func (p *ItemSnapshotterPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, clientConn *grpc.ClientConn) (interface{}, error) { - return newClientDispenser(p.clientLogger, clientConn, newItemSnapshotterGRPCClient), nil + return common.NewClientDispenser(p.ClientLogger, clientConn, newItemSnapshotterGRPCClient), nil } // GRPCServer registers an ItemSnapshotter gRPC server. func (p *ItemSnapshotterPlugin) GRPCServer(_ *plugin.GRPCBroker, server *grpc.Server) error { - proto.RegisterItemSnapshotterServer(server, &ItemSnapshotterGRPCServer{mux: p.serverMux}) + proto.RegisterItemSnapshotterServer(server, &ItemSnapshotterGRPCServer{mux: p.ServerMux}) return nil } diff --git a/pkg/plugin/framework/item_snapshotter_client.go b/pkg/plugin/framework/item_snapshotter_client.go index dd341b9662..323c3541b3 100644 --- a/pkg/plugin/framework/item_snapshotter_client.go +++ b/pkg/plugin/framework/item_snapshotter_client.go @@ -21,26 +21,26 @@ import ( "encoding/json" "time" - isv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" - "github.com/pkg/errors" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + isv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" ) // NewItemSnapshotterPlugin constructs a ItemSnapshotterPlugin. -func NewItemSnapshotterPlugin(options ...PluginOption) *ItemSnapshotterPlugin { +func NewItemSnapshotterPlugin(options ...common.PluginOption) *ItemSnapshotterPlugin { return &ItemSnapshotterPlugin{ - pluginBase: newPluginBase(options...), + PluginBase: common.NewPluginBase(options...), } } -func newItemSnapshotterGRPCClient(base *clientBase, clientConn *grpc.ClientConn) interface{} { +func newItemSnapshotterGRPCClient(base *common.ClientBase, clientConn *grpc.ClientConn) interface{} { return &ItemSnapshotterGRPCClient{ - clientBase: base, + ClientBase: base, grpcClient: proto.NewItemSnapshotterClient(clientConn), } } @@ -48,13 +48,13 @@ func newItemSnapshotterGRPCClient(base *clientBase, clientConn *grpc.ClientConn) // ItemSnapshotterGRPCClient implements the ItemSnapshotter interface and uses a // gRPC client to make calls to the plugin server. 
type ItemSnapshotterGRPCClient struct { - *clientBase + *common.ClientBase grpcClient proto.ItemSnapshotterClient } func (recv ItemSnapshotterGRPCClient) Init(config map[string]string) error { req := &proto.ItemSnapshotterInitRequest{ - Plugin: recv.plugin, + Plugin: recv.Plugin, Config: config, } @@ -64,12 +64,12 @@ func (recv ItemSnapshotterGRPCClient) Init(config map[string]string) error { func (recv ItemSnapshotterGRPCClient) AppliesTo() (velero.ResourceSelector, error) { req := &proto.ItemSnapshotterAppliesToRequest{ - Plugin: recv.plugin, + Plugin: recv.Plugin, } res, err := recv.grpcClient.AppliesTo(context.Background(), req) if err != nil { - return velero.ResourceSelector{}, fromGRPCError(err) + return velero.ResourceSelector{}, common.FromGRPCError(err) } if res.ResourceSelector == nil { @@ -96,7 +96,7 @@ func (recv ItemSnapshotterGRPCClient) AlsoHandles(input *isv1.AlsoHandlesInput) return nil, errors.WithStack(err) } req := &proto.AlsoHandlesRequest{ - Plugin: recv.plugin, + Plugin: recv.Plugin, Item: itemJSON, Backup: backupJSON, } @@ -121,7 +121,7 @@ func (recv ItemSnapshotterGRPCClient) SnapshotItem(ctx context.Context, input *i return nil, errors.WithStack(err) } req := &proto.SnapshotItemRequest{ - Plugin: recv.plugin, + Plugin: recv.Plugin, Item: itemJSON, Backup: backupJSON, } @@ -153,7 +153,7 @@ func (recv ItemSnapshotterGRPCClient) Progress(input *isv1.ProgressInput) (*isv1 return nil, errors.WithStack(err) } req := &proto.ProgressRequest{ - Plugin: recv.plugin, + Plugin: recv.Plugin, ItemID: resourceIdentifierToProto(input.ItemID), SnapshotID: input.SnapshotID, Backup: backupJSON, @@ -184,7 +184,7 @@ func (recv ItemSnapshotterGRPCClient) Progress(input *isv1.ProgressInput) (*isv1 func (recv ItemSnapshotterGRPCClient) DeleteSnapshot(ctx context.Context, input *isv1.DeleteSnapshotInput) error { req := &proto.DeleteItemSnapshotRequest{ - Plugin: recv.plugin, + Plugin: recv.Plugin, Params: input.Params, SnapshotID: input.SnapshotID, } @@ -210,7 +210,7 @@ func (recv ItemSnapshotterGRPCClient) CreateItemFromSnapshot(ctx context.Context return nil, errors.WithStack(err) } req := &proto.CreateItemFromSnapshotRequest{ - Plugin: recv.plugin, + Plugin: recv.Plugin, Item: itemJSON, SnapshotID: input.SnapshotID, ItemFromBackup: itemFromBackupJSON, diff --git a/pkg/plugin/framework/item_snapshotter_server.go b/pkg/plugin/framework/item_snapshotter_server.go index 166e6d07f5..21ab92110f 100644 --- a/pkg/plugin/framework/item_snapshotter_server.go +++ b/pkg/plugin/framework/item_snapshotter_server.go @@ -27,17 +27,18 @@ import ( "github.com/pkg/errors" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" ) // ItemSnapshotterGRPCServer implements the proto-generated ItemSnapshotterServer interface, and accepts // gRPC calls and forwards them to an implementation of the pluggable interface. 
type ItemSnapshotterGRPCServer struct { - mux *serverMux + mux *common.ServerMux } func (recv *ItemSnapshotterGRPCServer) getImpl(name string) (isv1.ItemSnapshotter, error) { - impl, err := recv.mux.getHandler(name) + impl, err := recv.mux.GetHandler(name) if err != nil { return nil, err } @@ -52,19 +53,19 @@ func (recv *ItemSnapshotterGRPCServer) getImpl(name string) (isv1.ItemSnapshotte func (recv *ItemSnapshotterGRPCServer) Init(c context.Context, req *proto.ItemSnapshotterInitRequest) (response *proto.Empty, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := recv.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } err = impl.Init(req.Config) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.Empty{}, nil @@ -72,23 +73,23 @@ func (recv *ItemSnapshotterGRPCServer) Init(c context.Context, req *proto.ItemSn func (recv *ItemSnapshotterGRPCServer) AppliesTo(ctx context.Context, req *proto.ItemSnapshotterAppliesToRequest) (response *proto.ItemSnapshotterAppliesToResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := recv.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } resourceSelector, err := impl.AppliesTo() if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.ItemSnapshotterAppliesToResponse{ - &proto.ResourceSelector{ + ResourceSelector: &proto.ResourceSelector{ IncludedNamespaces: resourceSelector.IncludedNamespaces, ExcludedNamespaces: resourceSelector.ExcludedNamespaces, IncludedResources: resourceSelector.IncludedResources, @@ -100,23 +101,23 @@ func (recv *ItemSnapshotterGRPCServer) AppliesTo(ctx context.Context, req *proto func (recv *ItemSnapshotterGRPCServer) AlsoHandles(ctx context.Context, req *proto.AlsoHandlesRequest) (res *proto.AlsoHandlesResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := recv.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } var item unstructured.Unstructured var backup api.Backup if err := json.Unmarshal(req.Item, &item); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } if err := json.Unmarshal(req.Backup, &backup); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } ahi := isv1.AlsoHandlesInput{ Item: &item, @@ -124,7 +125,7 @@ func (recv *ItemSnapshotterGRPCServer) AlsoHandles(ctx context.Context, req *pro } alsoHandles, err := impl.AlsoHandles(&ahi) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } res = &proto.AlsoHandlesResponse{} @@ -136,23 +137,23 @@ func (recv *ItemSnapshotterGRPCServer) AlsoHandles(ctx context.Context, req *pro func (recv *ItemSnapshotterGRPCServer) SnapshotItem(ctx context.Context, req *proto.SnapshotItemRequest) (res *proto.SnapshotItemResponse, err error) { defer func() { - if recoveredErr := 
handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := recv.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } var item unstructured.Unstructured var backup api.Backup if err := json.Unmarshal(req.Item, &item); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } if err := json.Unmarshal(req.Backup, &backup); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } sii := isv1.SnapshotItemInput{ Item: &item, @@ -169,7 +170,7 @@ func (recv *ItemSnapshotterGRPCServer) SnapshotItem(ctx context.Context, req *pr } else { updatedItemJSON, err = json.Marshal(sio.UpdatedItem.UnstructuredContent()) if err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } } res = &proto.SnapshotItemResponse{ @@ -184,18 +185,18 @@ func (recv *ItemSnapshotterGRPCServer) SnapshotItem(ctx context.Context, req *pr func (recv *ItemSnapshotterGRPCServer) Progress(ctx context.Context, req *proto.ProgressRequest) (res *proto.ProgressResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := recv.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } var backup api.Backup if err := json.Unmarshal(req.Backup, &backup); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } sipi := &isv1.ProgressInput{ ItemID: protoToResourceIdentifier(req.ItemID), @@ -205,7 +206,7 @@ func (recv *ItemSnapshotterGRPCServer) Progress(ctx context.Context, req *proto. sipo, err := impl.Progress(sipi) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } res = &proto.ProgressResponse{ @@ -223,18 +224,18 @@ func (recv *ItemSnapshotterGRPCServer) Progress(ctx context.Context, req *proto. 
func (recv *ItemSnapshotterGRPCServer) DeleteSnapshot(ctx context.Context, req *proto.DeleteItemSnapshotRequest) (empty *proto.Empty, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := recv.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } var itemFromBackup unstructured.Unstructured if err := json.Unmarshal(req.ItemFromBackup, &itemFromBackup); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } disi := isv1.DeleteSnapshotInput{ @@ -246,36 +247,36 @@ func (recv *ItemSnapshotterGRPCServer) DeleteSnapshot(ctx context.Context, req * err = impl.DeleteSnapshot(ctx, &disi) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return } func (recv *ItemSnapshotterGRPCServer) CreateItemFromSnapshot(ctx context.Context, req *proto.CreateItemFromSnapshotRequest) (res *proto.CreateItemFromSnapshotResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := recv.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } var snapshottedItem unstructured.Unstructured if err := json.Unmarshal(req.Item, &snapshottedItem); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } var itemFromBackup unstructured.Unstructured if err := json.Unmarshal(req.Item, &itemFromBackup); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } var restore api.Restore if err := json.Unmarshal(req.Restore, &restore); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } cii := isv1.CreateItemInput{ @@ -289,7 +290,7 @@ func (recv *ItemSnapshotterGRPCServer) CreateItemFromSnapshot(ctx context.Contex cio, err := impl.CreateItemFromSnapshot(ctx, &cii) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } var updatedItemJSON []byte @@ -298,7 +299,7 @@ func (recv *ItemSnapshotterGRPCServer) CreateItemFromSnapshot(ctx context.Contex } else { updatedItemJSON, err = json.Marshal(cio.UpdatedItem.UnstructuredContent()) if err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } } res = &proto.CreateItemFromSnapshotResponse{ diff --git a/pkg/plugin/framework/object_store.go b/pkg/plugin/framework/object_store.go index bd7a90cf3c..ddf7414218 100644 --- a/pkg/plugin/framework/object_store.go +++ b/pkg/plugin/framework/object_store.go @@ -21,6 +21,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" ) @@ -29,17 +30,17 @@ import ( // interface. type ObjectStorePlugin struct { plugin.NetRPCUnsupportedPlugin - *pluginBase + *common.PluginBase } // GRPCClient returns an ObjectStore gRPC client. 
func (p *ObjectStorePlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, clientConn *grpc.ClientConn) (interface{}, error) { - return newClientDispenser(p.clientLogger, clientConn, newObjectStoreGRPCClient), nil + return common.NewClientDispenser(p.ClientLogger, clientConn, newObjectStoreGRPCClient), nil } // GRPCServer registers an ObjectStore gRPC server. func (p *ObjectStorePlugin) GRPCServer(_ *plugin.GRPCBroker, server *grpc.Server) error { - proto.RegisterObjectStoreServer(server, &ObjectStoreGRPCServer{mux: p.serverMux}) + proto.RegisterObjectStoreServer(server, &ObjectStoreGRPCServer{mux: p.ServerMux}) return nil } diff --git a/pkg/plugin/framework/object_store_client.go b/pkg/plugin/framework/object_store_client.go index 8442749076..7f40921c2a 100644 --- a/pkg/plugin/framework/object_store_client.go +++ b/pkg/plugin/framework/object_store_client.go @@ -24,28 +24,29 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" ) const byteChunkSize = 16384 // NewObjectStorePlugin construct an ObjectStorePlugin. -func NewObjectStorePlugin(options ...PluginOption) *ObjectStorePlugin { +func NewObjectStorePlugin(options ...common.PluginOption) *ObjectStorePlugin { return &ObjectStorePlugin{ - pluginBase: newPluginBase(options...), + PluginBase: common.NewPluginBase(options...), } } // ObjectStoreGRPCClient implements the ObjectStore interface and uses a // gRPC client to make calls to the plugin server. type ObjectStoreGRPCClient struct { - *clientBase + *common.ClientBase grpcClient proto.ObjectStoreClient } -func newObjectStoreGRPCClient(base *clientBase, clientConn *grpc.ClientConn) interface{} { +func newObjectStoreGRPCClient(base *common.ClientBase, clientConn *grpc.ClientConn) interface{} { return &ObjectStoreGRPCClient{ - clientBase: base, + ClientBase: base, grpcClient: proto.NewObjectStoreClient(clientConn), } } @@ -55,12 +56,12 @@ func newObjectStoreGRPCClient(base *clientBase, clientConn *grpc.ClientConn) int // cannot be initialized from the provided config. 
func (c *ObjectStoreGRPCClient) Init(config map[string]string) error { req := &proto.ObjectStoreInitRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, Config: config, } if _, err := c.grpcClient.Init(context.Background(), req); err != nil { - return fromGRPCError(err) + return common.FromGRPCError(err) } return nil @@ -71,7 +72,7 @@ func (c *ObjectStoreGRPCClient) Init(config map[string]string) error { func (c *ObjectStoreGRPCClient) PutObject(bucket, key string, body io.Reader) error { stream, err := c.grpcClient.PutObject(context.Background()) if err != nil { - return fromGRPCError(err) + return common.FromGRPCError(err) } // read from the provider io.Reader into chunks, and send each one over @@ -81,7 +82,7 @@ func (c *ObjectStoreGRPCClient) PutObject(bucket, key string, body io.Reader) er n, err := body.Read(chunk) if err == io.EOF { if _, resErr := stream.CloseAndRecv(); resErr != nil { - return fromGRPCError(resErr) + return common.FromGRPCError(resErr) } return nil } @@ -90,8 +91,8 @@ func (c *ObjectStoreGRPCClient) PutObject(bucket, key string, body io.Reader) er return errors.WithStack(err) } - if err := stream.Send(&proto.PutObjectRequest{Plugin: c.plugin, Bucket: bucket, Key: key, Body: chunk[0:n]}); err != nil { - return fromGRPCError(err) + if err := stream.Send(&proto.PutObjectRequest{Plugin: c.Plugin, Bucket: bucket, Key: key, Body: chunk[0:n]}); err != nil { + return common.FromGRPCError(err) } } } @@ -99,7 +100,7 @@ func (c *ObjectStoreGRPCClient) PutObject(bucket, key string, body io.Reader) er // ObjectExists checks if there is an object with the given key in the object storage bucket. func (c *ObjectStoreGRPCClient) ObjectExists(bucket, key string) (bool, error) { req := &proto.ObjectExistsRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, Bucket: bucket, Key: key, } @@ -116,14 +117,14 @@ func (c *ObjectStoreGRPCClient) ObjectExists(bucket, key string) (bool, error) { // bucket in object storage. func (c *ObjectStoreGRPCClient) GetObject(bucket, key string) (io.ReadCloser, error) { req := &proto.GetObjectRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, Bucket: bucket, Key: key, } stream, err := c.grpcClient.GetObject(context.Background(), req) if err != nil { - return nil, fromGRPCError(err) + return nil, common.FromGRPCError(err) } receive := func() ([]byte, error) { @@ -135,7 +136,7 @@ func (c *ObjectStoreGRPCClient) GetObject(bucket, key string) (io.ReadCloser, er return nil, err } if err != nil { - return nil, fromGRPCError(err) + return nil, common.FromGRPCError(err) } return data.Data, nil @@ -143,7 +144,7 @@ func (c *ObjectStoreGRPCClient) GetObject(bucket, key string) (io.ReadCloser, er close := func() error { if err := stream.CloseSend(); err != nil { - return fromGRPCError(err) + return common.FromGRPCError(err) } return nil } @@ -156,7 +157,7 @@ func (c *ObjectStoreGRPCClient) GetObject(bucket, key string) (io.ReadCloser, er // often used to simulate a directory hierarchy in object storage). 
func (c *ObjectStoreGRPCClient) ListCommonPrefixes(bucket, prefix, delimiter string) ([]string, error) { req := &proto.ListCommonPrefixesRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, Bucket: bucket, Prefix: prefix, Delimiter: delimiter, @@ -164,7 +165,7 @@ func (c *ObjectStoreGRPCClient) ListCommonPrefixes(bucket, prefix, delimiter str res, err := c.grpcClient.ListCommonPrefixes(context.Background(), req) if err != nil { - return nil, fromGRPCError(err) + return nil, common.FromGRPCError(err) } return res.Prefixes, nil @@ -173,14 +174,14 @@ func (c *ObjectStoreGRPCClient) ListCommonPrefixes(bucket, prefix, delimiter str // ListObjects gets a list of all objects in bucket that have the same prefix. func (c *ObjectStoreGRPCClient) ListObjects(bucket, prefix string) ([]string, error) { req := &proto.ListObjectsRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, Bucket: bucket, Prefix: prefix, } res, err := c.grpcClient.ListObjects(context.Background(), req) if err != nil { - return nil, fromGRPCError(err) + return nil, common.FromGRPCError(err) } return res.Keys, nil @@ -190,13 +191,13 @@ func (c *ObjectStoreGRPCClient) ListObjects(bucket, prefix string) ([]string, er // bucket. func (c *ObjectStoreGRPCClient) DeleteObject(bucket, key string) error { req := &proto.DeleteObjectRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, Bucket: bucket, Key: key, } if _, err := c.grpcClient.DeleteObject(context.Background(), req); err != nil { - return fromGRPCError(err) + return common.FromGRPCError(err) } return nil @@ -205,7 +206,7 @@ func (c *ObjectStoreGRPCClient) DeleteObject(bucket, key string) error { // CreateSignedURL creates a pre-signed URL for the given bucket and key that expires after ttl. func (c *ObjectStoreGRPCClient) CreateSignedURL(bucket, key string, ttl time.Duration) (string, error) { req := &proto.CreateSignedURLRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, Bucket: bucket, Key: key, Ttl: int64(ttl), @@ -213,7 +214,7 @@ func (c *ObjectStoreGRPCClient) CreateSignedURL(bucket, key string, ttl time.Dur res, err := c.grpcClient.CreateSignedURL(context.Background(), req) if err != nil { - return "", fromGRPCError(err) + return "", common.FromGRPCError(err) } return res.Url, nil diff --git a/pkg/plugin/framework/object_store_server.go b/pkg/plugin/framework/object_store_server.go index b2f2359ce5..2d3ef3658b 100644 --- a/pkg/plugin/framework/object_store_server.go +++ b/pkg/plugin/framework/object_store_server.go @@ -23,6 +23,7 @@ import ( "github.com/pkg/errors" "golang.org/x/net/context" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" "github.com/vmware-tanzu/velero/pkg/plugin/velero" ) @@ -30,11 +31,11 @@ import ( // ObjectStoreGRPCServer implements the proto-generated ObjectStoreServer interface, and accepts // gRPC calls and forwards them to an implementation of the pluggable interface. type ObjectStoreGRPCServer struct { - mux *serverMux + mux *common.ServerMux } func (s *ObjectStoreGRPCServer) getImpl(name string) (velero.ObjectStore, error) { - impl, err := s.mux.getHandler(name) + impl, err := s.mux.GetHandler(name) if err != nil { return nil, err } @@ -52,18 +53,18 @@ func (s *ObjectStoreGRPCServer) getImpl(name string) (velero.ObjectStore, error) // cannot be initialized from the provided config. 
func (s *ObjectStoreGRPCServer) Init(ctx context.Context, req *proto.ObjectStoreInitRequest) (response *proto.Empty, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } if err := impl.Init(req.Config); err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.Empty{}, nil @@ -73,7 +74,7 @@ func (s *ObjectStoreGRPCServer) Init(ctx context.Context, req *proto.ObjectStore // object storage bucket with the given key. func (s *ObjectStoreGRPCServer) PutObject(stream proto.ObjectStore_PutObjectServer) (err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() @@ -82,12 +83,12 @@ func (s *ObjectStoreGRPCServer) PutObject(stream proto.ObjectStore_PutObjectServ // in our receive method, we'll use `first` on the first call firstChunk, err := stream.Recv() if err != nil { - return newGRPCError(errors.WithStack(err)) + return common.NewGRPCError(errors.WithStack(err)) } impl, err := s.getImpl(firstChunk.Plugin) if err != nil { - return newGRPCError(err) + return common.NewGRPCError(err) } bucket := firstChunk.Bucket @@ -118,11 +119,11 @@ func (s *ObjectStoreGRPCServer) PutObject(stream proto.ObjectStore_PutObjectServ } if err := impl.PutObject(bucket, key, &StreamReadCloser{receive: receive, close: close}); err != nil { - return newGRPCError(err) + return common.NewGRPCError(err) } if err := stream.SendAndClose(&proto.Empty{}); err != nil { - return newGRPCError(errors.WithStack(err)) + return common.NewGRPCError(errors.WithStack(err)) } return nil @@ -131,19 +132,19 @@ func (s *ObjectStoreGRPCServer) PutObject(stream proto.ObjectStore_PutObjectServ // ObjectExists checks if there is an object with the given key in the object storage bucket. func (s *ObjectStoreGRPCServer) ObjectExists(ctx context.Context, req *proto.ObjectExistsRequest) (response *proto.ObjectExistsResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } exists, err := impl.ObjectExists(req.Bucket, req.Key) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.ObjectExistsResponse{Exists: exists}, nil @@ -153,19 +154,19 @@ func (s *ObjectStoreGRPCServer) ObjectExists(ctx context.Context, req *proto.Obj // bucket in object storage. 
func (s *ObjectStoreGRPCServer) GetObject(req *proto.GetObjectRequest, stream proto.ObjectStore_GetObjectServer) (err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return newGRPCError(err) + return common.NewGRPCError(err) } rdr, err := impl.GetObject(req.Bucket, req.Key) if err != nil { - return newGRPCError(err) + return common.NewGRPCError(err) } defer rdr.Close() @@ -173,14 +174,14 @@ func (s *ObjectStoreGRPCServer) GetObject(req *proto.GetObjectRequest, stream pr for { n, err := rdr.Read(chunk) if err != nil && err != io.EOF { - return newGRPCError(errors.WithStack(err)) + return common.NewGRPCError(errors.WithStack(err)) } if n == 0 { return nil } if err := stream.Send(&proto.Bytes{Data: chunk[0:n]}); err != nil { - return newGRPCError(errors.WithStack(err)) + return common.NewGRPCError(errors.WithStack(err)) } } } @@ -190,19 +191,19 @@ func (s *ObjectStoreGRPCServer) GetObject(req *proto.GetObjectRequest, stream pr // (this is often used to simulate a directory hierarchy in object storage). func (s *ObjectStoreGRPCServer) ListCommonPrefixes(ctx context.Context, req *proto.ListCommonPrefixesRequest) (response *proto.ListCommonPrefixesResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } prefixes, err := impl.ListCommonPrefixes(req.Bucket, req.Prefix, req.Delimiter) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.ListCommonPrefixesResponse{Prefixes: prefixes}, nil @@ -211,19 +212,19 @@ func (s *ObjectStoreGRPCServer) ListCommonPrefixes(ctx context.Context, req *pro // ListObjects gets a list of all objects in bucket that have the same prefix. func (s *ObjectStoreGRPCServer) ListObjects(ctx context.Context, req *proto.ListObjectsRequest) (response *proto.ListObjectsResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } keys, err := impl.ListObjects(req.Bucket, req.Prefix) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.ListObjectsResponse{Keys: keys}, nil @@ -233,18 +234,18 @@ func (s *ObjectStoreGRPCServer) ListObjects(ctx context.Context, req *proto.List // bucket. 
func (s *ObjectStoreGRPCServer) DeleteObject(ctx context.Context, req *proto.DeleteObjectRequest) (response *proto.Empty, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } if err := impl.DeleteObject(req.Bucket, req.Key); err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.Empty{}, nil @@ -253,19 +254,19 @@ func (s *ObjectStoreGRPCServer) DeleteObject(ctx context.Context, req *proto.Del // CreateSignedURL creates a pre-signed URL for the given bucket and key that expires after ttl. func (s *ObjectStoreGRPCServer) CreateSignedURL(ctx context.Context, req *proto.CreateSignedURLRequest) (response *proto.CreateSignedURLResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } url, err := impl.CreateSignedURL(req.Bucket, req.Key, time.Duration(req.Ttl)) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.CreateSignedURLResponse{Url: url}, nil diff --git a/pkg/plugin/framework/plugin_lister.go b/pkg/plugin/framework/plugin_lister.go index aed7de77d4..e658ae8a21 100644 --- a/pkg/plugin/framework/plugin_lister.go +++ b/pkg/plugin/framework/plugin_lister.go @@ -22,13 +22,14 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" ) // PluginIdentifier uniquely identifies a plugin by command, kind, and name. type PluginIdentifier struct { Command string - Kind PluginKind + Kind common.PluginKind Name string } @@ -87,13 +88,13 @@ func (c *PluginListerGRPCClient) ListPlugins() ([]PluginIdentifier, error) { ret := make([]PluginIdentifier, len(resp.Plugins)) for i, id := range resp.Plugins { - if _, ok := AllPluginKinds()[id.Kind]; !ok { + if _, ok := common.AllPluginKinds()[id.Kind]; !ok { return nil, errors.Errorf("invalid plugin kind: %s", id.Kind) } ret[i] = PluginIdentifier{ Command: id.Command, - Kind: PluginKind(id.Kind), + Kind: common.PluginKind(id.Kind), Name: id.Name, } } @@ -126,7 +127,7 @@ func (s *PluginListerGRPCServer) ListPlugins(ctx context.Context, req *proto.Emp plugins := make([]*proto.PluginIdentifier, len(list)) for i, id := range list { - if _, ok := AllPluginKinds()[id.Kind.String()]; !ok { + if _, ok := common.AllPluginKinds()[id.Kind.String()]; !ok { return nil, errors.Errorf("invalid plugin kind: %s", id.Kind) } diff --git a/pkg/plugin/framework/restore_item_action.go b/pkg/plugin/framework/restore_item_action.go index 255c67ae88..b5a3d41c16 100644 --- a/pkg/plugin/framework/restore_item_action.go +++ b/pkg/plugin/framework/restore_item_action.go @@ -21,6 +21,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" ) @@ -29,16 +30,16 @@ import ( // interface. 
type RestoreItemActionPlugin struct { plugin.NetRPCUnsupportedPlugin - *pluginBase + *common.PluginBase } // GRPCClient returns a RestoreItemAction gRPC client. func (p *RestoreItemActionPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, clientConn *grpc.ClientConn) (interface{}, error) { - return newClientDispenser(p.clientLogger, clientConn, newRestoreItemActionGRPCClient), nil + return common.NewClientDispenser(p.ClientLogger, clientConn, newRestoreItemActionGRPCClient), nil } // GRPCServer registers a RestoreItemAction gRPC server. func (p *RestoreItemActionPlugin) GRPCServer(_ *plugin.GRPCBroker, server *grpc.Server) error { - proto.RegisterRestoreItemActionServer(server, &RestoreItemActionGRPCServer{mux: p.serverMux}) + proto.RegisterRestoreItemActionServer(server, &RestoreItemActionGRPCServer{mux: p.ServerMux}) return nil } diff --git a/pkg/plugin/framework/restore_item_action_client.go b/pkg/plugin/framework/restore_item_action_client.go index 33d7fff3ae..35bafa0893 100644 --- a/pkg/plugin/framework/restore_item_action_client.go +++ b/pkg/plugin/framework/restore_item_action_client.go @@ -25,37 +25,39 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) -var _ velero.RestoreItemAction = &RestoreItemActionGRPCClient{} +var _ riav1.RestoreItemAction = &RestoreItemActionGRPCClient{} // NewRestoreItemActionPlugin constructs a RestoreItemActionPlugin. -func NewRestoreItemActionPlugin(options ...PluginOption) *RestoreItemActionPlugin { +func NewRestoreItemActionPlugin(options ...common.PluginOption) *RestoreItemActionPlugin { return &RestoreItemActionPlugin{ - pluginBase: newPluginBase(options...), + PluginBase: common.NewPluginBase(options...), } } // RestoreItemActionGRPCClient implements the backup/ItemAction interface and uses a // gRPC client to make calls to the plugin server. 
type RestoreItemActionGRPCClient struct { - *clientBase + *common.ClientBase grpcClient proto.RestoreItemActionClient } -func newRestoreItemActionGRPCClient(base *clientBase, clientConn *grpc.ClientConn) interface{} { +func newRestoreItemActionGRPCClient(base *common.ClientBase, clientConn *grpc.ClientConn) interface{} { return &RestoreItemActionGRPCClient{ - clientBase: base, + ClientBase: base, grpcClient: proto.NewRestoreItemActionClient(clientConn), } } func (c *RestoreItemActionGRPCClient) AppliesTo() (velero.ResourceSelector, error) { - res, err := c.grpcClient.AppliesTo(context.Background(), &proto.RestoreItemActionAppliesToRequest{Plugin: c.plugin}) + res, err := c.grpcClient.AppliesTo(context.Background(), &proto.RestoreItemActionAppliesToRequest{Plugin: c.Plugin}) if err != nil { - return velero.ResourceSelector{}, fromGRPCError(err) + return velero.ResourceSelector{}, common.FromGRPCError(err) } if res.ResourceSelector == nil { @@ -71,7 +73,7 @@ func (c *RestoreItemActionGRPCClient) AppliesTo() (velero.ResourceSelector, erro }, nil } -func (c *RestoreItemActionGRPCClient) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (c *RestoreItemActionGRPCClient) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { itemJSON, err := json.Marshal(input.Item.UnstructuredContent()) if err != nil { return nil, errors.WithStack(err) @@ -88,7 +90,7 @@ func (c *RestoreItemActionGRPCClient) Execute(input *velero.RestoreItemActionExe } req := &proto.RestoreItemActionExecuteRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, Item: itemJSON, ItemFromBackup: itemFromBackupJSON, Restore: restoreJSON, @@ -96,7 +98,7 @@ func (c *RestoreItemActionGRPCClient) Execute(input *velero.RestoreItemActionExe res, err := c.grpcClient.Execute(context.Background(), req) if err != nil { - return nil, fromGRPCError(err) + return nil, common.FromGRPCError(err) } var updatedItem unstructured.Unstructured @@ -118,7 +120,7 @@ func (c *RestoreItemActionGRPCClient) Execute(input *velero.RestoreItemActionExe additionalItems = append(additionalItems, newItem) } - return &velero.RestoreItemActionExecuteOutput{ + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: &updatedItem, AdditionalItems: additionalItems, SkipRestore: res.SkipRestore, diff --git a/pkg/plugin/framework/restore_item_action_server.go b/pkg/plugin/framework/restore_item_action_server.go index 340559a777..924929c975 100644 --- a/pkg/plugin/framework/restore_item_action_server.go +++ b/pkg/plugin/framework/restore_item_action_server.go @@ -24,23 +24,25 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) // RestoreItemActionGRPCServer implements the proto-generated RestoreItemActionServer interface, and accepts // gRPC calls and forwards them to an implementation of the pluggable interface. 
type RestoreItemActionGRPCServer struct { - mux *serverMux + mux *common.ServerMux } -func (s *RestoreItemActionGRPCServer) getImpl(name string) (velero.RestoreItemAction, error) { - impl, err := s.mux.getHandler(name) +func (s *RestoreItemActionGRPCServer) getImpl(name string) (riav1.RestoreItemAction, error) { + impl, err := s.mux.GetHandler(name) if err != nil { return nil, err } - itemAction, ok := impl.(velero.RestoreItemAction) + itemAction, ok := impl.(riav1.RestoreItemAction) if !ok { return nil, errors.Errorf("%T is not a restore item action", impl) } @@ -50,23 +52,23 @@ func (s *RestoreItemActionGRPCServer) getImpl(name string) (velero.RestoreItemAc func (s *RestoreItemActionGRPCServer) AppliesTo(ctx context.Context, req *proto.RestoreItemActionAppliesToRequest) (response *proto.RestoreItemActionAppliesToResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } resourceSelector, err := impl.AppliesTo() if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.RestoreItemActionAppliesToResponse{ - &proto.ResourceSelector{ + ResourceSelector: &proto.ResourceSelector{ IncludedNamespaces: resourceSelector.IncludedNamespaces, ExcludedNamespaces: resourceSelector.ExcludedNamespaces, IncludedResources: resourceSelector.IncludedResources, @@ -78,14 +80,14 @@ func (s *RestoreItemActionGRPCServer) AppliesTo(ctx context.Context, req *proto. func (s *RestoreItemActionGRPCServer) Execute(ctx context.Context, req *proto.RestoreItemActionExecuteRequest) (response *proto.RestoreItemActionExecuteResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } var ( @@ -95,24 +97,24 @@ func (s *RestoreItemActionGRPCServer) Execute(ctx context.Context, req *proto.Re ) if err := json.Unmarshal(req.Item, &item); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } if err := json.Unmarshal(req.ItemFromBackup, &itemFromBackup); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } if err := json.Unmarshal(req.Restore, &restoreObj); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } - executeOutput, err := impl.Execute(&velero.RestoreItemActionExecuteInput{ + executeOutput, err := impl.Execute(&riav1.RestoreItemActionExecuteInput{ Item: &item, ItemFromBackup: &itemFromBackup, Restore: &restoreObj, }) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } // If the plugin implementation returned a nil updateItem (meaning no modifications), reset updatedItem to the @@ -123,7 +125,7 @@ func (s *RestoreItemActionGRPCServer) Execute(ctx context.Context, req *proto.Re } else { updatedItemJSON, err = json.Marshal(executeOutput.UpdatedItem.UnstructuredContent()) if err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } } diff --git 
a/pkg/plugin/framework/server.go b/pkg/plugin/framework/server.go index b25ef341de..20c5e48e00 100644 --- a/pkg/plugin/framework/server.go +++ b/pkg/plugin/framework/server.go @@ -25,6 +25,7 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/pflag" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/util/logging" ) @@ -40,43 +41,43 @@ type Server interface { // RegisterBackupItemAction registers a backup item action. Accepted format // for the plugin name is <DNS subdomain>/<non-empty name>. - RegisterBackupItemAction(pluginName string, initializer HandlerInitializer) Server + RegisterBackupItemAction(pluginName string, initializer common.HandlerInitializer) Server // RegisterBackupItemActions registers multiple backup item actions. - RegisterBackupItemActions(map[string]HandlerInitializer) Server + RegisterBackupItemActions(map[string]common.HandlerInitializer) Server // RegisterVolumeSnapshotter registers a volume snapshotter. Accepted format // for the plugin name is <DNS subdomain>/<non-empty name>. - RegisterVolumeSnapshotter(pluginName string, initializer HandlerInitializer) Server + RegisterVolumeSnapshotter(pluginName string, initializer common.HandlerInitializer) Server // RegisterVolumeSnapshotters registers multiple volume snapshotters. - RegisterVolumeSnapshotters(map[string]HandlerInitializer) Server + RegisterVolumeSnapshotters(map[string]common.HandlerInitializer) Server // RegisterObjectStore registers an object store. Accepted format // for the plugin name is <DNS subdomain>/<non-empty name>. - RegisterObjectStore(pluginName string, initializer HandlerInitializer) Server + RegisterObjectStore(pluginName string, initializer common.HandlerInitializer) Server // RegisterObjectStores registers multiple object stores. - RegisterObjectStores(map[string]HandlerInitializer) Server + RegisterObjectStores(map[string]common.HandlerInitializer) Server // RegisterRestoreItemAction registers a restore item action. Accepted format // for the plugin name is <DNS subdomain>/<non-empty name>. - RegisterRestoreItemAction(pluginName string, initializer HandlerInitializer) Server + RegisterRestoreItemAction(pluginName string, initializer common.HandlerInitializer) Server // RegisterRestoreItemActions registers multiple restore item actions. - RegisterRestoreItemActions(map[string]HandlerInitializer) Server + RegisterRestoreItemActions(map[string]common.HandlerInitializer) Server // RegisterDeleteItemAction registers a delete item action. Accepted format // for the plugin name is <DNS subdomain>/<non-empty name>. - RegisterDeleteItemAction(pluginName string, initializer HandlerInitializer) Server + RegisterDeleteItemAction(pluginName string, initializer common.HandlerInitializer) Server // RegisterDeleteItemActions registers multiple Delete item actions. - RegisterDeleteItemActions(map[string]HandlerInitializer) Server + RegisterDeleteItemActions(map[string]common.HandlerInitializer) Server - RegisterItemSnapshotter(pluginName string, initializer HandlerInitializer) Server + RegisterItemSnapshotter(pluginName string, initializer common.HandlerInitializer) Server // RegisterItemSnapshotters registers multiple Item Snapshotters - RegisterItemSnapshotters(map[string]HandlerInitializer) Server + RegisterItemSnapshotters(map[string]common.HandlerInitializer) Server // Server runs the plugin server.
Serve() @@ -102,12 +103,12 @@ func NewServer() Server { return &server{ log: log, logLevelFlag: logging.LogLevelFlag(log.Level), - backupItemAction: NewBackupItemActionPlugin(serverLogger(log)), - volumeSnapshotter: NewVolumeSnapshotterPlugin(serverLogger(log)), - objectStore: NewObjectStorePlugin(serverLogger(log)), - restoreItemAction: NewRestoreItemActionPlugin(serverLogger(log)), - deleteItemAction: NewDeleteItemActionPlugin(serverLogger(log)), - itemSnapshotter: NewItemSnapshotterPlugin(serverLogger(log)), + backupItemAction: NewBackupItemActionPlugin(common.ServerLogger(log)), + volumeSnapshotter: NewVolumeSnapshotterPlugin(common.ServerLogger(log)), + objectStore: NewObjectStorePlugin(common.ServerLogger(log)), + restoreItemAction: NewRestoreItemActionPlugin(common.ServerLogger(log)), + deleteItemAction: NewDeleteItemActionPlugin(common.ServerLogger(log)), + itemSnapshotter: NewItemSnapshotterPlugin(common.ServerLogger(log)), } } @@ -119,71 +120,71 @@ func (s *server) BindFlags(flags *pflag.FlagSet) Server { return s } -func (s *server) RegisterBackupItemAction(name string, initializer HandlerInitializer) Server { - s.backupItemAction.register(name, initializer) +func (s *server) RegisterBackupItemAction(name string, initializer common.HandlerInitializer) Server { + s.backupItemAction.Register(name, initializer) return s } -func (s *server) RegisterBackupItemActions(m map[string]HandlerInitializer) Server { +func (s *server) RegisterBackupItemActions(m map[string]common.HandlerInitializer) Server { for name := range m { s.RegisterBackupItemAction(name, m[name]) } return s } -func (s *server) RegisterVolumeSnapshotter(name string, initializer HandlerInitializer) Server { - s.volumeSnapshotter.register(name, initializer) +func (s *server) RegisterVolumeSnapshotter(name string, initializer common.HandlerInitializer) Server { + s.volumeSnapshotter.Register(name, initializer) return s } -func (s *server) RegisterVolumeSnapshotters(m map[string]HandlerInitializer) Server { +func (s *server) RegisterVolumeSnapshotters(m map[string]common.HandlerInitializer) Server { for name := range m { s.RegisterVolumeSnapshotter(name, m[name]) } return s } -func (s *server) RegisterObjectStore(name string, initializer HandlerInitializer) Server { - s.objectStore.register(name, initializer) +func (s *server) RegisterObjectStore(name string, initializer common.HandlerInitializer) Server { + s.objectStore.Register(name, initializer) return s } -func (s *server) RegisterObjectStores(m map[string]HandlerInitializer) Server { +func (s *server) RegisterObjectStores(m map[string]common.HandlerInitializer) Server { for name := range m { s.RegisterObjectStore(name, m[name]) } return s } -func (s *server) RegisterRestoreItemAction(name string, initializer HandlerInitializer) Server { - s.restoreItemAction.register(name, initializer) +func (s *server) RegisterRestoreItemAction(name string, initializer common.HandlerInitializer) Server { + s.restoreItemAction.Register(name, initializer) return s } -func (s *server) RegisterRestoreItemActions(m map[string]HandlerInitializer) Server { +func (s *server) RegisterRestoreItemActions(m map[string]common.HandlerInitializer) Server { for name := range m { s.RegisterRestoreItemAction(name, m[name]) } return s } -func (s *server) RegisterDeleteItemAction(name string, initializer HandlerInitializer) Server { - s.deleteItemAction.register(name, initializer) +func (s *server) RegisterDeleteItemAction(name string, initializer common.HandlerInitializer) Server { + 
s.deleteItemAction.Register(name, initializer) return s } -func (s *server) RegisterDeleteItemActions(m map[string]HandlerInitializer) Server { +func (s *server) RegisterDeleteItemActions(m map[string]common.HandlerInitializer) Server { for name := range m { s.RegisterDeleteItemAction(name, m[name]) } return s } -func (s *server) RegisterItemSnapshotter(name string, initializer HandlerInitializer) Server { - s.itemSnapshotter.register(name, initializer) +func (s *server) RegisterItemSnapshotter(name string, initializer common.HandlerInitializer) Server { + s.itemSnapshotter.Register(name, initializer) return s } -func (s *server) RegisterItemSnapshotters(m map[string]HandlerInitializer) Server { +func (s *server) RegisterItemSnapshotters(m map[string]common.HandlerInitializer) Server { for name := range m { s.RegisterItemSnapshotter(name, m[name]) } @@ -191,10 +192,10 @@ func (s *server) RegisterItemSnapshotters(m map[string]HandlerInitializer) Serve } // getNames returns a list of PluginIdentifiers registered with plugin. -func getNames(command string, kind PluginKind, plugin Interface) []PluginIdentifier { +func getNames(command string, kind common.PluginKind, plugin Interface) []PluginIdentifier { var pluginIdentifiers []PluginIdentifier - for _, name := range plugin.names() { + for _, name := range plugin.Names() { id := PluginIdentifier{Command: command, Kind: kind, Name: name} pluginIdentifiers = append(pluginIdentifiers, id) } @@ -214,25 +215,25 @@ func (s *server) Serve() { command := os.Args[0] var pluginIdentifiers []PluginIdentifier - pluginIdentifiers = append(pluginIdentifiers, getNames(command, PluginKindBackupItemAction, s.backupItemAction)...) - pluginIdentifiers = append(pluginIdentifiers, getNames(command, PluginKindVolumeSnapshotter, s.volumeSnapshotter)...) - pluginIdentifiers = append(pluginIdentifiers, getNames(command, PluginKindObjectStore, s.objectStore)...) - pluginIdentifiers = append(pluginIdentifiers, getNames(command, PluginKindRestoreItemAction, s.restoreItemAction)...) - pluginIdentifiers = append(pluginIdentifiers, getNames(command, PluginKindDeleteItemAction, s.deleteItemAction)...) - pluginIdentifiers = append(pluginIdentifiers, getNames(command, PluginKindItemSnapshotter, s.itemSnapshotter)...) + pluginIdentifiers = append(pluginIdentifiers, getNames(command, common.PluginKindBackupItemAction, s.backupItemAction)...) + pluginIdentifiers = append(pluginIdentifiers, getNames(command, common.PluginKindVolumeSnapshotter, s.volumeSnapshotter)...) + pluginIdentifiers = append(pluginIdentifiers, getNames(command, common.PluginKindObjectStore, s.objectStore)...) + pluginIdentifiers = append(pluginIdentifiers, getNames(command, common.PluginKindRestoreItemAction, s.restoreItemAction)...) + pluginIdentifiers = append(pluginIdentifiers, getNames(command, common.PluginKindDeleteItemAction, s.deleteItemAction)...) + pluginIdentifiers = append(pluginIdentifiers, getNames(command, common.PluginKindItemSnapshotter, s.itemSnapshotter)...) pluginLister := NewPluginLister(pluginIdentifiers...) 
plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: Handshake(), Plugins: map[string]plugin.Plugin{ - string(PluginKindBackupItemAction): s.backupItemAction, - string(PluginKindVolumeSnapshotter): s.volumeSnapshotter, - string(PluginKindObjectStore): s.objectStore, - string(PluginKindPluginLister): NewPluginListerPlugin(pluginLister), - string(PluginKindRestoreItemAction): s.restoreItemAction, - string(PluginKindDeleteItemAction): s.deleteItemAction, - string(PluginKindItemSnapshotter): s.itemSnapshotter, + string(common.PluginKindBackupItemAction): s.backupItemAction, + string(common.PluginKindVolumeSnapshotter): s.volumeSnapshotter, + string(common.PluginKindObjectStore): s.objectStore, + string(common.PluginKindPluginLister): NewPluginListerPlugin(pluginLister), + string(common.PluginKindRestoreItemAction): s.restoreItemAction, + string(common.PluginKindDeleteItemAction): s.deleteItemAction, + string(common.PluginKindItemSnapshotter): s.itemSnapshotter, }, GRPCServer: plugin.DefaultGRPCServer, }) diff --git a/pkg/plugin/framework/volume_snapshotter.go b/pkg/plugin/framework/volume_snapshotter.go index 50602b2c05..566fc1eb72 100644 --- a/pkg/plugin/framework/volume_snapshotter.go +++ b/pkg/plugin/framework/volume_snapshotter.go @@ -21,6 +21,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" ) @@ -29,16 +30,16 @@ import ( // interface. type VolumeSnapshotterPlugin struct { plugin.NetRPCUnsupportedPlugin - *pluginBase + *common.PluginBase } // GRPCClient returns a VolumeSnapshotter gRPC client. func (p *VolumeSnapshotterPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, clientConn *grpc.ClientConn) (interface{}, error) { - return newClientDispenser(p.clientLogger, clientConn, newVolumeSnapshotterGRPCClient), nil + return common.NewClientDispenser(p.ClientLogger, clientConn, newVolumeSnapshotterGRPCClient), nil } // GRPCServer registers a VolumeSnapshotter gRPC server. func (p *VolumeSnapshotterPlugin) GRPCServer(_ *plugin.GRPCBroker, server *grpc.Server) error { - proto.RegisterVolumeSnapshotterServer(server, &VolumeSnapshotterGRPCServer{mux: p.serverMux}) + proto.RegisterVolumeSnapshotterServer(server, &VolumeSnapshotterGRPCServer{mux: p.ServerMux}) return nil } diff --git a/pkg/plugin/framework/volume_snapshotter_client.go b/pkg/plugin/framework/volume_snapshotter_client.go index 53ac58d25b..a359046177 100644 --- a/pkg/plugin/framework/volume_snapshotter_client.go +++ b/pkg/plugin/framework/volume_snapshotter_client.go @@ -25,26 +25,27 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" ) // NewVolumeSnapshotterPlugin constructs a VolumeSnapshotterPlugin. -func NewVolumeSnapshotterPlugin(options ...PluginOption) *VolumeSnapshotterPlugin { +func NewVolumeSnapshotterPlugin(options ...common.PluginOption) *VolumeSnapshotterPlugin { return &VolumeSnapshotterPlugin{ - pluginBase: newPluginBase(options...), + PluginBase: common.NewPluginBase(options...), } } // VolumeSnapshotterGRPCClient implements the cloudprovider.VolumeSnapshotter interface and uses a // gRPC client to make calls to the plugin server. 
type VolumeSnapshotterGRPCClient struct { - *clientBase + *common.ClientBase grpcClient proto.VolumeSnapshotterClient } -func newVolumeSnapshotterGRPCClient(base *clientBase, clientConn *grpc.ClientConn) interface{} { +func newVolumeSnapshotterGRPCClient(base *common.ClientBase, clientConn *grpc.ClientConn) interface{} { return &VolumeSnapshotterGRPCClient{ - clientBase: base, + ClientBase: base, grpcClient: proto.NewVolumeSnapshotterClient(clientConn), } } @@ -54,12 +55,12 @@ func newVolumeSnapshotterGRPCClient(base *clientBase, clientConn *grpc.ClientCon // cannot be initialized from the provided config. func (c *VolumeSnapshotterGRPCClient) Init(config map[string]string) error { req := &proto.VolumeSnapshotterInitRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, Config: config, } if _, err := c.grpcClient.Init(context.Background(), req); err != nil { - return fromGRPCError(err) + return common.FromGRPCError(err) } return nil @@ -69,7 +70,7 @@ func (c *VolumeSnapshotterGRPCClient) Init(config map[string]string) error { // and with the specified type and IOPS (if using provisioned IOPS). func (c *VolumeSnapshotterGRPCClient) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (string, error) { req := &proto.CreateVolumeRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, SnapshotID: snapshotID, VolumeType: volumeType, VolumeAZ: volumeAZ, @@ -83,7 +84,7 @@ func (c *VolumeSnapshotterGRPCClient) CreateVolumeFromSnapshot(snapshotID, volum res, err := c.grpcClient.CreateVolumeFromSnapshot(context.Background(), req) if err != nil { - return "", fromGRPCError(err) + return "", common.FromGRPCError(err) } return res.VolumeID, nil @@ -93,14 +94,14 @@ func (c *VolumeSnapshotterGRPCClient) CreateVolumeFromSnapshot(snapshotID, volum // volume. func (c *VolumeSnapshotterGRPCClient) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) { req := &proto.GetVolumeInfoRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, VolumeID: volumeID, VolumeAZ: volumeAZ, } res, err := c.grpcClient.GetVolumeInfo(context.Background(), req) if err != nil { - return "", nil, fromGRPCError(err) + return "", nil, common.FromGRPCError(err) } var iops *int64 @@ -115,7 +116,7 @@ func (c *VolumeSnapshotterGRPCClient) GetVolumeInfo(volumeID, volumeAZ string) ( // set of tags to the snapshot. func (c *VolumeSnapshotterGRPCClient) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) { req := &proto.CreateSnapshotRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, VolumeID: volumeID, VolumeAZ: volumeAZ, Tags: tags, @@ -123,7 +124,7 @@ func (c *VolumeSnapshotterGRPCClient) CreateSnapshot(volumeID, volumeAZ string, res, err := c.grpcClient.CreateSnapshot(context.Background(), req) if err != nil { - return "", fromGRPCError(err) + return "", common.FromGRPCError(err) } return res.SnapshotID, nil @@ -132,12 +133,12 @@ func (c *VolumeSnapshotterGRPCClient) CreateSnapshot(volumeID, volumeAZ string, // DeleteSnapshot deletes the specified volume snapshot. 
func (c *VolumeSnapshotterGRPCClient) DeleteSnapshot(snapshotID string) error { req := &proto.DeleteSnapshotRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, SnapshotID: snapshotID, } if _, err := c.grpcClient.DeleteSnapshot(context.Background(), req); err != nil { - return fromGRPCError(err) + return common.FromGRPCError(err) } return nil @@ -150,13 +151,13 @@ func (c *VolumeSnapshotterGRPCClient) GetVolumeID(pv runtime.Unstructured) (stri } req := &proto.GetVolumeIDRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, PersistentVolume: encodedPV, } resp, err := c.grpcClient.GetVolumeID(context.Background(), req) if err != nil { - return "", fromGRPCError(err) + return "", common.FromGRPCError(err) } return resp.VolumeID, nil @@ -169,14 +170,14 @@ func (c *VolumeSnapshotterGRPCClient) SetVolumeID(pv runtime.Unstructured, volum } req := &proto.SetVolumeIDRequest{ - Plugin: c.plugin, + Plugin: c.Plugin, PersistentVolume: encodedPV, VolumeID: volumeID, } resp, err := c.grpcClient.SetVolumeID(context.Background(), req) if err != nil { - return nil, fromGRPCError(err) + return nil, common.FromGRPCError(err) } var updatedPV unstructured.Unstructured diff --git a/pkg/plugin/framework/volume_snapshotter_server.go b/pkg/plugin/framework/volume_snapshotter_server.go index bde371c93f..99bdea03a8 100644 --- a/pkg/plugin/framework/volume_snapshotter_server.go +++ b/pkg/plugin/framework/volume_snapshotter_server.go @@ -1,5 +1,5 @@ /* -Copyright 2017, 2019 the Velero contributors. +Copyright the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,23 +23,24 @@ import ( "golang.org/x/net/context" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" proto "github.com/vmware-tanzu/velero/pkg/plugin/generated" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" ) // VolumeSnapshotterGRPCServer implements the proto-generated VolumeSnapshotterServer interface, and accepts // gRPC calls and forwards them to an implementation of the pluggable interface. type VolumeSnapshotterGRPCServer struct { - mux *serverMux + mux *common.ServerMux } -func (s *VolumeSnapshotterGRPCServer) getImpl(name string) (velero.VolumeSnapshotter, error) { - impl, err := s.mux.getHandler(name) +func (s *VolumeSnapshotterGRPCServer) getImpl(name string) (vsv1.VolumeSnapshotter, error) { + impl, err := s.mux.GetHandler(name) if err != nil { return nil, err } - volumeSnapshotter, ok := impl.(velero.VolumeSnapshotter) + volumeSnapshotter, ok := impl.(vsv1.VolumeSnapshotter) if !ok { return nil, errors.Errorf("%T is not a volume snapshotter", impl) } @@ -52,18 +53,18 @@ func (s *VolumeSnapshotterGRPCServer) getImpl(name string) (velero.VolumeSnapsho // cannot be initialized from the provided config. 
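// Note that getImpl above now asserts the versioned interface from
// pkg/plugin/velero/volumesnapshotter/v1 (imported as vsv1) instead of the old
// velero.VolumeSnapshotter. As a sketch, an implementation can verify its own
// conformance at compile time (myVolumeSnapshotter is a hypothetical type):
//
//	var _ vsv1.VolumeSnapshotter = (*myVolumeSnapshotter)(nil)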
func (s *VolumeSnapshotterGRPCServer) Init(ctx context.Context, req *proto.VolumeSnapshotterInitRequest) (response *proto.Empty, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } if err := impl.Init(req.Config); err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.Empty{}, nil @@ -73,14 +74,14 @@ func (s *VolumeSnapshotterGRPCServer) Init(ctx context.Context, req *proto.Volum // and with the specified type and IOPS (if using provisioned IOPS). func (s *VolumeSnapshotterGRPCServer) CreateVolumeFromSnapshot(ctx context.Context, req *proto.CreateVolumeRequest) (response *proto.CreateVolumeResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } snapshotID := req.SnapshotID @@ -94,7 +95,7 @@ func (s *VolumeSnapshotterGRPCServer) CreateVolumeFromSnapshot(ctx context.Conte volumeID, err := impl.CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ, iops) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.CreateVolumeResponse{VolumeID: volumeID}, nil @@ -104,19 +105,19 @@ func (s *VolumeSnapshotterGRPCServer) CreateVolumeFromSnapshot(ctx context.Conte // volume. func (s *VolumeSnapshotterGRPCServer) GetVolumeInfo(ctx context.Context, req *proto.GetVolumeInfoRequest) (response *proto.GetVolumeInfoResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } volumeType, iops, err := impl.GetVolumeInfo(req.VolumeID, req.VolumeAZ) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } res := &proto.GetVolumeInfoResponse{ @@ -134,19 +135,19 @@ func (s *VolumeSnapshotterGRPCServer) GetVolumeInfo(ctx context.Context, req *pr // set of tags to the snapshot. func (s *VolumeSnapshotterGRPCServer) CreateSnapshot(ctx context.Context, req *proto.CreateSnapshotRequest) (response *proto.CreateSnapshotResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } snapshotID, err := impl.CreateSnapshot(req.VolumeID, req.VolumeAZ, req.Tags) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.CreateSnapshotResponse{SnapshotID: snapshotID}, nil @@ -155,18 +156,18 @@ func (s *VolumeSnapshotterGRPCServer) CreateSnapshot(ctx context.Context, req *p // DeleteSnapshot deletes the specified volume snapshot. 
func (s *VolumeSnapshotterGRPCServer) DeleteSnapshot(ctx context.Context, req *proto.DeleteSnapshotRequest) (response *proto.Empty, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } if err := impl.DeleteSnapshot(req.SnapshotID); err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.Empty{}, nil @@ -174,25 +175,25 @@ func (s *VolumeSnapshotterGRPCServer) DeleteSnapshot(ctx context.Context, req *p func (s *VolumeSnapshotterGRPCServer) GetVolumeID(ctx context.Context, req *proto.GetVolumeIDRequest) (response *proto.GetVolumeIDResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } var pv unstructured.Unstructured if err := json.Unmarshal(req.PersistentVolume, &pv); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } volumeID, err := impl.GetVolumeID(&pv) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.GetVolumeIDResponse{VolumeID: volumeID}, nil @@ -200,29 +201,29 @@ func (s *VolumeSnapshotterGRPCServer) GetVolumeID(ctx context.Context, req *prot func (s *VolumeSnapshotterGRPCServer) SetVolumeID(ctx context.Context, req *proto.SetVolumeIDRequest) (response *proto.SetVolumeIDResponse, err error) { defer func() { - if recoveredErr := handlePanic(recover()); recoveredErr != nil { + if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr } }() impl, err := s.getImpl(req.Plugin) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } var pv unstructured.Unstructured if err := json.Unmarshal(req.PersistentVolume, &pv); err != nil { - return nil, newGRPCError(errors.WithStack(err)) + return nil, common.NewGRPCError(errors.WithStack(err)) } updatedPV, err := impl.SetVolumeID(&pv, req.VolumeID) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } updatedPVBytes, err := json.Marshal(updatedPV.UnstructuredContent()) if err != nil { - return nil, newGRPCError(err) + return nil, common.NewGRPCError(err) } return &proto.SetVolumeIDResponse{PersistentVolume: updatedPVBytes}, nil diff --git a/pkg/plugin/generated/BackupItemAction.pb.go b/pkg/plugin/generated/BackupItemAction.pb.go index 937da01d05..5b216cc6f0 100644 --- a/pkg/plugin/generated/BackupItemAction.pb.go +++ b/pkg/plugin/generated/BackupItemAction.pb.go @@ -1,223 +1,431 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.14.0 // source: BackupItemAction.proto -/* -Package generated is a generated protocol buffer package. 
- -It is generated from these files: - BackupItemAction.proto - DeleteItemAction.proto - ItemSnapshotter.proto - ObjectStore.proto - PluginLister.proto - RestoreItemAction.proto - Shared.proto - VolumeSnapshotter.proto - -It has these top-level messages: - ExecuteRequest - ExecuteResponse - BackupItemActionAppliesToRequest - BackupItemActionAppliesToResponse - DeleteItemActionExecuteRequest - DeleteItemActionAppliesToRequest - DeleteItemActionAppliesToResponse - ItemSnapshotterAppliesToRequest - ItemSnapshotterAppliesToResponse - AlsoHandlesRequest - AlsoHandlesResponse - SnapshotItemRequest - SnapshotItemResponse - ProgressRequest - ProgressResponse - DeleteItemSnapshotRequest - CreateItemFromSnapshotRequest - CreateItemFromSnapshotResponse - ItemSnapshotterInitRequest - PutObjectRequest - ObjectExistsRequest - ObjectExistsResponse - GetObjectRequest - Bytes - ListCommonPrefixesRequest - ListCommonPrefixesResponse - ListObjectsRequest - ListObjectsResponse - DeleteObjectRequest - CreateSignedURLRequest - CreateSignedURLResponse - ObjectStoreInitRequest - PluginIdentifier - ListPluginsResponse - RestoreItemActionExecuteRequest - RestoreItemActionExecuteResponse - RestoreItemActionAppliesToRequest - RestoreItemActionAppliesToResponse - Empty - Stack - StackFrame - ResourceIdentifier - ResourceSelector - CreateVolumeRequest - CreateVolumeResponse - GetVolumeInfoRequest - GetVolumeInfoResponse - CreateSnapshotRequest - CreateSnapshotResponse - DeleteSnapshotRequest - GetVolumeIDRequest - GetVolumeIDResponse - SetVolumeIDRequest - SetVolumeIDResponse - VolumeSnapshotterInitRequest -*/ package generated -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 type ExecuteRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` Item []byte `protobuf:"bytes,2,opt,name=item,proto3" json:"item,omitempty"` Backup []byte `protobuf:"bytes,3,opt,name=backup,proto3" json:"backup,omitempty"` } -func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} } -func (m *ExecuteRequest) String() string { return proto.CompactTextString(m) } -func (*ExecuteRequest) ProtoMessage() {} -func (*ExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (x *ExecuteRequest) Reset() { + *x = ExecuteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_BackupItemAction_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExecuteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *ExecuteRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (*ExecuteRequest) ProtoMessage() {} + +func (x *ExecuteRequest) ProtoReflect() protoreflect.Message { + mi := &file_BackupItemAction_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecuteRequest.ProtoReflect.Descriptor instead. +func (*ExecuteRequest) Descriptor() ([]byte, []int) { + return file_BackupItemAction_proto_rawDescGZIP(), []int{0} +} + +func (x *ExecuteRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *ExecuteRequest) GetItem() []byte { - if m != nil { - return m.Item +func (x *ExecuteRequest) GetItem() []byte { + if x != nil { + return x.Item } return nil } -func (m *ExecuteRequest) GetBackup() []byte { - if m != nil { - return m.Backup +func (x *ExecuteRequest) GetBackup() []byte { + if x != nil { + return x.Backup } return nil } type ExecuteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Item []byte `protobuf:"bytes,1,opt,name=item,proto3" json:"item,omitempty"` - AdditionalItems []*ResourceIdentifier `protobuf:"bytes,2,rep,name=additionalItems" json:"additionalItems,omitempty"` + AdditionalItems []*ResourceIdentifier `protobuf:"bytes,2,rep,name=additionalItems,proto3" json:"additionalItems,omitempty"` +} + +func (x *ExecuteResponse) Reset() { + *x = ExecuteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_BackupItemAction_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} } -func (m *ExecuteResponse) String() string { return proto.CompactTextString(m) } -func (*ExecuteResponse) ProtoMessage() {} -func (*ExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (x *ExecuteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecuteResponse) ProtoMessage() {} + +func (x *ExecuteResponse) ProtoReflect() protoreflect.Message { + mi := &file_BackupItemAction_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + 
return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecuteResponse.ProtoReflect.Descriptor instead. +func (*ExecuteResponse) Descriptor() ([]byte, []int) { + return file_BackupItemAction_proto_rawDescGZIP(), []int{1} +} -func (m *ExecuteResponse) GetItem() []byte { - if m != nil { - return m.Item +func (x *ExecuteResponse) GetItem() []byte { + if x != nil { + return x.Item } return nil } -func (m *ExecuteResponse) GetAdditionalItems() []*ResourceIdentifier { - if m != nil { - return m.AdditionalItems +func (x *ExecuteResponse) GetAdditionalItems() []*ResourceIdentifier { + if x != nil { + return x.AdditionalItems } return nil } type BackupItemActionAppliesToRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` +} + +func (x *BackupItemActionAppliesToRequest) Reset() { + *x = BackupItemActionAppliesToRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_BackupItemAction_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BackupItemActionAppliesToRequest) Reset() { *m = BackupItemActionAppliesToRequest{} } -func (m *BackupItemActionAppliesToRequest) String() string { return proto.CompactTextString(m) } -func (*BackupItemActionAppliesToRequest) ProtoMessage() {} +func (x *BackupItemActionAppliesToRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupItemActionAppliesToRequest) ProtoMessage() {} + +func (x *BackupItemActionAppliesToRequest) ProtoReflect() protoreflect.Message { + mi := &file_BackupItemAction_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupItemActionAppliesToRequest.ProtoReflect.Descriptor instead. 
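// The regenerated code is built on the protoreflect API, which is what these
// deprecation notices point to; a small sketch of the suggested replacement:
//
//	md := (&BackupItemActionAppliesToRequest{}).ProtoReflect().Descriptor()
//	_ = md.FullName() // "generated.BackupItemActionAppliesToRequest"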
func (*BackupItemActionAppliesToRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2} + return file_BackupItemAction_proto_rawDescGZIP(), []int{2} } -func (m *BackupItemActionAppliesToRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *BackupItemActionAppliesToRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } type BackupItemActionAppliesToResponse struct { - ResourceSelector *ResourceSelector `protobuf:"bytes,1,opt,name=ResourceSelector" json:"ResourceSelector,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResourceSelector *ResourceSelector `protobuf:"bytes,1,opt,name=ResourceSelector,proto3" json:"ResourceSelector,omitempty"` } -func (m *BackupItemActionAppliesToResponse) Reset() { *m = BackupItemActionAppliesToResponse{} } -func (m *BackupItemActionAppliesToResponse) String() string { return proto.CompactTextString(m) } -func (*BackupItemActionAppliesToResponse) ProtoMessage() {} +func (x *BackupItemActionAppliesToResponse) Reset() { + *x = BackupItemActionAppliesToResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_BackupItemAction_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackupItemActionAppliesToResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupItemActionAppliesToResponse) ProtoMessage() {} + +func (x *BackupItemActionAppliesToResponse) ProtoReflect() protoreflect.Message { + mi := &file_BackupItemAction_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupItemActionAppliesToResponse.ProtoReflect.Descriptor instead. 
func (*BackupItemActionAppliesToResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{3} + return file_BackupItemAction_proto_rawDescGZIP(), []int{3} } -func (m *BackupItemActionAppliesToResponse) GetResourceSelector() *ResourceSelector { - if m != nil { - return m.ResourceSelector +func (x *BackupItemActionAppliesToResponse) GetResourceSelector() *ResourceSelector { + if x != nil { + return x.ResourceSelector } return nil } -func init() { - proto.RegisterType((*ExecuteRequest)(nil), "generated.ExecuteRequest") - proto.RegisterType((*ExecuteResponse)(nil), "generated.ExecuteResponse") - proto.RegisterType((*BackupItemActionAppliesToRequest)(nil), "generated.BackupItemActionAppliesToRequest") - proto.RegisterType((*BackupItemActionAppliesToResponse)(nil), "generated.BackupItemActionAppliesToResponse") +var File_BackupItemAction_proto protoreflect.FileDescriptor + +var file_BackupItemAction_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x1a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x54, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x69, + 0x74, 0x65, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x12, + 0x16, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x6e, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x74, + 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x12, 0x47, + 0x0a, 0x0f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x20, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x70, 0x70, 0x6c, 0x69, + 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x22, 0x6c, 0x0a, 0x21, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x74, 0x65, + 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, + 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x32, 0xbc, 0x01, 0x0a, 0x10, 0x42, 0x61, 0x63, 0x6b, 
0x75, 0x70, 0x49, 0x74, 0x65, 0x6d, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x66, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x65, + 0x73, 0x54, 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2c, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x70, 0x70, + 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, + 0x0a, 0x07, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, + 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x76, + 0x6d, 0x77, 0x61, 0x72, 0x65, 0x2d, 0x74, 0x61, 0x6e, 0x7a, 0x75, 0x2f, 0x76, 0x65, 0x6c, 0x65, + 0x72, 0x6f, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_BackupItemAction_proto_rawDescOnce sync.Once + file_BackupItemAction_proto_rawDescData = file_BackupItemAction_proto_rawDesc +) + +func file_BackupItemAction_proto_rawDescGZIP() []byte { + file_BackupItemAction_proto_rawDescOnce.Do(func() { + file_BackupItemAction_proto_rawDescData = protoimpl.X.CompressGZIP(file_BackupItemAction_proto_rawDescData) + }) + return file_BackupItemAction_proto_rawDescData +} + +var file_BackupItemAction_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_BackupItemAction_proto_goTypes = []interface{}{ + (*ExecuteRequest)(nil), // 0: generated.ExecuteRequest + (*ExecuteResponse)(nil), // 1: generated.ExecuteResponse + (*BackupItemActionAppliesToRequest)(nil), // 2: generated.BackupItemActionAppliesToRequest + (*BackupItemActionAppliesToResponse)(nil), // 3: generated.BackupItemActionAppliesToResponse + (*ResourceIdentifier)(nil), // 4: generated.ResourceIdentifier + (*ResourceSelector)(nil), // 5: generated.ResourceSelector +} +var file_BackupItemAction_proto_depIdxs = []int32{ + 4, // 0: generated.ExecuteResponse.additionalItems:type_name -> generated.ResourceIdentifier + 5, // 1: generated.BackupItemActionAppliesToResponse.ResourceSelector:type_name -> generated.ResourceSelector + 2, // 2: generated.BackupItemAction.AppliesTo:input_type -> generated.BackupItemActionAppliesToRequest + 0, // 3: generated.BackupItemAction.Execute:input_type -> generated.ExecuteRequest + 3, // 4: generated.BackupItemAction.AppliesTo:output_type -> generated.BackupItemActionAppliesToResponse + 1, // 5: generated.BackupItemAction.Execute:output_type -> generated.ExecuteResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_BackupItemAction_proto_init() } +func file_BackupItemAction_proto_init() { + if File_BackupItemAction_proto != nil { + return + 
} + file_Shared_proto_init() + if !protoimpl.UnsafeEnabled { + file_BackupItemAction_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_BackupItemAction_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_BackupItemAction_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupItemActionAppliesToRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_BackupItemAction_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupItemActionAppliesToResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_BackupItemAction_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_BackupItemAction_proto_goTypes, + DependencyIndexes: file_BackupItemAction_proto_depIdxs, + MessageInfos: file_BackupItemAction_proto_msgTypes, + }.Build() + File_BackupItemAction_proto = out.File + file_BackupItemAction_proto_rawDesc = nil + file_BackupItemAction_proto_goTypes = nil + file_BackupItemAction_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for BackupItemAction service +const _ = grpc.SupportPackageIsVersion6 +// BackupItemActionClient is the client API for BackupItemAction service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type BackupItemActionClient interface { AppliesTo(ctx context.Context, in *BackupItemActionAppliesToRequest, opts ...grpc.CallOption) (*BackupItemActionAppliesToResponse, error) Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error) } type backupItemActionClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewBackupItemActionClient(cc *grpc.ClientConn) BackupItemActionClient { +func NewBackupItemActionClient(cc grpc.ClientConnInterface) BackupItemActionClient { return &backupItemActionClient{cc} } func (c *backupItemActionClient) AppliesTo(ctx context.Context, in *BackupItemActionAppliesToRequest, opts ...grpc.CallOption) (*BackupItemActionAppliesToResponse, error) { out := new(BackupItemActionAppliesToResponse) - err := grpc.Invoke(ctx, "/generated.BackupItemAction/AppliesTo", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.BackupItemAction/AppliesTo", in, out, opts...) 
if err != nil { return nil, err } @@ -226,20 +434,30 @@ func (c *backupItemActionClient) AppliesTo(ctx context.Context, in *BackupItemAc func (c *backupItemActionClient) Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error) { out := new(ExecuteResponse) - err := grpc.Invoke(ctx, "/generated.BackupItemAction/Execute", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.BackupItemAction/Execute", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for BackupItemAction service - +// BackupItemActionServer is the server API for BackupItemAction service. type BackupItemActionServer interface { AppliesTo(context.Context, *BackupItemActionAppliesToRequest) (*BackupItemActionAppliesToResponse, error) Execute(context.Context, *ExecuteRequest) (*ExecuteResponse, error) } +// UnimplementedBackupItemActionServer can be embedded to have forward compatible implementations. +type UnimplementedBackupItemActionServer struct { +} + +func (*UnimplementedBackupItemActionServer) AppliesTo(context.Context, *BackupItemActionAppliesToRequest) (*BackupItemActionAppliesToResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AppliesTo not implemented") +} +func (*UnimplementedBackupItemActionServer) Execute(context.Context, *ExecuteRequest) (*ExecuteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Execute not implemented") +} + func RegisterBackupItemActionServer(s *grpc.Server, srv BackupItemActionServer) { s.RegisterService(&_BackupItemAction_serviceDesc, srv) } @@ -296,28 +514,3 @@ var _BackupItemAction_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "BackupItemAction.proto", } - -func init() { proto.RegisterFile("BackupItemAction.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 293 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x4a, 0xc3, 0x40, - 0x10, 0x86, 0x49, 0x2b, 0x95, 0x4e, 0x8b, 0x2d, 0x7b, 0x28, 0x31, 0x22, 0xc4, 0x9c, 0x02, 0x4a, - 0x0e, 0xf1, 0xe6, 0xc9, 0x0a, 0x52, 0x7a, 0xdd, 0xf6, 0x05, 0xd2, 0x64, 0x5a, 0x17, 0xd3, 0xdd, - 0x75, 0x77, 0x03, 0x3e, 0x9c, 0x0f, 0x27, 0xd9, 0x6e, 0x43, 0x8c, 0xc5, 0x7a, 0xcb, 0x64, 0xe6, - 0xff, 0xe7, 0xfb, 0xd9, 0x81, 0xd9, 0x4b, 0x96, 0xbf, 0x57, 0x72, 0x69, 0x70, 0x3f, 0xcf, 0x0d, - 0x13, 0x3c, 0x91, 0x4a, 0x18, 0x41, 0x86, 0x3b, 0xe4, 0xa8, 0x32, 0x83, 0x45, 0x30, 0x5e, 0xbd, - 0x65, 0x0a, 0x8b, 0x43, 0x23, 0x5a, 0xc3, 0xd5, 0xeb, 0x27, 0xe6, 0x95, 0x41, 0x8a, 0x1f, 0x15, - 0x6a, 0x43, 0x66, 0x30, 0x90, 0x65, 0xb5, 0x63, 0xdc, 0xf7, 0x42, 0x2f, 0x1e, 0x52, 0x57, 0x11, - 0x02, 0x17, 0xcc, 0xe0, 0xde, 0xef, 0x85, 0x5e, 0x3c, 0xa6, 0xf6, 0xbb, 0x9e, 0xdd, 0xd8, 0x85, - 0x7e, 0xdf, 0xfe, 0x75, 0x55, 0xc4, 0x61, 0xd2, 0xb8, 0x6a, 0x29, 0xb8, 0xc6, 0x46, 0xee, 0xb5, - 0xe4, 0x0b, 0x98, 0x64, 0x45, 0xc1, 0x6a, 0xce, 0xac, 0xac, 0x99, 0xb5, 0xdf, 0x0b, 0xfb, 0xf1, - 0x28, 0xbd, 0x4d, 0x1a, 0xde, 0x84, 0xa2, 0x16, 0x95, 0xca, 0x71, 0x59, 0x20, 0x37, 0x6c, 0xcb, - 0x50, 0xd1, 0xae, 0x2a, 0x7a, 0x82, 0xb0, 0x1b, 0x7c, 0x2e, 0x65, 0xc9, 0x50, 0xaf, 0xc5, 0x99, - 0x5c, 0x51, 0x09, 0x77, 0x7f, 0x68, 0x1d, 0xfd, 0x02, 0xa6, 0x47, 0x8e, 0x15, 0x96, 0x98, 0x1b, - 0xa1, 0xac, 0xcd, 0x28, 0xbd, 0x39, 0x81, 0x7a, 0x1c, 0xa1, 0xbf, 0x44, 0xe9, 0x97, 0x07, 0xd3, - 0xee, 0x3a, 0xb2, 0x85, 0x61, 0xb3, 0x92, 0xdc, 0xb7, 0x0c, 0xcf, 0x85, 0x0a, 0x1e, 0xfe, 0x37, - 0xec, 0x52, 0x3c, 0xc3, 0xa5, 0x7b, 0x16, 0x72, 0xdd, 0x12, 0xfe, 0x3c, 0x80, 
0x20, 0x38, 0xd5, - 0x3a, 0x38, 0x6c, 0x06, 0xf6, 0x6a, 0x1e, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x45, 0xdb, 0x5d, - 0x9f, 0x68, 0x02, 0x00, 0x00, -} diff --git a/pkg/plugin/generated/DeleteItemAction.pb.go b/pkg/plugin/generated/DeleteItemAction.pb.go index 1d0f64680f..be5759f381 100644 --- a/pkg/plugin/generated/DeleteItemAction.pb.go +++ b/pkg/plugin/generated/DeleteItemAction.pb.go @@ -1,122 +1,357 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.14.0 // source: DeleteItemAction.proto package generated -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 type DeleteItemActionExecuteRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` Item []byte `protobuf:"bytes,2,opt,name=item,proto3" json:"item,omitempty"` Backup []byte `protobuf:"bytes,3,opt,name=backup,proto3" json:"backup,omitempty"` } -func (m *DeleteItemActionExecuteRequest) Reset() { *m = DeleteItemActionExecuteRequest{} } -func (m *DeleteItemActionExecuteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteItemActionExecuteRequest) ProtoMessage() {} -func (*DeleteItemActionExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } +func (x *DeleteItemActionExecuteRequest) Reset() { + *x = DeleteItemActionExecuteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_DeleteItemAction_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteItemActionExecuteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteItemActionExecuteRequest) ProtoMessage() {} + +func (x *DeleteItemActionExecuteRequest) ProtoReflect() protoreflect.Message { + mi := &file_DeleteItemAction_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteItemActionExecuteRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteItemActionExecuteRequest) Descriptor() ([]byte, []int) { + return file_DeleteItemAction_proto_rawDescGZIP(), []int{0} +} -func (m *DeleteItemActionExecuteRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *DeleteItemActionExecuteRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *DeleteItemActionExecuteRequest) GetItem() []byte { - if m != nil { - return m.Item +func (x *DeleteItemActionExecuteRequest) GetItem() []byte { + if x != nil { + return x.Item } return nil } -func (m *DeleteItemActionExecuteRequest) GetBackup() []byte { - if m != nil { - return m.Backup +func (x *DeleteItemActionExecuteRequest) GetBackup() []byte { + if x != nil { + return x.Backup } return nil } type DeleteItemActionAppliesToRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` +} + +func (x *DeleteItemActionAppliesToRequest) Reset() { + *x = DeleteItemActionAppliesToRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_DeleteItemAction_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteItemActionAppliesToRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteItemActionAppliesToRequest) Reset() { *m = DeleteItemActionAppliesToRequest{} } -func (m *DeleteItemActionAppliesToRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteItemActionAppliesToRequest) ProtoMessage() {} +func (*DeleteItemActionAppliesToRequest) ProtoMessage() {} + +func (x *DeleteItemActionAppliesToRequest) ProtoReflect() protoreflect.Message { + mi := &file_DeleteItemAction_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteItemActionAppliesToRequest.ProtoReflect.Descriptor instead. 
func (*DeleteItemActionAppliesToRequest) Descriptor() ([]byte, []int) { - return fileDescriptor1, []int{1} + return file_DeleteItemAction_proto_rawDescGZIP(), []int{1} } -func (m *DeleteItemActionAppliesToRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *DeleteItemActionAppliesToRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } type DeleteItemActionAppliesToResponse struct { - ResourceSelector *ResourceSelector `protobuf:"bytes,1,opt,name=ResourceSelector" json:"ResourceSelector,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResourceSelector *ResourceSelector `protobuf:"bytes,1,opt,name=ResourceSelector,proto3" json:"ResourceSelector,omitempty"` +} + +func (x *DeleteItemActionAppliesToResponse) Reset() { + *x = DeleteItemActionAppliesToResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_DeleteItemAction_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteItemActionAppliesToResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteItemActionAppliesToResponse) ProtoMessage() {} + +func (x *DeleteItemActionAppliesToResponse) ProtoReflect() protoreflect.Message { + mi := &file_DeleteItemAction_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *DeleteItemActionAppliesToResponse) Reset() { *m = DeleteItemActionAppliesToResponse{} } -func (m *DeleteItemActionAppliesToResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteItemActionAppliesToResponse) ProtoMessage() {} +// Deprecated: Use DeleteItemActionAppliesToResponse.ProtoReflect.Descriptor instead. 
func (*DeleteItemActionAppliesToResponse) Descriptor() ([]byte, []int) { - return fileDescriptor1, []int{2} + return file_DeleteItemAction_proto_rawDescGZIP(), []int{2} } -func (m *DeleteItemActionAppliesToResponse) GetResourceSelector() *ResourceSelector { - if m != nil { - return m.ResourceSelector +func (x *DeleteItemActionAppliesToResponse) GetResourceSelector() *ResourceSelector { + if x != nil { + return x.ResourceSelector } return nil } -func init() { - proto.RegisterType((*DeleteItemActionExecuteRequest)(nil), "generated.DeleteItemActionExecuteRequest") - proto.RegisterType((*DeleteItemActionAppliesToRequest)(nil), "generated.DeleteItemActionAppliesToRequest") - proto.RegisterType((*DeleteItemActionAppliesToResponse)(nil), "generated.DeleteItemActionAppliesToResponse") +var File_DeleteItemAction_proto protoreflect.FileDescriptor + +var file_DeleteItemAction_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x1a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x64, 0x0a, 0x1e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x69, + 0x74, 0x65, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x12, + 0x16, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x3a, 0x0a, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x70, 0x70, 0x6c, 0x69, + 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x22, 0x6c, 0x0a, 0x21, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x74, 0x65, + 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, + 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x32, 0xc2, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x66, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x65, + 0x73, 0x54, 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2c, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x70, 0x70, + 
0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, + 0x0a, 0x07, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x76, 0x6d, 0x77, 0x61, 0x72, 0x65, 0x2d, 0x74, 0x61, 0x6e, 0x7a, + 0x75, 0x2f, 0x76, 0x65, 0x6c, 0x65, 0x72, 0x6f, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_DeleteItemAction_proto_rawDescOnce sync.Once + file_DeleteItemAction_proto_rawDescData = file_DeleteItemAction_proto_rawDesc +) + +func file_DeleteItemAction_proto_rawDescGZIP() []byte { + file_DeleteItemAction_proto_rawDescOnce.Do(func() { + file_DeleteItemAction_proto_rawDescData = protoimpl.X.CompressGZIP(file_DeleteItemAction_proto_rawDescData) + }) + return file_DeleteItemAction_proto_rawDescData +} + +var file_DeleteItemAction_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_DeleteItemAction_proto_goTypes = []interface{}{ + (*DeleteItemActionExecuteRequest)(nil), // 0: generated.DeleteItemActionExecuteRequest + (*DeleteItemActionAppliesToRequest)(nil), // 1: generated.DeleteItemActionAppliesToRequest + (*DeleteItemActionAppliesToResponse)(nil), // 2: generated.DeleteItemActionAppliesToResponse + (*ResourceSelector)(nil), // 3: generated.ResourceSelector + (*Empty)(nil), // 4: generated.Empty +} +var file_DeleteItemAction_proto_depIdxs = []int32{ + 3, // 0: generated.DeleteItemActionAppliesToResponse.ResourceSelector:type_name -> generated.ResourceSelector + 1, // 1: generated.DeleteItemAction.AppliesTo:input_type -> generated.DeleteItemActionAppliesToRequest + 0, // 2: generated.DeleteItemAction.Execute:input_type -> generated.DeleteItemActionExecuteRequest + 2, // 3: generated.DeleteItemAction.AppliesTo:output_type -> generated.DeleteItemActionAppliesToResponse + 4, // 4: generated.DeleteItemAction.Execute:output_type -> generated.Empty + 3, // [3:5] is the sub-list for method output_type + 1, // [1:3] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_DeleteItemAction_proto_init() } +func file_DeleteItemAction_proto_init() { + if File_DeleteItemAction_proto != nil { + return + } + file_Shared_proto_init() + if !protoimpl.UnsafeEnabled { + file_DeleteItemAction_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteItemActionExecuteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_DeleteItemAction_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteItemActionAppliesToRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_DeleteItemAction_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*DeleteItemActionAppliesToResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_DeleteItemAction_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_DeleteItemAction_proto_goTypes, + DependencyIndexes: file_DeleteItemAction_proto_depIdxs, + MessageInfos: file_DeleteItemAction_proto_msgTypes, + }.Build() + File_DeleteItemAction_proto = out.File + file_DeleteItemAction_proto_rawDesc = nil + file_DeleteItemAction_proto_goTypes = nil + file_DeleteItemAction_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for DeleteItemAction service +const _ = grpc.SupportPackageIsVersion6 +// DeleteItemActionClient is the client API for DeleteItemAction service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type DeleteItemActionClient interface { AppliesTo(ctx context.Context, in *DeleteItemActionAppliesToRequest, opts ...grpc.CallOption) (*DeleteItemActionAppliesToResponse, error) Execute(ctx context.Context, in *DeleteItemActionExecuteRequest, opts ...grpc.CallOption) (*Empty, error) } type deleteItemActionClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewDeleteItemActionClient(cc *grpc.ClientConn) DeleteItemActionClient { +func NewDeleteItemActionClient(cc grpc.ClientConnInterface) DeleteItemActionClient { return &deleteItemActionClient{cc} } func (c *deleteItemActionClient) AppliesTo(ctx context.Context, in *DeleteItemActionAppliesToRequest, opts ...grpc.CallOption) (*DeleteItemActionAppliesToResponse, error) { out := new(DeleteItemActionAppliesToResponse) - err := grpc.Invoke(ctx, "/generated.DeleteItemAction/AppliesTo", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.DeleteItemAction/AppliesTo", in, out, opts...) if err != nil { return nil, err } @@ -125,20 +360,30 @@ func (c *deleteItemActionClient) AppliesTo(ctx context.Context, in *DeleteItemAc func (c *deleteItemActionClient) Execute(ctx context.Context, in *DeleteItemActionExecuteRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := grpc.Invoke(ctx, "/generated.DeleteItemAction/Execute", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.DeleteItemAction/Execute", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for DeleteItemAction service - +// DeleteItemActionServer is the server API for DeleteItemAction service. type DeleteItemActionServer interface { AppliesTo(context.Context, *DeleteItemActionAppliesToRequest) (*DeleteItemActionAppliesToResponse, error) Execute(context.Context, *DeleteItemActionExecuteRequest) (*Empty, error) } +// UnimplementedDeleteItemActionServer can be embedded to have forward compatible implementations. 
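// For example (a sketch; myDeleteItemActionServer is a hypothetical type), a
// server implementation embeds the stub so that any RPC later added to the
// service returns codes.Unimplemented instead of breaking the build:
//
//	type myDeleteItemActionServer struct {
//		UnimplementedDeleteItemActionServer
//	}
//
// The implementation then overrides only AppliesTo and Execute.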
+type UnimplementedDeleteItemActionServer struct { +} + +func (*UnimplementedDeleteItemActionServer) AppliesTo(context.Context, *DeleteItemActionAppliesToRequest) (*DeleteItemActionAppliesToResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AppliesTo not implemented") +} +func (*UnimplementedDeleteItemActionServer) Execute(context.Context, *DeleteItemActionExecuteRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Execute not implemented") +} + func RegisterDeleteItemActionServer(s *grpc.Server, srv DeleteItemActionServer) { s.RegisterService(&_DeleteItemAction_serviceDesc, srv) } @@ -195,25 +440,3 @@ var _DeleteItemAction_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "DeleteItemAction.proto", } - -func init() { proto.RegisterFile("DeleteItemAction.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 253 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xc3, 0x40, - 0x14, 0x84, 0x89, 0x4a, 0x25, 0xcf, 0x1e, 0xc2, 0x1e, 0x4a, 0x88, 0x20, 0x31, 0xa7, 0x8a, 0x92, - 0x43, 0xbd, 0x79, 0x2b, 0x58, 0xc5, 0x6b, 0xea, 0x1f, 0x48, 0x37, 0x63, 0x0d, 0x6e, 0xb2, 0xeb, - 0xee, 0x5b, 0xd0, 0xbf, 0xe7, 0x2f, 0x13, 0x63, 0x28, 0x35, 0x42, 0xdb, 0xdb, 0xbe, 0xdd, 0x99, - 0xf7, 0x31, 0x3b, 0x34, 0xb9, 0x87, 0x02, 0xe3, 0x89, 0xd1, 0xcc, 0x25, 0xd7, 0xba, 0xcd, 0x8d, - 0xd5, 0xac, 0x45, 0xb8, 0x46, 0x0b, 0x5b, 0x32, 0xaa, 0x64, 0xbc, 0x7c, 0x2d, 0x2d, 0xaa, 0xdf, - 0x87, 0xac, 0xa2, 0x8b, 0xa1, 0x65, 0xf1, 0x01, 0xe9, 0x19, 0x05, 0xde, 0x3d, 0x1c, 0x8b, 0x09, - 0x8d, 0x8c, 0xf2, 0xeb, 0xba, 0x8d, 0x83, 0x34, 0x98, 0x86, 0x45, 0x3f, 0x09, 0x41, 0x27, 0x35, - 0xa3, 0x89, 0x8f, 0xd2, 0x60, 0x3a, 0x2e, 0xba, 0xf3, 0x8f, 0x76, 0x55, 0xca, 0x37, 0x6f, 0xe2, - 0xe3, 0xee, 0xb6, 0x9f, 0xb2, 0x3b, 0x4a, 0x87, 0x94, 0xb9, 0x31, 0xaa, 0x86, 0x7b, 0xd6, 0x7b, - 0x38, 0x99, 0xa2, 0xcb, 0x1d, 0x5e, 0x67, 0x74, 0xeb, 0x20, 0x1e, 0x29, 0x2a, 0xe0, 0xb4, 0xb7, - 0x12, 0x4b, 0x28, 0x48, 0xd6, 0xb6, 0x5b, 0x73, 0x36, 0x3b, 0xcf, 0x37, 0xd1, 0xf3, 0xa1, 0xa4, - 0xf8, 0x67, 0x9a, 0x7d, 0x05, 0x14, 0x0d, 0x71, 0xe2, 0x85, 0xc2, 0x0d, 0x52, 0x5c, 0x6f, 0x2d, - 0xdc, 0x17, 0x2a, 0xb9, 0x39, 0x4c, 0xdc, 0xa7, 0x78, 0xa0, 0xd3, 0xfe, 0xf3, 0xc5, 0xd5, 0x0e, - 0xe3, 0xdf, 0x82, 0x92, 0x68, 0x4b, 0xba, 0x68, 0x0c, 0x7f, 0xae, 0x46, 0x5d, 0xb7, 0xb7, 0xdf, - 0x01, 0x00, 0x00, 0xff, 0xff, 0xf0, 0x31, 0x0b, 0xd3, 0x0e, 0x02, 0x00, 0x00, -} diff --git a/pkg/plugin/generated/ItemSnapshotter.pb.go b/pkg/plugin/generated/ItemSnapshotter.pb.go index dfcf73c87b..f1fdf27c4a 100644 --- a/pkg/plugin/generated/ItemSnapshotter.pb.go +++ b/pkg/plugin/generated/ItemSnapshotter.pb.go @@ -1,501 +1,1322 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.14.0 // source: ItemSnapshotter.proto package generated -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 type ItemSnapshotterAppliesToRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` +} + +func (x *ItemSnapshotterAppliesToRequest) Reset() { + *x = ItemSnapshotterAppliesToRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ItemSnapshotterAppliesToRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ItemSnapshotterAppliesToRequest) Reset() { *m = ItemSnapshotterAppliesToRequest{} } -func (m *ItemSnapshotterAppliesToRequest) String() string { return proto.CompactTextString(m) } -func (*ItemSnapshotterAppliesToRequest) ProtoMessage() {} +func (*ItemSnapshotterAppliesToRequest) ProtoMessage() {} + +func (x *ItemSnapshotterAppliesToRequest) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ItemSnapshotterAppliesToRequest.ProtoReflect.Descriptor instead. 
func (*ItemSnapshotterAppliesToRequest) Descriptor() ([]byte, []int) { - return fileDescriptor2, []int{0} + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{0} } -func (m *ItemSnapshotterAppliesToRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *ItemSnapshotterAppliesToRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } type ItemSnapshotterAppliesToResponse struct { - ResourceSelector *ResourceSelector `protobuf:"bytes,1,opt,name=ResourceSelector" json:"ResourceSelector,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResourceSelector *ResourceSelector `protobuf:"bytes,1,opt,name=ResourceSelector,proto3" json:"ResourceSelector,omitempty"` +} + +func (x *ItemSnapshotterAppliesToResponse) Reset() { + *x = ItemSnapshotterAppliesToResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ItemSnapshotterAppliesToResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ItemSnapshotterAppliesToResponse) ProtoMessage() {} + +func (x *ItemSnapshotterAppliesToResponse) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *ItemSnapshotterAppliesToResponse) Reset() { *m = ItemSnapshotterAppliesToResponse{} } -func (m *ItemSnapshotterAppliesToResponse) String() string { return proto.CompactTextString(m) } -func (*ItemSnapshotterAppliesToResponse) ProtoMessage() {} +// Deprecated: Use ItemSnapshotterAppliesToResponse.ProtoReflect.Descriptor instead. 
func (*ItemSnapshotterAppliesToResponse) Descriptor() ([]byte, []int) { - return fileDescriptor2, []int{1} + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{1} } -func (m *ItemSnapshotterAppliesToResponse) GetResourceSelector() *ResourceSelector { - if m != nil { - return m.ResourceSelector +func (x *ItemSnapshotterAppliesToResponse) GetResourceSelector() *ResourceSelector { + if x != nil { + return x.ResourceSelector } return nil } type AlsoHandlesRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` Item []byte `protobuf:"bytes,2,opt,name=item,proto3" json:"item,omitempty"` Backup []byte `protobuf:"bytes,3,opt,name=backup,proto3" json:"backup,omitempty"` } -func (m *AlsoHandlesRequest) Reset() { *m = AlsoHandlesRequest{} } -func (m *AlsoHandlesRequest) String() string { return proto.CompactTextString(m) } -func (*AlsoHandlesRequest) ProtoMessage() {} -func (*AlsoHandlesRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } +func (x *AlsoHandlesRequest) Reset() { + *x = AlsoHandlesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlsoHandlesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *AlsoHandlesRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (*AlsoHandlesRequest) ProtoMessage() {} + +func (x *AlsoHandlesRequest) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlsoHandlesRequest.ProtoReflect.Descriptor instead. 
+func (*AlsoHandlesRequest) Descriptor() ([]byte, []int) { + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{2} +} + +func (x *AlsoHandlesRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *AlsoHandlesRequest) GetItem() []byte { - if m != nil { - return m.Item +func (x *AlsoHandlesRequest) GetItem() []byte { + if x != nil { + return x.Item } return nil } -func (m *AlsoHandlesRequest) GetBackup() []byte { - if m != nil { - return m.Backup +func (x *AlsoHandlesRequest) GetBackup() []byte { + if x != nil { + return x.Backup } return nil } type AlsoHandlesResponse struct { - HandledItems []*ResourceIdentifier `protobuf:"bytes,1,rep,name=handledItems" json:"handledItems,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HandledItems []*ResourceIdentifier `protobuf:"bytes,1,rep,name=handledItems,proto3" json:"handledItems,omitempty"` } -func (m *AlsoHandlesResponse) Reset() { *m = AlsoHandlesResponse{} } -func (m *AlsoHandlesResponse) String() string { return proto.CompactTextString(m) } -func (*AlsoHandlesResponse) ProtoMessage() {} -func (*AlsoHandlesResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } +func (x *AlsoHandlesResponse) Reset() { + *x = AlsoHandlesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *AlsoHandlesResponse) GetHandledItems() []*ResourceIdentifier { - if m != nil { - return m.HandledItems +func (x *AlsoHandlesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlsoHandlesResponse) ProtoMessage() {} + +func (x *AlsoHandlesResponse) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlsoHandlesResponse.ProtoReflect.Descriptor instead. 
+func (*AlsoHandlesResponse) Descriptor() ([]byte, []int) { + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{3} +} + +func (x *AlsoHandlesResponse) GetHandledItems() []*ResourceIdentifier { + if x != nil { + return x.HandledItems } return nil } type SnapshotItemRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` Item []byte `protobuf:"bytes,2,opt,name=item,proto3" json:"item,omitempty"` - Params map[string]string `protobuf:"bytes,3,rep,name=params" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Params map[string]string `protobuf:"bytes,3,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Backup []byte `protobuf:"bytes,4,opt,name=backup,proto3" json:"backup,omitempty"` } -func (m *SnapshotItemRequest) Reset() { *m = SnapshotItemRequest{} } -func (m *SnapshotItemRequest) String() string { return proto.CompactTextString(m) } -func (*SnapshotItemRequest) ProtoMessage() {} -func (*SnapshotItemRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } +func (x *SnapshotItemRequest) Reset() { + *x = SnapshotItemRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SnapshotItemRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SnapshotItemRequest) ProtoMessage() {} + +func (x *SnapshotItemRequest) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SnapshotItemRequest.ProtoReflect.Descriptor instead. 
+func (*SnapshotItemRequest) Descriptor() ([]byte, []int) { + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{4} +} -func (m *SnapshotItemRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *SnapshotItemRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *SnapshotItemRequest) GetItem() []byte { - if m != nil { - return m.Item +func (x *SnapshotItemRequest) GetItem() []byte { + if x != nil { + return x.Item } return nil } -func (m *SnapshotItemRequest) GetParams() map[string]string { - if m != nil { - return m.Params +func (x *SnapshotItemRequest) GetParams() map[string]string { + if x != nil { + return x.Params } return nil } -func (m *SnapshotItemRequest) GetBackup() []byte { - if m != nil { - return m.Backup +func (x *SnapshotItemRequest) GetBackup() []byte { + if x != nil { + return x.Backup } return nil } type SnapshotItemResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Item []byte `protobuf:"bytes,1,opt,name=item,proto3" json:"item,omitempty"` - SnapshotID string `protobuf:"bytes,2,opt,name=snapshotID" json:"snapshotID,omitempty"` - SnapshotMetadata map[string]string `protobuf:"bytes,3,rep,name=snapshotMetadata" json:"snapshotMetadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - AdditionalItems []*ResourceIdentifier `protobuf:"bytes,4,rep,name=additionalItems" json:"additionalItems,omitempty"` - HandledItems []*ResourceIdentifier `protobuf:"bytes,5,rep,name=handledItems" json:"handledItems,omitempty"` + SnapshotID string `protobuf:"bytes,2,opt,name=snapshotID,proto3" json:"snapshotID,omitempty"` + SnapshotMetadata map[string]string `protobuf:"bytes,3,rep,name=snapshotMetadata,proto3" json:"snapshotMetadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + AdditionalItems []*ResourceIdentifier `protobuf:"bytes,4,rep,name=additionalItems,proto3" json:"additionalItems,omitempty"` + HandledItems []*ResourceIdentifier `protobuf:"bytes,5,rep,name=handledItems,proto3" json:"handledItems,omitempty"` +} + +func (x *SnapshotItemResponse) Reset() { + *x = SnapshotItemResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SnapshotItemResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SnapshotItemResponse) ProtoMessage() {} + +func (x *SnapshotItemResponse) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *SnapshotItemResponse) Reset() { *m = SnapshotItemResponse{} } -func (m *SnapshotItemResponse) String() string { return proto.CompactTextString(m) } -func (*SnapshotItemResponse) ProtoMessage() {} -func (*SnapshotItemResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } +// Deprecated: Use SnapshotItemResponse.ProtoReflect.Descriptor instead. 
+func (*SnapshotItemResponse) Descriptor() ([]byte, []int) { + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{5} +} -func (m *SnapshotItemResponse) GetItem() []byte { - if m != nil { - return m.Item +func (x *SnapshotItemResponse) GetItem() []byte { + if x != nil { + return x.Item } return nil } -func (m *SnapshotItemResponse) GetSnapshotID() string { - if m != nil { - return m.SnapshotID +func (x *SnapshotItemResponse) GetSnapshotID() string { + if x != nil { + return x.SnapshotID } return "" } -func (m *SnapshotItemResponse) GetSnapshotMetadata() map[string]string { - if m != nil { - return m.SnapshotMetadata +func (x *SnapshotItemResponse) GetSnapshotMetadata() map[string]string { + if x != nil { + return x.SnapshotMetadata } return nil } -func (m *SnapshotItemResponse) GetAdditionalItems() []*ResourceIdentifier { - if m != nil { - return m.AdditionalItems +func (x *SnapshotItemResponse) GetAdditionalItems() []*ResourceIdentifier { + if x != nil { + return x.AdditionalItems } return nil } -func (m *SnapshotItemResponse) GetHandledItems() []*ResourceIdentifier { - if m != nil { - return m.HandledItems +func (x *SnapshotItemResponse) GetHandledItems() []*ResourceIdentifier { + if x != nil { + return x.HandledItems } return nil } type ProgressRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - ItemID *ResourceIdentifier `protobuf:"bytes,2,opt,name=itemID" json:"itemID,omitempty"` - SnapshotID string `protobuf:"bytes,3,opt,name=snapshotID" json:"snapshotID,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + ItemID *ResourceIdentifier `protobuf:"bytes,2,opt,name=itemID,proto3" json:"itemID,omitempty"` + SnapshotID string `protobuf:"bytes,3,opt,name=snapshotID,proto3" json:"snapshotID,omitempty"` Backup []byte `protobuf:"bytes,4,opt,name=backup,proto3" json:"backup,omitempty"` } -func (m *ProgressRequest) Reset() { *m = ProgressRequest{} } -func (m *ProgressRequest) String() string { return proto.CompactTextString(m) } -func (*ProgressRequest) ProtoMessage() {} -func (*ProgressRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } +func (x *ProgressRequest) Reset() { + *x = ProgressRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProgressRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProgressRequest) ProtoMessage() {} + +func (x *ProgressRequest) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProgressRequest.ProtoReflect.Descriptor instead. 
+func (*ProgressRequest) Descriptor() ([]byte, []int) { + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{6} +} -func (m *ProgressRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *ProgressRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *ProgressRequest) GetItemID() *ResourceIdentifier { - if m != nil { - return m.ItemID +func (x *ProgressRequest) GetItemID() *ResourceIdentifier { + if x != nil { + return x.ItemID } return nil } -func (m *ProgressRequest) GetSnapshotID() string { - if m != nil { - return m.SnapshotID +func (x *ProgressRequest) GetSnapshotID() string { + if x != nil { + return x.SnapshotID } return "" } -func (m *ProgressRequest) GetBackup() []byte { - if m != nil { - return m.Backup +func (x *ProgressRequest) GetBackup() []byte { + if x != nil { + return x.Backup } return nil } type ProgressResponse struct { - Phase string `protobuf:"bytes,1,opt,name=phase" json:"phase,omitempty"` - ItemsCompleted int64 `protobuf:"varint,2,opt,name=itemsCompleted" json:"itemsCompleted,omitempty"` - ItemsToComplete int64 `protobuf:"varint,3,opt,name=itemsToComplete" json:"itemsToComplete,omitempty"` - Started int64 `protobuf:"varint,4,opt,name=started" json:"started,omitempty"` - StartedNano int64 `protobuf:"varint,5,opt,name=startedNano" json:"startedNano,omitempty"` - Updated int64 `protobuf:"varint,6,opt,name=updated" json:"updated,omitempty"` - UpdatedNano int64 `protobuf:"varint,7,opt,name=updatedNano" json:"updatedNano,omitempty"` - Err string `protobuf:"bytes,8,opt,name=err" json:"err,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Phase string `protobuf:"bytes,1,opt,name=phase,proto3" json:"phase,omitempty"` + ItemsCompleted int64 `protobuf:"varint,2,opt,name=itemsCompleted,proto3" json:"itemsCompleted,omitempty"` + ItemsToComplete int64 `protobuf:"varint,3,opt,name=itemsToComplete,proto3" json:"itemsToComplete,omitempty"` + Started int64 `protobuf:"varint,4,opt,name=started,proto3" json:"started,omitempty"` + StartedNano int64 `protobuf:"varint,5,opt,name=startedNano,proto3" json:"startedNano,omitempty"` + Updated int64 `protobuf:"varint,6,opt,name=updated,proto3" json:"updated,omitempty"` + UpdatedNano int64 `protobuf:"varint,7,opt,name=updatedNano,proto3" json:"updatedNano,omitempty"` + Err string `protobuf:"bytes,8,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *ProgressResponse) Reset() { + *x = ProgressResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProgressResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ProgressResponse) Reset() { *m = ProgressResponse{} } -func (m *ProgressResponse) String() string { return proto.CompactTextString(m) } -func (*ProgressResponse) ProtoMessage() {} -func (*ProgressResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } +func (*ProgressResponse) ProtoMessage() {} -func (m *ProgressResponse) GetPhase() string { - if m != nil { - return m.Phase +func (x *ProgressResponse) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
ProgressResponse.ProtoReflect.Descriptor instead. +func (*ProgressResponse) Descriptor() ([]byte, []int) { + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{7} +} + +func (x *ProgressResponse) GetPhase() string { + if x != nil { + return x.Phase } return "" } -func (m *ProgressResponse) GetItemsCompleted() int64 { - if m != nil { - return m.ItemsCompleted +func (x *ProgressResponse) GetItemsCompleted() int64 { + if x != nil { + return x.ItemsCompleted } return 0 } -func (m *ProgressResponse) GetItemsToComplete() int64 { - if m != nil { - return m.ItemsToComplete +func (x *ProgressResponse) GetItemsToComplete() int64 { + if x != nil { + return x.ItemsToComplete } return 0 } -func (m *ProgressResponse) GetStarted() int64 { - if m != nil { - return m.Started +func (x *ProgressResponse) GetStarted() int64 { + if x != nil { + return x.Started } return 0 } -func (m *ProgressResponse) GetStartedNano() int64 { - if m != nil { - return m.StartedNano +func (x *ProgressResponse) GetStartedNano() int64 { + if x != nil { + return x.StartedNano } return 0 } -func (m *ProgressResponse) GetUpdated() int64 { - if m != nil { - return m.Updated +func (x *ProgressResponse) GetUpdated() int64 { + if x != nil { + return x.Updated } return 0 } -func (m *ProgressResponse) GetUpdatedNano() int64 { - if m != nil { - return m.UpdatedNano +func (x *ProgressResponse) GetUpdatedNano() int64 { + if x != nil { + return x.UpdatedNano } return 0 } -func (m *ProgressResponse) GetErr() string { - if m != nil { - return m.Err +func (x *ProgressResponse) GetErr() string { + if x != nil { + return x.Err } return "" } type DeleteItemSnapshotRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - SnapshotID string `protobuf:"bytes,2,opt,name=snapshotID" json:"snapshotID,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + SnapshotID string `protobuf:"bytes,2,opt,name=snapshotID,proto3" json:"snapshotID,omitempty"` ItemFromBackup []byte `protobuf:"bytes,3,opt,name=itemFromBackup,proto3" json:"itemFromBackup,omitempty"` - Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Params map[string]string `protobuf:"bytes,5,rep,name=params" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Params map[string]string `protobuf:"bytes,5,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DeleteItemSnapshotRequest) Reset() { + *x = DeleteItemSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DeleteItemSnapshotRequest) Reset() { *m = DeleteItemSnapshotRequest{} } -func (m *DeleteItemSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteItemSnapshotRequest) ProtoMessage() {} -func (*DeleteItemSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } +func (x *DeleteItemSnapshotRequest) String() string { + 
return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteItemSnapshotRequest) ProtoMessage() {} + +func (x *DeleteItemSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -func (m *DeleteItemSnapshotRequest) GetPlugin() string { - if m != nil { - return m.Plugin +// Deprecated: Use DeleteItemSnapshotRequest.ProtoReflect.Descriptor instead. +func (*DeleteItemSnapshotRequest) Descriptor() ([]byte, []int) { + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{8} +} + +func (x *DeleteItemSnapshotRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *DeleteItemSnapshotRequest) GetSnapshotID() string { - if m != nil { - return m.SnapshotID +func (x *DeleteItemSnapshotRequest) GetSnapshotID() string { + if x != nil { + return x.SnapshotID } return "" } -func (m *DeleteItemSnapshotRequest) GetItemFromBackup() []byte { - if m != nil { - return m.ItemFromBackup +func (x *DeleteItemSnapshotRequest) GetItemFromBackup() []byte { + if x != nil { + return x.ItemFromBackup } return nil } -func (m *DeleteItemSnapshotRequest) GetMetadata() map[string]string { - if m != nil { - return m.Metadata +func (x *DeleteItemSnapshotRequest) GetMetadata() map[string]string { + if x != nil { + return x.Metadata } return nil } -func (m *DeleteItemSnapshotRequest) GetParams() map[string]string { - if m != nil { - return m.Params +func (x *DeleteItemSnapshotRequest) GetParams() map[string]string { + if x != nil { + return x.Params } return nil } type CreateItemFromSnapshotRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` Item []byte `protobuf:"bytes,2,opt,name=item,proto3" json:"item,omitempty"` - SnapshotID string `protobuf:"bytes,3,opt,name=snapshotID" json:"snapshotID,omitempty"` + SnapshotID string `protobuf:"bytes,3,opt,name=snapshotID,proto3" json:"snapshotID,omitempty"` ItemFromBackup []byte `protobuf:"bytes,4,opt,name=itemFromBackup,proto3" json:"itemFromBackup,omitempty"` - SnapshotMetadata map[string]string `protobuf:"bytes,5,rep,name=snapshotMetadata" json:"snapshotMetadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Params map[string]string `protobuf:"bytes,6,rep,name=params" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + SnapshotMetadata map[string]string `protobuf:"bytes,5,rep,name=snapshotMetadata,proto3" json:"snapshotMetadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Params map[string]string `protobuf:"bytes,6,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Restore []byte `protobuf:"bytes,7,opt,name=restore,proto3" json:"restore,omitempty"` } -func (m *CreateItemFromSnapshotRequest) Reset() { *m = CreateItemFromSnapshotRequest{} } -func (m *CreateItemFromSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*CreateItemFromSnapshotRequest) ProtoMessage() {} -func (*CreateItemFromSnapshotRequest) Descriptor() ([]byte, 
[]int) { return fileDescriptor2, []int{9} } +func (x *CreateItemFromSnapshotRequest) Reset() { + *x = CreateItemFromSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CreateItemFromSnapshotRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *CreateItemFromSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateItemFromSnapshotRequest) ProtoMessage() {} + +func (x *CreateItemFromSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateItemFromSnapshotRequest.ProtoReflect.Descriptor instead. +func (*CreateItemFromSnapshotRequest) Descriptor() ([]byte, []int) { + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{9} +} + +func (x *CreateItemFromSnapshotRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *CreateItemFromSnapshotRequest) GetItem() []byte { - if m != nil { - return m.Item +func (x *CreateItemFromSnapshotRequest) GetItem() []byte { + if x != nil { + return x.Item } return nil } -func (m *CreateItemFromSnapshotRequest) GetSnapshotID() string { - if m != nil { - return m.SnapshotID +func (x *CreateItemFromSnapshotRequest) GetSnapshotID() string { + if x != nil { + return x.SnapshotID } return "" } -func (m *CreateItemFromSnapshotRequest) GetItemFromBackup() []byte { - if m != nil { - return m.ItemFromBackup +func (x *CreateItemFromSnapshotRequest) GetItemFromBackup() []byte { + if x != nil { + return x.ItemFromBackup } return nil } -func (m *CreateItemFromSnapshotRequest) GetSnapshotMetadata() map[string]string { - if m != nil { - return m.SnapshotMetadata +func (x *CreateItemFromSnapshotRequest) GetSnapshotMetadata() map[string]string { + if x != nil { + return x.SnapshotMetadata } return nil } -func (m *CreateItemFromSnapshotRequest) GetParams() map[string]string { - if m != nil { - return m.Params +func (x *CreateItemFromSnapshotRequest) GetParams() map[string]string { + if x != nil { + return x.Params } return nil } -func (m *CreateItemFromSnapshotRequest) GetRestore() []byte { - if m != nil { - return m.Restore +func (x *CreateItemFromSnapshotRequest) GetRestore() []byte { + if x != nil { + return x.Restore } return nil } type CreateItemFromSnapshotResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Item []byte `protobuf:"bytes,1,opt,name=item,proto3" json:"item,omitempty"` - AdditionalItems []*ResourceIdentifier `protobuf:"bytes,2,rep,name=additionalItems" json:"additionalItems,omitempty"` - SkipRestore bool `protobuf:"varint,3,opt,name=skipRestore" json:"skipRestore,omitempty"` + AdditionalItems []*ResourceIdentifier `protobuf:"bytes,2,rep,name=additionalItems,proto3" json:"additionalItems,omitempty"` + SkipRestore bool `protobuf:"varint,3,opt,name=skipRestore,proto3" json:"skipRestore,omitempty"` } -func (m *CreateItemFromSnapshotResponse) Reset() { *m = CreateItemFromSnapshotResponse{} } -func (m *CreateItemFromSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*CreateItemFromSnapshotResponse) ProtoMessage() {} +func (x *CreateItemFromSnapshotResponse) Reset() 
{ + *x = CreateItemFromSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateItemFromSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateItemFromSnapshotResponse) ProtoMessage() {} + +func (x *CreateItemFromSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateItemFromSnapshotResponse.ProtoReflect.Descriptor instead. func (*CreateItemFromSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor2, []int{10} + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{10} } -func (m *CreateItemFromSnapshotResponse) GetItem() []byte { - if m != nil { - return m.Item +func (x *CreateItemFromSnapshotResponse) GetItem() []byte { + if x != nil { + return x.Item } return nil } -func (m *CreateItemFromSnapshotResponse) GetAdditionalItems() []*ResourceIdentifier { - if m != nil { - return m.AdditionalItems +func (x *CreateItemFromSnapshotResponse) GetAdditionalItems() []*ResourceIdentifier { + if x != nil { + return x.AdditionalItems } return nil } -func (m *CreateItemFromSnapshotResponse) GetSkipRestore() bool { - if m != nil { - return m.SkipRestore +func (x *CreateItemFromSnapshotResponse) GetSkipRestore() bool { + if x != nil { + return x.SkipRestore } return false } type ItemSnapshotterInitRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - Config map[string]string `protobuf:"bytes,2,rep,name=config" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + Config map[string]string `protobuf:"bytes,2,rep,name=config,proto3" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ItemSnapshotterInitRequest) Reset() { *m = ItemSnapshotterInitRequest{} } -func (m *ItemSnapshotterInitRequest) String() string { return proto.CompactTextString(m) } -func (*ItemSnapshotterInitRequest) ProtoMessage() {} -func (*ItemSnapshotterInitRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{11} } +func (x *ItemSnapshotterInitRequest) Reset() { + *x = ItemSnapshotterInitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ItemSnapshotter_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ItemSnapshotterInitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ItemSnapshotterInitRequest) ProtoMessage() {} -func (m *ItemSnapshotterInitRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *ItemSnapshotterInitRequest) ProtoReflect() protoreflect.Message { + mi := &file_ItemSnapshotter_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
ItemSnapshotterInitRequest.ProtoReflect.Descriptor instead. +func (*ItemSnapshotterInitRequest) Descriptor() ([]byte, []int) { + return file_ItemSnapshotter_proto_rawDescGZIP(), []int{11} +} + +func (x *ItemSnapshotterInitRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *ItemSnapshotterInitRequest) GetConfig() map[string]string { - if m != nil { - return m.Config +func (x *ItemSnapshotterInitRequest) GetConfig() map[string]string { + if x != nil { + return x.Config } return nil } -func init() { - proto.RegisterType((*ItemSnapshotterAppliesToRequest)(nil), "generated.ItemSnapshotterAppliesToRequest") - proto.RegisterType((*ItemSnapshotterAppliesToResponse)(nil), "generated.ItemSnapshotterAppliesToResponse") - proto.RegisterType((*AlsoHandlesRequest)(nil), "generated.AlsoHandlesRequest") - proto.RegisterType((*AlsoHandlesResponse)(nil), "generated.AlsoHandlesResponse") - proto.RegisterType((*SnapshotItemRequest)(nil), "generated.SnapshotItemRequest") - proto.RegisterType((*SnapshotItemResponse)(nil), "generated.SnapshotItemResponse") - proto.RegisterType((*ProgressRequest)(nil), "generated.ProgressRequest") - proto.RegisterType((*ProgressResponse)(nil), "generated.ProgressResponse") - proto.RegisterType((*DeleteItemSnapshotRequest)(nil), "generated.DeleteItemSnapshotRequest") - proto.RegisterType((*CreateItemFromSnapshotRequest)(nil), "generated.CreateItemFromSnapshotRequest") - proto.RegisterType((*CreateItemFromSnapshotResponse)(nil), "generated.CreateItemFromSnapshotResponse") - proto.RegisterType((*ItemSnapshotterInitRequest)(nil), "generated.ItemSnapshotterInitRequest") +var File_ItemSnapshotter_proto protoreflect.FileDescriptor + +var file_ItemSnapshotter_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x49, 0x74, 0x65, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x1a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x39, 0x0a, 0x1f, 0x49, 0x74, 0x65, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x74, 0x65, 0x72, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, 0x6b, 0x0a, 0x20, 0x49, + 0x74, 0x65, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x41, 0x70, + 0x70, 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x47, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x58, 0x0a, 0x12, 0x41, 0x6c, 0x73, 0x6f, + 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x22, 0x58, 0x0a, 0x13, 0x41, 0x6c, 0x73, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x68, 0x61, 0x6e, + 0x64, 0x6c, 0x65, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0c, + 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xd8, 0x01, 0x0a, + 0x13, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x69, 0x74, 0x65, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, + 0x12, 0x42, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x1a, 0x39, 0x0a, 0x0b, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, 0x02, 0x0a, 0x14, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x69, 0x74, 0x65, 0x6d, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x49, 0x44, 0x12, 0x61, 0x0a, 0x10, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, + 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, 0x0f, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, + 0x0f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x41, 0x0a, 0x0c, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 
0x64, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0c, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x49, 0x74, + 0x65, 0x6d, 0x73, 0x1a, 0x43, 0x0a, 0x15, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x98, 0x01, 0x0a, 0x0f, 0x50, 0x72, 0x6f, + 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x12, 0x35, 0x0a, 0x06, 0x69, 0x74, 0x65, 0x6d, 0x49, 0x44, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, + 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x52, 0x06, 0x69, 0x74, 0x65, 0x6d, 0x49, 0x44, 0x12, 0x1e, 0x0a, 0x0a, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x22, 0x84, 0x02, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x26, + 0x0a, 0x0e, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x43, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x0f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x54, + 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x65, 0x64, 0x4e, 0x61, 0x6e, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x18, 0x0a, 0x07, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x64, 0x4e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x8d, 0x03, 0x0a, 0x19, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 
0x75, 0x67, + 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x44, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x44, + 0x12, 0x26, 0x0a, 0x0e, 0x69, 0x74, 0x65, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x69, 0x74, 0x65, 0x6d, 0x46, 0x72, + 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x4e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x74, 0x65, + 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x48, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe7, 0x03, 0x0a, 0x1d, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0e, 0x69, 0x74, 0x65, 0x6d, + 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0e, 0x69, 0x74, 0x65, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x12, 0x6a, 0x0a, 0x10, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x74, 0x65, + 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 
0x73, 0x74, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4c, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, + 0x74, 0x65, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x1a, 0x43, 0x0a, 0x15, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9f, 0x01, 0x0a, 0x1e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, + 0x74, 0x65, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x12, 0x47, 0x0a, 0x0f, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, + 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x52, 0x0f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x49, 0x74, 0x65, 0x6d, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x49, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x32, 0xd5, 0x04, 0x0a, 0x0f, 0x49, 0x74, 0x65, 0x6d, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x12, + 0x25, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x49, 0x74, 0x65, 0x6d, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x69, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x6c, + 0x69, 0x65, 0x73, 0x54, 0x6f, 0x12, 0x2a, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, + 0x72, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x49, 0x74, + 0x65, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x41, 0x70, 0x70, + 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, + 0x0a, 0x0b, 0x41, 0x6c, 0x73, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x1d, 0x2e, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x41, 0x6c, 0x73, 0x6f, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x41, 0x6c, 0x73, 0x6f, 0x48, 0x61, 0x6e, + 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0c, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x1e, 0x2e, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, + 0x08, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1a, 0x2e, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x2e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x6d, 0x0a, 0x16, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x28, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 
0x74, 0x65, 0x6d, 0x46, 0x72, 0x6f, + 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x29, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x76, 0x6d, 0x77, 0x61, 0x72, 0x65, + 0x2d, 0x74, 0x61, 0x6e, 0x7a, 0x75, 0x2f, 0x76, 0x65, 0x6c, 0x65, 0x72, 0x6f, 0x2f, 0x70, 0x6b, + 0x67, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_ItemSnapshotter_proto_rawDescOnce sync.Once + file_ItemSnapshotter_proto_rawDescData = file_ItemSnapshotter_proto_rawDesc +) + +func file_ItemSnapshotter_proto_rawDescGZIP() []byte { + file_ItemSnapshotter_proto_rawDescOnce.Do(func() { + file_ItemSnapshotter_proto_rawDescData = protoimpl.X.CompressGZIP(file_ItemSnapshotter_proto_rawDescData) + }) + return file_ItemSnapshotter_proto_rawDescData +} + +var file_ItemSnapshotter_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_ItemSnapshotter_proto_goTypes = []interface{}{ + (*ItemSnapshotterAppliesToRequest)(nil), // 0: generated.ItemSnapshotterAppliesToRequest + (*ItemSnapshotterAppliesToResponse)(nil), // 1: generated.ItemSnapshotterAppliesToResponse + (*AlsoHandlesRequest)(nil), // 2: generated.AlsoHandlesRequest + (*AlsoHandlesResponse)(nil), // 3: generated.AlsoHandlesResponse + (*SnapshotItemRequest)(nil), // 4: generated.SnapshotItemRequest + (*SnapshotItemResponse)(nil), // 5: generated.SnapshotItemResponse + (*ProgressRequest)(nil), // 6: generated.ProgressRequest + (*ProgressResponse)(nil), // 7: generated.ProgressResponse + (*DeleteItemSnapshotRequest)(nil), // 8: generated.DeleteItemSnapshotRequest + (*CreateItemFromSnapshotRequest)(nil), // 9: generated.CreateItemFromSnapshotRequest + (*CreateItemFromSnapshotResponse)(nil), // 10: generated.CreateItemFromSnapshotResponse + (*ItemSnapshotterInitRequest)(nil), // 11: generated.ItemSnapshotterInitRequest + nil, // 12: generated.SnapshotItemRequest.ParamsEntry + nil, // 13: generated.SnapshotItemResponse.SnapshotMetadataEntry + nil, // 14: generated.DeleteItemSnapshotRequest.MetadataEntry + nil, // 15: generated.DeleteItemSnapshotRequest.ParamsEntry + nil, // 16: generated.CreateItemFromSnapshotRequest.SnapshotMetadataEntry + nil, // 17: generated.CreateItemFromSnapshotRequest.ParamsEntry + nil, // 18: generated.ItemSnapshotterInitRequest.ConfigEntry + (*ResourceSelector)(nil), // 19: generated.ResourceSelector + (*ResourceIdentifier)(nil), // 20: generated.ResourceIdentifier + (*Empty)(nil), // 21: generated.Empty +} +var file_ItemSnapshotter_proto_depIdxs = []int32{ + 19, // 0: generated.ItemSnapshotterAppliesToResponse.ResourceSelector:type_name -> generated.ResourceSelector + 20, // 1: generated.AlsoHandlesResponse.handledItems:type_name -> generated.ResourceIdentifier + 12, // 2: generated.SnapshotItemRequest.params:type_name -> generated.SnapshotItemRequest.ParamsEntry + 13, // 3: generated.SnapshotItemResponse.snapshotMetadata:type_name -> generated.SnapshotItemResponse.SnapshotMetadataEntry + 20, // 4: generated.SnapshotItemResponse.additionalItems:type_name -> generated.ResourceIdentifier + 20, // 5: generated.SnapshotItemResponse.handledItems:type_name -> 
generated.ResourceIdentifier + 20, // 6: generated.ProgressRequest.itemID:type_name -> generated.ResourceIdentifier + 14, // 7: generated.DeleteItemSnapshotRequest.metadata:type_name -> generated.DeleteItemSnapshotRequest.MetadataEntry + 15, // 8: generated.DeleteItemSnapshotRequest.params:type_name -> generated.DeleteItemSnapshotRequest.ParamsEntry + 16, // 9: generated.CreateItemFromSnapshotRequest.snapshotMetadata:type_name -> generated.CreateItemFromSnapshotRequest.SnapshotMetadataEntry + 17, // 10: generated.CreateItemFromSnapshotRequest.params:type_name -> generated.CreateItemFromSnapshotRequest.ParamsEntry + 20, // 11: generated.CreateItemFromSnapshotResponse.additionalItems:type_name -> generated.ResourceIdentifier + 18, // 12: generated.ItemSnapshotterInitRequest.config:type_name -> generated.ItemSnapshotterInitRequest.ConfigEntry + 11, // 13: generated.ItemSnapshotter.Init:input_type -> generated.ItemSnapshotterInitRequest + 0, // 14: generated.ItemSnapshotter.AppliesTo:input_type -> generated.ItemSnapshotterAppliesToRequest + 2, // 15: generated.ItemSnapshotter.AlsoHandles:input_type -> generated.AlsoHandlesRequest + 4, // 16: generated.ItemSnapshotter.SnapshotItem:input_type -> generated.SnapshotItemRequest + 6, // 17: generated.ItemSnapshotter.Progress:input_type -> generated.ProgressRequest + 8, // 18: generated.ItemSnapshotter.DeleteSnapshot:input_type -> generated.DeleteItemSnapshotRequest + 9, // 19: generated.ItemSnapshotter.CreateItemFromSnapshot:input_type -> generated.CreateItemFromSnapshotRequest + 21, // 20: generated.ItemSnapshotter.Init:output_type -> generated.Empty + 1, // 21: generated.ItemSnapshotter.AppliesTo:output_type -> generated.ItemSnapshotterAppliesToResponse + 3, // 22: generated.ItemSnapshotter.AlsoHandles:output_type -> generated.AlsoHandlesResponse + 5, // 23: generated.ItemSnapshotter.SnapshotItem:output_type -> generated.SnapshotItemResponse + 7, // 24: generated.ItemSnapshotter.Progress:output_type -> generated.ProgressResponse + 21, // 25: generated.ItemSnapshotter.DeleteSnapshot:output_type -> generated.Empty + 10, // 26: generated.ItemSnapshotter.CreateItemFromSnapshot:output_type -> generated.CreateItemFromSnapshotResponse + 20, // [20:27] is the sub-list for method output_type + 13, // [13:20] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_ItemSnapshotter_proto_init() } +func file_ItemSnapshotter_proto_init() { + if File_ItemSnapshotter_proto != nil { + return + } + file_Shared_proto_init() + if !protoimpl.UnsafeEnabled { + file_ItemSnapshotter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ItemSnapshotterAppliesToRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ItemSnapshotter_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ItemSnapshotterAppliesToResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ItemSnapshotter_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AlsoHandlesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_ItemSnapshotter_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AlsoHandlesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ItemSnapshotter_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SnapshotItemRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ItemSnapshotter_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SnapshotItemResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ItemSnapshotter_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProgressRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ItemSnapshotter_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProgressResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ItemSnapshotter_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteItemSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ItemSnapshotter_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateItemFromSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ItemSnapshotter_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateItemFromSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ItemSnapshotter_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ItemSnapshotterInitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_ItemSnapshotter_proto_rawDesc, + NumEnums: 0, + NumMessages: 19, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_ItemSnapshotter_proto_goTypes, + DependencyIndexes: file_ItemSnapshotter_proto_depIdxs, + MessageInfos: file_ItemSnapshotter_proto_msgTypes, + }.Build() + File_ItemSnapshotter_proto = out.File + file_ItemSnapshotter_proto_rawDesc = nil + file_ItemSnapshotter_proto_goTypes = nil + file_ItemSnapshotter_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for ItemSnapshotter service +const _ = grpc.SupportPackageIsVersion6 +// ItemSnapshotterClient is the client API for ItemSnapshotter service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ItemSnapshotterClient interface { Init(ctx context.Context, in *ItemSnapshotterInitRequest, opts ...grpc.CallOption) (*Empty, error) AppliesTo(ctx context.Context, in *ItemSnapshotterAppliesToRequest, opts ...grpc.CallOption) (*ItemSnapshotterAppliesToResponse, error) @@ -507,16 +1328,16 @@ type ItemSnapshotterClient interface { } type itemSnapshotterClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewItemSnapshotterClient(cc *grpc.ClientConn) ItemSnapshotterClient { +func NewItemSnapshotterClient(cc grpc.ClientConnInterface) ItemSnapshotterClient { return &itemSnapshotterClient{cc} } func (c *itemSnapshotterClient) Init(ctx context.Context, in *ItemSnapshotterInitRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := grpc.Invoke(ctx, "/generated.ItemSnapshotter/Init", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ItemSnapshotter/Init", in, out, opts...) if err != nil { return nil, err } @@ -525,7 +1346,7 @@ func (c *itemSnapshotterClient) Init(ctx context.Context, in *ItemSnapshotterIni func (c *itemSnapshotterClient) AppliesTo(ctx context.Context, in *ItemSnapshotterAppliesToRequest, opts ...grpc.CallOption) (*ItemSnapshotterAppliesToResponse, error) { out := new(ItemSnapshotterAppliesToResponse) - err := grpc.Invoke(ctx, "/generated.ItemSnapshotter/AppliesTo", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ItemSnapshotter/AppliesTo", in, out, opts...) if err != nil { return nil, err } @@ -534,7 +1355,7 @@ func (c *itemSnapshotterClient) AppliesTo(ctx context.Context, in *ItemSnapshott func (c *itemSnapshotterClient) AlsoHandles(ctx context.Context, in *AlsoHandlesRequest, opts ...grpc.CallOption) (*AlsoHandlesResponse, error) { out := new(AlsoHandlesResponse) - err := grpc.Invoke(ctx, "/generated.ItemSnapshotter/AlsoHandles", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ItemSnapshotter/AlsoHandles", in, out, opts...) if err != nil { return nil, err } @@ -543,7 +1364,7 @@ func (c *itemSnapshotterClient) AlsoHandles(ctx context.Context, in *AlsoHandles func (c *itemSnapshotterClient) SnapshotItem(ctx context.Context, in *SnapshotItemRequest, opts ...grpc.CallOption) (*SnapshotItemResponse, error) { out := new(SnapshotItemResponse) - err := grpc.Invoke(ctx, "/generated.ItemSnapshotter/SnapshotItem", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ItemSnapshotter/SnapshotItem", in, out, opts...) if err != nil { return nil, err } @@ -552,7 +1373,7 @@ func (c *itemSnapshotterClient) SnapshotItem(ctx context.Context, in *SnapshotIt func (c *itemSnapshotterClient) Progress(ctx context.Context, in *ProgressRequest, opts ...grpc.CallOption) (*ProgressResponse, error) { out := new(ProgressResponse) - err := grpc.Invoke(ctx, "/generated.ItemSnapshotter/Progress", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ItemSnapshotter/Progress", in, out, opts...) if err != nil { return nil, err } @@ -561,7 +1382,7 @@ func (c *itemSnapshotterClient) Progress(ctx context.Context, in *ProgressReques func (c *itemSnapshotterClient) DeleteSnapshot(ctx context.Context, in *DeleteItemSnapshotRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := grpc.Invoke(ctx, "/generated.ItemSnapshotter/DeleteSnapshot", in, out, c.cc, opts...) 
+ err := c.cc.Invoke(ctx, "/generated.ItemSnapshotter/DeleteSnapshot", in, out, opts...) if err != nil { return nil, err } @@ -570,15 +1391,14 @@ func (c *itemSnapshotterClient) DeleteSnapshot(ctx context.Context, in *DeleteIt func (c *itemSnapshotterClient) CreateItemFromSnapshot(ctx context.Context, in *CreateItemFromSnapshotRequest, opts ...grpc.CallOption) (*CreateItemFromSnapshotResponse, error) { out := new(CreateItemFromSnapshotResponse) - err := grpc.Invoke(ctx, "/generated.ItemSnapshotter/CreateItemFromSnapshot", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ItemSnapshotter/CreateItemFromSnapshot", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for ItemSnapshotter service - +// ItemSnapshotterServer is the server API for ItemSnapshotter service. type ItemSnapshotterServer interface { Init(context.Context, *ItemSnapshotterInitRequest) (*Empty, error) AppliesTo(context.Context, *ItemSnapshotterAppliesToRequest) (*ItemSnapshotterAppliesToResponse, error) @@ -589,6 +1409,32 @@ type ItemSnapshotterServer interface { CreateItemFromSnapshot(context.Context, *CreateItemFromSnapshotRequest) (*CreateItemFromSnapshotResponse, error) } +// UnimplementedItemSnapshotterServer can be embedded to have forward compatible implementations. +type UnimplementedItemSnapshotterServer struct { +} + +func (*UnimplementedItemSnapshotterServer) Init(context.Context, *ItemSnapshotterInitRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") +} +func (*UnimplementedItemSnapshotterServer) AppliesTo(context.Context, *ItemSnapshotterAppliesToRequest) (*ItemSnapshotterAppliesToResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AppliesTo not implemented") +} +func (*UnimplementedItemSnapshotterServer) AlsoHandles(context.Context, *AlsoHandlesRequest) (*AlsoHandlesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AlsoHandles not implemented") +} +func (*UnimplementedItemSnapshotterServer) SnapshotItem(context.Context, *SnapshotItemRequest) (*SnapshotItemResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SnapshotItem not implemented") +} +func (*UnimplementedItemSnapshotterServer) Progress(context.Context, *ProgressRequest) (*ProgressResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Progress not implemented") +} +func (*UnimplementedItemSnapshotterServer) DeleteSnapshot(context.Context, *DeleteItemSnapshotRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented") +} +func (*UnimplementedItemSnapshotterServer) CreateItemFromSnapshot(context.Context, *CreateItemFromSnapshotRequest) (*CreateItemFromSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateItemFromSnapshot not implemented") +} + func RegisterItemSnapshotterServer(s *grpc.Server, srv ItemSnapshotterServer) { s.RegisterService(&_ItemSnapshotter_serviceDesc, srv) } @@ -755,65 +1601,3 @@ var _ItemSnapshotter_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "ItemSnapshotter.proto", } - -func init() { proto.RegisterFile("ItemSnapshotter.proto", fileDescriptor2) } - -var fileDescriptor2 = []byte{ - // 887 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x5f, 0x8f, 0xdb, 0x44, - 0x10, 0x97, 0xe3, 0x5c, 0xee, 0x6e, 0x12, 0x7a, 0xd1, 0xf6, 0x5a, 0x19, 0x57, 0xbd, 0x46, 0x16, - 0xa0, 
0x50, 0xa4, 0x08, 0x0e, 0x2a, 0x51, 0x90, 0x40, 0xd7, 0xb4, 0xf4, 0x4e, 0x2a, 0xa5, 0xf2, - 0xf5, 0xa1, 0xaf, 0xdb, 0x78, 0x9b, 0x98, 0xd8, 0x5e, 0xb3, 0xbb, 0x41, 0xba, 0x77, 0x5e, 0x79, - 0xe7, 0x8d, 0xef, 0xc1, 0xf7, 0x40, 0xe2, 0x1b, 0xf0, 0x2d, 0x10, 0xda, 0x3f, 0x0e, 0x1b, 0xc7, - 0x3e, 0xfb, 0xee, 0x78, 0xf3, 0xce, 0xce, 0xcc, 0xfe, 0x66, 0x7e, 0xb3, 0xb3, 0x63, 0xb8, 0x73, - 0x26, 0x48, 0x7a, 0x9e, 0xe1, 0x9c, 0x2f, 0xa8, 0x10, 0x84, 0x4d, 0x72, 0x46, 0x05, 0x45, 0xfb, - 0x73, 0x92, 0x11, 0x86, 0x05, 0x89, 0xfc, 0xc1, 0xf9, 0x02, 0x33, 0x12, 0xe9, 0x8d, 0xe0, 0x31, - 0x3c, 0x28, 0x59, 0x9c, 0xe4, 0x79, 0x12, 0x13, 0xfe, 0x9a, 0x86, 0xe4, 0xa7, 0x15, 0xe1, 0x02, - 0xdd, 0x85, 0x5e, 0x9e, 0xac, 0xe6, 0x71, 0xe6, 0x39, 0x23, 0x67, 0xbc, 0x1f, 0x9a, 0x55, 0xb0, - 0x84, 0x51, 0xbd, 0x29, 0xcf, 0x69, 0xc6, 0x09, 0x7a, 0x0e, 0xc3, 0x90, 0x70, 0xba, 0x62, 0x33, - 0x72, 0x4e, 0x12, 0x32, 0x13, 0x94, 0x29, 0x2f, 0xfd, 0xe3, 0x7b, 0x93, 0x35, 0xa4, 0x49, 0x59, - 0x25, 0xdc, 0x32, 0x0a, 0xde, 0x00, 0x3a, 0x49, 0x38, 0x3d, 0xc5, 0x59, 0x94, 0x10, 0xde, 0x00, - 0x0d, 0x21, 0xe8, 0xc6, 0x82, 0xa4, 0x5e, 0x67, 0xe4, 0x8c, 0x07, 0xa1, 0xfa, 0x96, 0xba, 0x6f, - 0xf1, 0x6c, 0xb9, 0xca, 0x3d, 0x57, 0x49, 0xcd, 0x2a, 0x78, 0x03, 0xb7, 0x37, 0x3c, 0x1b, 0xe4, - 0x27, 0x30, 0x58, 0x28, 0x51, 0x24, 0x83, 0xe4, 0x9e, 0x33, 0x72, 0xc7, 0xfd, 0xe3, 0xfb, 0x15, - 0xa8, 0xcf, 0x22, 0x92, 0x89, 0xf8, 0x5d, 0x4c, 0x58, 0xb8, 0x61, 0x12, 0xfc, 0xe5, 0xc0, 0xed, - 0x22, 0x3b, 0x52, 0x72, 0x1d, 0xd4, 0x4f, 0xa0, 0x97, 0x63, 0x86, 0x53, 0xee, 0xb9, 0x0a, 0xc0, - 0x43, 0x0b, 0x40, 0x85, 0xef, 0xc9, 0x2b, 0xa5, 0xfc, 0x2c, 0x13, 0xec, 0x22, 0x34, 0x96, 0x56, - 0xe4, 0x5d, 0x3b, 0x72, 0xff, 0x31, 0xf4, 0x2d, 0x75, 0x34, 0x04, 0x77, 0x49, 0x2e, 0x0c, 0x26, - 0xf9, 0x89, 0x0e, 0x61, 0xe7, 0x67, 0x9c, 0xac, 0x88, 0x42, 0xb4, 0x1f, 0xea, 0xc5, 0x57, 0x9d, - 0x2f, 0x9d, 0xe0, 0x9f, 0x0e, 0x1c, 0x6e, 0x1e, 0x6f, 0xd2, 0x56, 0xc4, 0xe0, 0x58, 0x31, 0x1c, - 0x01, 0xf0, 0x42, 0xf7, 0xa9, 0xf1, 0x65, 0x49, 0x10, 0x86, 0x61, 0xb1, 0xfa, 0x9e, 0x08, 0x1c, - 0x61, 0x81, 0x4d, 0xb4, 0x8f, 0x6a, 0xa3, 0xd5, 0xc7, 0xad, 0x85, 0x85, 0x9d, 0x0e, 0x7c, 0xcb, - 0x1d, 0x7a, 0x0e, 0x07, 0x38, 0x8a, 0x62, 0x11, 0xd3, 0x0c, 0x27, 0x9a, 0xd0, 0x6e, 0x1b, 0x42, - 0xcb, 0x56, 0x5b, 0x65, 0xb1, 0x73, 0xe5, 0xb2, 0xf0, 0xa7, 0x70, 0xa7, 0x12, 0xf6, 0x95, 0x08, - 0xf8, 0xcd, 0x81, 0x83, 0x57, 0x8c, 0xce, 0x19, 0xe1, 0x8d, 0xb7, 0xe1, 0x11, 0xf4, 0x24, 0x0f, - 0x26, 0xf7, 0x8d, 0x68, 0x8d, 0x72, 0x89, 0x36, 0x77, 0x8b, 0xb6, 0x9a, 0xb2, 0x0a, 0x7e, 0xe9, - 0xc0, 0xf0, 0x3f, 0x68, 0xa6, 0x2e, 0x0e, 0x61, 0x27, 0x5f, 0x60, 0x4e, 0x0c, 0x34, 0xbd, 0x40, - 0x1f, 0xc1, 0x2d, 0x79, 0x18, 0x9f, 0xd2, 0x34, 0x4f, 0x88, 0x20, 0x91, 0x42, 0xe8, 0x86, 0x25, - 0x29, 0x1a, 0xc3, 0x81, 0x92, 0xbc, 0xa6, 0x85, 0x4c, 0xe1, 0x71, 0xc3, 0xb2, 0x18, 0x79, 0xb0, - 0xcb, 0x05, 0x66, 0xd2, 0x55, 0x57, 0x69, 0x14, 0x4b, 0x34, 0x82, 0xbe, 0xf9, 0x7c, 0x89, 0x33, - 0xea, 0xed, 0xa8, 0x5d, 0x5b, 0x24, 0x6d, 0x57, 0x79, 0x24, 0xd3, 0xe2, 0xf5, 0xb4, 0xad, 0x59, - 0x4a, 0x5b, 0xf3, 0xa9, 0x6c, 0x77, 0xb5, 0xad, 0x25, 0x92, 0xdc, 0x11, 0xc6, 0xbc, 0x3d, 0xcd, - 0x1d, 0x61, 0x2c, 0xf8, 0xd5, 0x85, 0xf7, 0x9f, 0x12, 0x09, 0xca, 0xee, 0x92, 0x4d, 0x5c, 0x35, - 0xdd, 0x15, 0x93, 0xb1, 0xef, 0x18, 0x4d, 0x9f, 0xd8, 0xdd, 0xac, 0x24, 0x45, 0x2f, 0x61, 0x2f, - 0x2d, 0xee, 0x92, 0xae, 0xf4, 0x63, 0x8b, 0xf5, 0x5a, 0x5c, 0x93, 0xcd, 0x8b, 0xb4, 0xf6, 0x81, - 0x4e, 0xd7, 0x7d, 0x48, 0x57, 0xfc, 0xa7, 0xad, 0xbc, 0x55, 0x74, 0x23, 0xff, 0x6b, 0x78, 0xef, - 0xda, 0x65, 0x7f, 0x93, 0x96, 
0xf5, 0xb7, 0x0b, 0xf7, 0xa7, 0x8c, 0x60, 0x8d, 0x54, 0xa6, 0xaa, - 0x2d, 0x27, 0x55, 0x7d, 0xb9, 0xe9, 0x72, 0x6c, 0xf3, 0xd4, 0xad, 0xe4, 0xe9, 0xc7, 0x8a, 0xde, - 0xa7, 0x33, 0xfc, 0x8d, 0x95, 0xe1, 0x4b, 0x71, 0xb7, 0x6e, 0x82, 0x2f, 0xd6, 0x1c, 0xf6, 0xd4, - 0x09, 0x5f, 0xb4, 0x3e, 0xa1, 0xea, 0x55, 0xf1, 0x60, 0x97, 0x11, 0x2e, 0x28, 0x23, 0xea, 0x3e, - 0x0c, 0xc2, 0x62, 0xf9, 0xbf, 0x34, 0xb8, 0x9b, 0x30, 0xfd, 0xbb, 0x03, 0x47, 0x75, 0xf1, 0x5c, - 0xf2, 0x4c, 0x55, 0xbc, 0x11, 0x9d, 0x6b, 0xbd, 0x11, 0xb2, 0xd3, 0x2c, 0xe3, 0x3c, 0x34, 0xd9, - 0x91, 0xc5, 0xb1, 0x17, 0xda, 0xa2, 0xe0, 0x0f, 0x07, 0xfc, 0xd2, 0xec, 0x74, 0x96, 0xc5, 0x8d, - 0x85, 0x78, 0x06, 0xbd, 0x19, 0xcd, 0xde, 0xc5, 0x73, 0x03, 0xec, 0x33, 0x0b, 0x58, 0xbd, 0xbb, - 0xc9, 0x54, 0xd9, 0x18, 0xf6, 0xb4, 0x03, 0x99, 0x5e, 0x4b, 0x7c, 0x95, 0xf4, 0x1e, 0xff, 0xd9, - 0x85, 0x83, 0xd2, 0x69, 0xe8, 0x5b, 0xe8, 0xca, 0x13, 0xd1, 0x87, 0xad, 0x10, 0xf9, 0x43, 0x4b, - 0xed, 0x59, 0x9a, 0x8b, 0x0b, 0x14, 0xc1, 0xfe, 0x7a, 0x7a, 0x44, 0x0f, 0xeb, 0xbd, 0x94, 0xa7, - 0x53, 0xff, 0x93, 0x56, 0xba, 0x86, 0xf6, 0x17, 0xd0, 0xb7, 0x66, 0x3d, 0x64, 0x13, 0xbb, 0x3d, - 0x5d, 0xfa, 0x47, 0x75, 0xdb, 0xc6, 0xdb, 0x0f, 0x30, 0xb0, 0x87, 0x12, 0x74, 0x74, 0xf9, 0x6c, - 0xe6, 0x3f, 0x68, 0x98, 0x66, 0xd0, 0x14, 0xf6, 0x8a, 0x87, 0x13, 0xf9, 0x96, 0x72, 0xe9, 0xa1, - 0xf7, 0xef, 0x55, 0xee, 0x19, 0x27, 0xa7, 0x70, 0x4b, 0x37, 0xe4, 0xe2, 0x08, 0xf4, 0x41, 0x9b, - 0x5e, 0x5d, 0xc1, 0x49, 0x0a, 0x77, 0xab, 0xaf, 0x11, 0x1a, 0xb7, 0xed, 0x1c, 0xfe, 0xc7, 0x2d, - 0x34, 0x35, 0xf0, 0xb7, 0x3d, 0xf5, 0x47, 0xf2, 0xf9, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x51, - 0xd9, 0x74, 0xbb, 0xc3, 0x0c, 0x00, 0x00, -} diff --git a/pkg/plugin/generated/ObjectStore.pb.go b/pkg/plugin/generated/ObjectStore.pb.go index b22cf13799..837f65b33e 100644 --- a/pkg/plugin/generated/ObjectStore.pb.go +++ b/pkg/plugin/generated/ObjectStore.pb.go @@ -1,400 +1,1159 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.14.0 // source: ObjectStore.proto package generated -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 type PutObjectRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - Bucket string `protobuf:"bytes,2,opt,name=bucket" json:"bucket,omitempty"` - Key string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` Body []byte `protobuf:"bytes,4,opt,name=body,proto3" json:"body,omitempty"` } -func (m *PutObjectRequest) Reset() { *m = PutObjectRequest{} } -func (m *PutObjectRequest) String() string { return proto.CompactTextString(m) } -func (*PutObjectRequest) ProtoMessage() {} -func (*PutObjectRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } +func (x *PutObjectRequest) Reset() { + *x = PutObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *PutObjectRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (*PutObjectRequest) ProtoMessage() {} + +func (x *PutObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutObjectRequest.ProtoReflect.Descriptor instead. 
+func (*PutObjectRequest) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{0} +} + +func (x *PutObjectRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *PutObjectRequest) GetBucket() string { - if m != nil { - return m.Bucket +func (x *PutObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket } return "" } -func (m *PutObjectRequest) GetKey() string { - if m != nil { - return m.Key +func (x *PutObjectRequest) GetKey() string { + if x != nil { + return x.Key } return "" } -func (m *PutObjectRequest) GetBody() []byte { - if m != nil { - return m.Body +func (x *PutObjectRequest) GetBody() []byte { + if x != nil { + return x.Body } return nil } type ObjectExistsRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - Bucket string `protobuf:"bytes,2,opt,name=bucket" json:"bucket,omitempty"` - Key string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *ObjectExistsRequest) Reset() { + *x = ObjectExistsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectExistsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ObjectExistsRequest) Reset() { *m = ObjectExistsRequest{} } -func (m *ObjectExistsRequest) String() string { return proto.CompactTextString(m) } -func (*ObjectExistsRequest) ProtoMessage() {} -func (*ObjectExistsRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } +func (*ObjectExistsRequest) ProtoMessage() {} -func (m *ObjectExistsRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *ObjectExistsRequest) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectExistsRequest.ProtoReflect.Descriptor instead. 
+func (*ObjectExistsRequest) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{1} +} + +func (x *ObjectExistsRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *ObjectExistsRequest) GetBucket() string { - if m != nil { - return m.Bucket +func (x *ObjectExistsRequest) GetBucket() string { + if x != nil { + return x.Bucket } return "" } -func (m *ObjectExistsRequest) GetKey() string { - if m != nil { - return m.Key +func (x *ObjectExistsRequest) GetKey() string { + if x != nil { + return x.Key } return "" } type ObjectExistsResponse struct { - Exists bool `protobuf:"varint,1,opt,name=exists" json:"exists,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` +} + +func (x *ObjectExistsResponse) Reset() { + *x = ObjectExistsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectExistsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ObjectExistsResponse) Reset() { *m = ObjectExistsResponse{} } -func (m *ObjectExistsResponse) String() string { return proto.CompactTextString(m) } -func (*ObjectExistsResponse) ProtoMessage() {} -func (*ObjectExistsResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} } +func (*ObjectExistsResponse) ProtoMessage() {} -func (m *ObjectExistsResponse) GetExists() bool { - if m != nil { - return m.Exists +func (x *ObjectExistsResponse) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectExistsResponse.ProtoReflect.Descriptor instead. 
+func (*ObjectExistsResponse) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{2} +} + +func (x *ObjectExistsResponse) GetExists() bool { + if x != nil { + return x.Exists } return false } type GetObjectRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - Bucket string `protobuf:"bytes,2,opt,name=bucket" json:"bucket,omitempty"` - Key string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` } -func (m *GetObjectRequest) Reset() { *m = GetObjectRequest{} } -func (m *GetObjectRequest) String() string { return proto.CompactTextString(m) } -func (*GetObjectRequest) ProtoMessage() {} -func (*GetObjectRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{3} } +func (x *GetObjectRequest) Reset() { + *x = GetObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *GetObjectRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (*GetObjectRequest) ProtoMessage() {} + +func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. 
+func (*GetObjectRequest) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{3} +} + +func (x *GetObjectRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *GetObjectRequest) GetBucket() string { - if m != nil { - return m.Bucket +func (x *GetObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket } return "" } -func (m *GetObjectRequest) GetKey() string { - if m != nil { - return m.Key +func (x *GetObjectRequest) GetKey() string { + if x != nil { + return x.Key } return "" } type Bytes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` } -func (m *Bytes) Reset() { *m = Bytes{} } -func (m *Bytes) String() string { return proto.CompactTextString(m) } -func (*Bytes) ProtoMessage() {} -func (*Bytes) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{4} } +func (x *Bytes) Reset() { + *x = Bytes{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bytes) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *Bytes) GetData() []byte { - if m != nil { - return m.Data +func (*Bytes) ProtoMessage() {} + +func (x *Bytes) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bytes.ProtoReflect.Descriptor instead. +func (*Bytes) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{4} +} + +func (x *Bytes) GetData() []byte { + if x != nil { + return x.Data } return nil } type ListCommonPrefixesRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - Bucket string `protobuf:"bytes,2,opt,name=bucket" json:"bucket,omitempty"` - Delimiter string `protobuf:"bytes,3,opt,name=delimiter" json:"delimiter,omitempty"` - Prefix string `protobuf:"bytes,4,opt,name=prefix" json:"prefix,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + Delimiter string `protobuf:"bytes,3,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + Prefix string `protobuf:"bytes,4,opt,name=prefix,proto3" json:"prefix,omitempty"` } -func (m *ListCommonPrefixesRequest) Reset() { *m = ListCommonPrefixesRequest{} } -func (m *ListCommonPrefixesRequest) String() string { return proto.CompactTextString(m) } -func (*ListCommonPrefixesRequest) ProtoMessage() {} -func (*ListCommonPrefixesRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{5} } +func (x *ListCommonPrefixesRequest) Reset() { + *x = ListCommonPrefixesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *ListCommonPrefixesRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *ListCommonPrefixesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListCommonPrefixesRequest) 
ProtoMessage() {} + +func (x *ListCommonPrefixesRequest) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListCommonPrefixesRequest.ProtoReflect.Descriptor instead. +func (*ListCommonPrefixesRequest) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{5} +} + +func (x *ListCommonPrefixesRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *ListCommonPrefixesRequest) GetBucket() string { - if m != nil { - return m.Bucket +func (x *ListCommonPrefixesRequest) GetBucket() string { + if x != nil { + return x.Bucket } return "" } -func (m *ListCommonPrefixesRequest) GetDelimiter() string { - if m != nil { - return m.Delimiter +func (x *ListCommonPrefixesRequest) GetDelimiter() string { + if x != nil { + return x.Delimiter } return "" } -func (m *ListCommonPrefixesRequest) GetPrefix() string { - if m != nil { - return m.Prefix +func (x *ListCommonPrefixesRequest) GetPrefix() string { + if x != nil { + return x.Prefix } return "" } type ListCommonPrefixesResponse struct { - Prefixes []string `protobuf:"bytes,1,rep,name=prefixes" json:"prefixes,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Prefixes []string `protobuf:"bytes,1,rep,name=prefixes,proto3" json:"prefixes,omitempty"` +} + +func (x *ListCommonPrefixesResponse) Reset() { + *x = ListCommonPrefixesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListCommonPrefixesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListCommonPrefixesResponse) ProtoMessage() {} + +func (x *ListCommonPrefixesResponse) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *ListCommonPrefixesResponse) Reset() { *m = ListCommonPrefixesResponse{} } -func (m *ListCommonPrefixesResponse) String() string { return proto.CompactTextString(m) } -func (*ListCommonPrefixesResponse) ProtoMessage() {} -func (*ListCommonPrefixesResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{6} } +// Deprecated: Use ListCommonPrefixesResponse.ProtoReflect.Descriptor instead. 
+func (*ListCommonPrefixesResponse) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{6} +} -func (m *ListCommonPrefixesResponse) GetPrefixes() []string { - if m != nil { - return m.Prefixes +func (x *ListCommonPrefixesResponse) GetPrefixes() []string { + if x != nil { + return x.Prefixes } return nil } type ListObjectsRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - Bucket string `protobuf:"bytes,2,opt,name=bucket" json:"bucket,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` } -func (m *ListObjectsRequest) Reset() { *m = ListObjectsRequest{} } -func (m *ListObjectsRequest) String() string { return proto.CompactTextString(m) } -func (*ListObjectsRequest) ProtoMessage() {} -func (*ListObjectsRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{7} } +func (x *ListObjectsRequest) Reset() { + *x = ListObjectsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *ListObjectsRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (*ListObjectsRequest) ProtoMessage() {} + +func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. 
+func (*ListObjectsRequest) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{7} +} + +func (x *ListObjectsRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *ListObjectsRequest) GetBucket() string { - if m != nil { - return m.Bucket +func (x *ListObjectsRequest) GetBucket() string { + if x != nil { + return x.Bucket } return "" } -func (m *ListObjectsRequest) GetPrefix() string { - if m != nil { - return m.Prefix +func (x *ListObjectsRequest) GetPrefix() string { + if x != nil { + return x.Prefix } return "" } type ListObjectsResponse struct { - Keys []string `protobuf:"bytes,1,rep,name=keys" json:"keys,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keys []string `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` } -func (m *ListObjectsResponse) Reset() { *m = ListObjectsResponse{} } -func (m *ListObjectsResponse) String() string { return proto.CompactTextString(m) } -func (*ListObjectsResponse) ProtoMessage() {} -func (*ListObjectsResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{8} } +func (x *ListObjectsResponse) Reset() { + *x = ListObjectsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectsResponse) ProtoMessage() {} + +func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -func (m *ListObjectsResponse) GetKeys() []string { - if m != nil { - return m.Keys +// Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. 
+func (*ListObjectsResponse) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{8} +} + +func (x *ListObjectsResponse) GetKeys() []string { + if x != nil { + return x.Keys } return nil } type DeleteObjectRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - Bucket string `protobuf:"bytes,2,opt,name=bucket" json:"bucket,omitempty"` - Key string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` } -func (m *DeleteObjectRequest) Reset() { *m = DeleteObjectRequest{} } -func (m *DeleteObjectRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteObjectRequest) ProtoMessage() {} -func (*DeleteObjectRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{9} } +func (x *DeleteObjectRequest) Reset() { + *x = DeleteObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteObjectRequest) ProtoMessage() {} + +func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteObjectRequest) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{9} +} -func (m *DeleteObjectRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *DeleteObjectRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *DeleteObjectRequest) GetBucket() string { - if m != nil { - return m.Bucket +func (x *DeleteObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket } return "" } -func (m *DeleteObjectRequest) GetKey() string { - if m != nil { - return m.Key +func (x *DeleteObjectRequest) GetKey() string { + if x != nil { + return x.Key } return "" } type CreateSignedURLRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - Bucket string `protobuf:"bytes,2,opt,name=bucket" json:"bucket,omitempty"` - Key string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"` - Ttl int64 `protobuf:"varint,4,opt,name=ttl" json:"ttl,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Ttl int64 `protobuf:"varint,4,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (x *CreateSignedURLRequest) Reset() { + *x = CreateSignedURLRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSignedURLRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSignedURLRequest) ProtoMessage() {} + +func (x *CreateSignedURLRequest) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *CreateSignedURLRequest) Reset() { *m = CreateSignedURLRequest{} } -func (m *CreateSignedURLRequest) String() string { return proto.CompactTextString(m) } -func (*CreateSignedURLRequest) ProtoMessage() {} -func (*CreateSignedURLRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{10} } +// Deprecated: Use CreateSignedURLRequest.ProtoReflect.Descriptor instead. 
+func (*CreateSignedURLRequest) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{10} +} -func (m *CreateSignedURLRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *CreateSignedURLRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *CreateSignedURLRequest) GetBucket() string { - if m != nil { - return m.Bucket +func (x *CreateSignedURLRequest) GetBucket() string { + if x != nil { + return x.Bucket } return "" } -func (m *CreateSignedURLRequest) GetKey() string { - if m != nil { - return m.Key +func (x *CreateSignedURLRequest) GetKey() string { + if x != nil { + return x.Key } return "" } -func (m *CreateSignedURLRequest) GetTtl() int64 { - if m != nil { - return m.Ttl +func (x *CreateSignedURLRequest) GetTtl() int64 { + if x != nil { + return x.Ttl } return 0 } type CreateSignedURLResponse struct { - Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *CreateSignedURLResponse) Reset() { + *x = CreateSignedURLResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CreateSignedURLResponse) Reset() { *m = CreateSignedURLResponse{} } -func (m *CreateSignedURLResponse) String() string { return proto.CompactTextString(m) } -func (*CreateSignedURLResponse) ProtoMessage() {} -func (*CreateSignedURLResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{11} } +func (x *CreateSignedURLResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *CreateSignedURLResponse) GetUrl() string { - if m != nil { - return m.Url +func (*CreateSignedURLResponse) ProtoMessage() {} + +func (x *CreateSignedURLResponse) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSignedURLResponse.ProtoReflect.Descriptor instead. 
+func (*CreateSignedURLResponse) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{11} +} + +func (x *CreateSignedURLResponse) GetUrl() string { + if x != nil { + return x.Url } return "" } type ObjectStoreInitRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - Config map[string]string `protobuf:"bytes,2,rep,name=config" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + Config map[string]string `protobuf:"bytes,2,rep,name=config,proto3" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ObjectStoreInitRequest) Reset() { + *x = ObjectStoreInitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ObjectStore_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ObjectStoreInitRequest) Reset() { *m = ObjectStoreInitRequest{} } -func (m *ObjectStoreInitRequest) String() string { return proto.CompactTextString(m) } -func (*ObjectStoreInitRequest) ProtoMessage() {} -func (*ObjectStoreInitRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{12} } +func (x *ObjectStoreInitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectStoreInitRequest) ProtoMessage() {} -func (m *ObjectStoreInitRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *ObjectStoreInitRequest) ProtoReflect() protoreflect.Message { + mi := &file_ObjectStore_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectStoreInitRequest.ProtoReflect.Descriptor instead. 
+func (*ObjectStoreInitRequest) Descriptor() ([]byte, []int) { + return file_ObjectStore_proto_rawDescGZIP(), []int{12} +} + +func (x *ObjectStoreInitRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *ObjectStoreInitRequest) GetConfig() map[string]string { - if m != nil { - return m.Config +func (x *ObjectStoreInitRequest) GetConfig() map[string]string { + if x != nil { + return x.Config } return nil } -func init() { - proto.RegisterType((*PutObjectRequest)(nil), "generated.PutObjectRequest") - proto.RegisterType((*ObjectExistsRequest)(nil), "generated.ObjectExistsRequest") - proto.RegisterType((*ObjectExistsResponse)(nil), "generated.ObjectExistsResponse") - proto.RegisterType((*GetObjectRequest)(nil), "generated.GetObjectRequest") - proto.RegisterType((*Bytes)(nil), "generated.Bytes") - proto.RegisterType((*ListCommonPrefixesRequest)(nil), "generated.ListCommonPrefixesRequest") - proto.RegisterType((*ListCommonPrefixesResponse)(nil), "generated.ListCommonPrefixesResponse") - proto.RegisterType((*ListObjectsRequest)(nil), "generated.ListObjectsRequest") - proto.RegisterType((*ListObjectsResponse)(nil), "generated.ListObjectsResponse") - proto.RegisterType((*DeleteObjectRequest)(nil), "generated.DeleteObjectRequest") - proto.RegisterType((*CreateSignedURLRequest)(nil), "generated.CreateSignedURLRequest") - proto.RegisterType((*CreateSignedURLResponse)(nil), "generated.CreateSignedURLResponse") - proto.RegisterType((*ObjectStoreInitRequest)(nil), "generated.ObjectStoreInitRequest") +var File_ObjectStore_proto protoreflect.FileDescriptor + +var file_ObjectStore_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x1a, 0x0c, + 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x68, 0x0a, 0x10, + 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x22, 0x57, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, + 0x2e, 0x0a, 0x14, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, + 0x54, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 
0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1b, 0x0a, 0x05, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x22, 0x81, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x38, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, + 0x22, 0x5c, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x29, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x57, 0x0a, 0x13, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x22, 0x6c, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, + 0x65, 0x64, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 
0x74, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x22, 0x2b, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xb2, 0x01, + 0x0a, 0x16, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x69, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x12, 0x45, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x32, 0xe4, 0x04, 0x0a, 0x0b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, + 0x72, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x21, 0x2e, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, + 0x72, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x3c, 0x0a, 0x09, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x2e, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x50, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x12, 0x4f, 0x0a, + 0x0c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x1e, 0x2e, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, + 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x2e, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x30, 0x01, 0x12, 0x61, 0x0a, 0x12, + 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x65, 0x73, 0x12, 0x24, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 
0x4c, + 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x4c, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1d, + 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, + 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x2e, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x58, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x55, + 0x52, 0x4c, 0x12, 0x21, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x55, 0x52, 0x4c, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x55, 0x52, + 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x76, 0x6d, 0x77, 0x61, 0x72, 0x65, 0x2d, 0x74, + 0x61, 0x6e, 0x7a, 0x75, 0x2f, 0x76, 0x65, 0x6c, 0x65, 0x72, 0x6f, 0x2f, 0x70, 0x6b, 0x67, 0x2f, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_ObjectStore_proto_rawDescOnce sync.Once + file_ObjectStore_proto_rawDescData = file_ObjectStore_proto_rawDesc +) + +func file_ObjectStore_proto_rawDescGZIP() []byte { + file_ObjectStore_proto_rawDescOnce.Do(func() { + file_ObjectStore_proto_rawDescData = protoimpl.X.CompressGZIP(file_ObjectStore_proto_rawDescData) + }) + return file_ObjectStore_proto_rawDescData +} + +var file_ObjectStore_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_ObjectStore_proto_goTypes = []interface{}{ + (*PutObjectRequest)(nil), // 0: generated.PutObjectRequest + (*ObjectExistsRequest)(nil), // 1: generated.ObjectExistsRequest + (*ObjectExistsResponse)(nil), // 2: generated.ObjectExistsResponse + (*GetObjectRequest)(nil), // 3: generated.GetObjectRequest + (*Bytes)(nil), // 4: generated.Bytes + (*ListCommonPrefixesRequest)(nil), // 5: generated.ListCommonPrefixesRequest + (*ListCommonPrefixesResponse)(nil), // 6: generated.ListCommonPrefixesResponse + (*ListObjectsRequest)(nil), // 7: generated.ListObjectsRequest + (*ListObjectsResponse)(nil), // 8: generated.ListObjectsResponse + (*DeleteObjectRequest)(nil), // 9: generated.DeleteObjectRequest + (*CreateSignedURLRequest)(nil), // 10: generated.CreateSignedURLRequest + (*CreateSignedURLResponse)(nil), // 11: generated.CreateSignedURLResponse + 
(*ObjectStoreInitRequest)(nil), // 12: generated.ObjectStoreInitRequest + nil, // 13: generated.ObjectStoreInitRequest.ConfigEntry + (*Empty)(nil), // 14: generated.Empty +} +var file_ObjectStore_proto_depIdxs = []int32{ + 13, // 0: generated.ObjectStoreInitRequest.config:type_name -> generated.ObjectStoreInitRequest.ConfigEntry + 12, // 1: generated.ObjectStore.Init:input_type -> generated.ObjectStoreInitRequest + 0, // 2: generated.ObjectStore.PutObject:input_type -> generated.PutObjectRequest + 1, // 3: generated.ObjectStore.ObjectExists:input_type -> generated.ObjectExistsRequest + 3, // 4: generated.ObjectStore.GetObject:input_type -> generated.GetObjectRequest + 5, // 5: generated.ObjectStore.ListCommonPrefixes:input_type -> generated.ListCommonPrefixesRequest + 7, // 6: generated.ObjectStore.ListObjects:input_type -> generated.ListObjectsRequest + 9, // 7: generated.ObjectStore.DeleteObject:input_type -> generated.DeleteObjectRequest + 10, // 8: generated.ObjectStore.CreateSignedURL:input_type -> generated.CreateSignedURLRequest + 14, // 9: generated.ObjectStore.Init:output_type -> generated.Empty + 14, // 10: generated.ObjectStore.PutObject:output_type -> generated.Empty + 2, // 11: generated.ObjectStore.ObjectExists:output_type -> generated.ObjectExistsResponse + 4, // 12: generated.ObjectStore.GetObject:output_type -> generated.Bytes + 6, // 13: generated.ObjectStore.ListCommonPrefixes:output_type -> generated.ListCommonPrefixesResponse + 8, // 14: generated.ObjectStore.ListObjects:output_type -> generated.ListObjectsResponse + 14, // 15: generated.ObjectStore.DeleteObject:output_type -> generated.Empty + 11, // 16: generated.ObjectStore.CreateSignedURL:output_type -> generated.CreateSignedURLResponse + 9, // [9:17] is the sub-list for method output_type + 1, // [1:9] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_ObjectStore_proto_init() } +func file_ObjectStore_proto_init() { + if File_ObjectStore_proto != nil { + return + } + file_Shared_proto_init() + if !protoimpl.UnsafeEnabled { + file_ObjectStore_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectExistsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectExistsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bytes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[5].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*ListCommonPrefixesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListCommonPrefixesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListObjectsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListObjectsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSignedURLRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSignedURLResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ObjectStore_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectStoreInitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_ObjectStore_proto_rawDesc, + NumEnums: 0, + NumMessages: 14, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_ObjectStore_proto_goTypes, + DependencyIndexes: file_ObjectStore_proto_depIdxs, + MessageInfos: file_ObjectStore_proto_msgTypes, + }.Build() + File_ObjectStore_proto = out.File + file_ObjectStore_proto_rawDesc = nil + file_ObjectStore_proto_goTypes = nil + file_ObjectStore_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for ObjectStore service +const _ = grpc.SupportPackageIsVersion6 +// ObjectStoreClient is the client API for ObjectStore service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type ObjectStoreClient interface { Init(ctx context.Context, in *ObjectStoreInitRequest, opts ...grpc.CallOption) (*Empty, error) PutObject(ctx context.Context, opts ...grpc.CallOption) (ObjectStore_PutObjectClient, error) @@ -407,16 +1166,16 @@ type ObjectStoreClient interface { } type objectStoreClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewObjectStoreClient(cc *grpc.ClientConn) ObjectStoreClient { +func NewObjectStoreClient(cc grpc.ClientConnInterface) ObjectStoreClient { return &objectStoreClient{cc} } func (c *objectStoreClient) Init(ctx context.Context, in *ObjectStoreInitRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := grpc.Invoke(ctx, "/generated.ObjectStore/Init", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ObjectStore/Init", in, out, opts...) if err != nil { return nil, err } @@ -424,7 +1183,7 @@ func (c *objectStoreClient) Init(ctx context.Context, in *ObjectStoreInitRequest } func (c *objectStoreClient) PutObject(ctx context.Context, opts ...grpc.CallOption) (ObjectStore_PutObjectClient, error) { - stream, err := grpc.NewClientStream(ctx, &_ObjectStore_serviceDesc.Streams[0], c.cc, "/generated.ObjectStore/PutObject", opts...) + stream, err := c.cc.NewStream(ctx, &_ObjectStore_serviceDesc.Streams[0], "/generated.ObjectStore/PutObject", opts...) if err != nil { return nil, err } @@ -459,7 +1218,7 @@ func (x *objectStorePutObjectClient) CloseAndRecv() (*Empty, error) { func (c *objectStoreClient) ObjectExists(ctx context.Context, in *ObjectExistsRequest, opts ...grpc.CallOption) (*ObjectExistsResponse, error) { out := new(ObjectExistsResponse) - err := grpc.Invoke(ctx, "/generated.ObjectStore/ObjectExists", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ObjectStore/ObjectExists", in, out, opts...) if err != nil { return nil, err } @@ -467,7 +1226,7 @@ func (c *objectStoreClient) ObjectExists(ctx context.Context, in *ObjectExistsRe } func (c *objectStoreClient) GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (ObjectStore_GetObjectClient, error) { - stream, err := grpc.NewClientStream(ctx, &_ObjectStore_serviceDesc.Streams[1], c.cc, "/generated.ObjectStore/GetObject", opts...) + stream, err := c.cc.NewStream(ctx, &_ObjectStore_serviceDesc.Streams[1], "/generated.ObjectStore/GetObject", opts...) if err != nil { return nil, err } @@ -500,7 +1259,7 @@ func (x *objectStoreGetObjectClient) Recv() (*Bytes, error) { func (c *objectStoreClient) ListCommonPrefixes(ctx context.Context, in *ListCommonPrefixesRequest, opts ...grpc.CallOption) (*ListCommonPrefixesResponse, error) { out := new(ListCommonPrefixesResponse) - err := grpc.Invoke(ctx, "/generated.ObjectStore/ListCommonPrefixes", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ObjectStore/ListCommonPrefixes", in, out, opts...) if err != nil { return nil, err } @@ -509,7 +1268,7 @@ func (c *objectStoreClient) ListCommonPrefixes(ctx context.Context, in *ListComm func (c *objectStoreClient) ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) { out := new(ListObjectsResponse) - err := grpc.Invoke(ctx, "/generated.ObjectStore/ListObjects", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ObjectStore/ListObjects", in, out, opts...) 
if err != nil { return nil, err } @@ -518,7 +1277,7 @@ func (c *objectStoreClient) ListObjects(ctx context.Context, in *ListObjectsRequ func (c *objectStoreClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := grpc.Invoke(ctx, "/generated.ObjectStore/DeleteObject", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ObjectStore/DeleteObject", in, out, opts...) if err != nil { return nil, err } @@ -527,15 +1286,14 @@ func (c *objectStoreClient) DeleteObject(ctx context.Context, in *DeleteObjectRe func (c *objectStoreClient) CreateSignedURL(ctx context.Context, in *CreateSignedURLRequest, opts ...grpc.CallOption) (*CreateSignedURLResponse, error) { out := new(CreateSignedURLResponse) - err := grpc.Invoke(ctx, "/generated.ObjectStore/CreateSignedURL", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.ObjectStore/CreateSignedURL", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for ObjectStore service - +// ObjectStoreServer is the server API for ObjectStore service. type ObjectStoreServer interface { Init(context.Context, *ObjectStoreInitRequest) (*Empty, error) PutObject(ObjectStore_PutObjectServer) error @@ -547,6 +1305,35 @@ type ObjectStoreServer interface { CreateSignedURL(context.Context, *CreateSignedURLRequest) (*CreateSignedURLResponse, error) } +// UnimplementedObjectStoreServer can be embedded to have forward compatible implementations. +type UnimplementedObjectStoreServer struct { +} + +func (*UnimplementedObjectStoreServer) Init(context.Context, *ObjectStoreInitRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") +} +func (*UnimplementedObjectStoreServer) PutObject(ObjectStore_PutObjectServer) error { + return status.Errorf(codes.Unimplemented, "method PutObject not implemented") +} +func (*UnimplementedObjectStoreServer) ObjectExists(context.Context, *ObjectExistsRequest) (*ObjectExistsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ObjectExists not implemented") +} +func (*UnimplementedObjectStoreServer) GetObject(*GetObjectRequest, ObjectStore_GetObjectServer) error { + return status.Errorf(codes.Unimplemented, "method GetObject not implemented") +} +func (*UnimplementedObjectStoreServer) ListCommonPrefixes(context.Context, *ListCommonPrefixesRequest) (*ListCommonPrefixesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListCommonPrefixes not implemented") +} +func (*UnimplementedObjectStoreServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented") +} +func (*UnimplementedObjectStoreServer) DeleteObject(context.Context, *DeleteObjectRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") +} +func (*UnimplementedObjectStoreServer) CreateSignedURL(context.Context, *CreateSignedURLRequest) (*CreateSignedURLResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSignedURL not implemented") +} + func RegisterObjectStoreServer(s *grpc.Server, srv ObjectStoreServer) { s.RegisterService(&_ObjectStore_serviceDesc, srv) } @@ -749,46 +1536,3 @@ var _ObjectStore_serviceDesc = grpc.ServiceDesc{ }, Metadata: "ObjectStore.proto", } - -func init() { proto.RegisterFile("ObjectStore.proto", fileDescriptor3) } - -var fileDescriptor3 = []byte{ - // 577 
bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xd6, 0xc6, 0x69, 0x54, 0x4f, 0x22, 0x61, 0xb6, 0x55, 0x30, 0x2e, 0x94, 0xb0, 0x02, 0x29, - 0x08, 0x11, 0xa1, 0x72, 0x29, 0xd0, 0x03, 0x22, 0x44, 0x08, 0x29, 0x52, 0x2b, 0x07, 0x04, 0x07, - 0x2e, 0x4e, 0x3c, 0x4d, 0x4d, 0x1c, 0x3b, 0xd8, 0x6b, 0x54, 0x1f, 0x79, 0x25, 0x9e, 0x89, 0x07, - 0x41, 0xbb, 0xde, 0x26, 0xde, 0xfc, 0x10, 0xa9, 0xca, 0x6d, 0x66, 0x76, 0xe7, 0x9b, 0x6f, 0x66, - 0xfd, 0x8d, 0xe1, 0xee, 0xf9, 0xf0, 0x07, 0x8e, 0xf8, 0x80, 0xc7, 0x09, 0x76, 0x66, 0x49, 0xcc, - 0x63, 0x6a, 0x8e, 0x31, 0xc2, 0xc4, 0xe3, 0xe8, 0x3b, 0x8d, 0xc1, 0x95, 0x97, 0xa0, 0x5f, 0x1c, - 0xb0, 0x2b, 0xb0, 0x2e, 0x32, 0x5e, 0x24, 0xb8, 0xf8, 0x33, 0xc3, 0x94, 0xd3, 0x26, 0xd4, 0x66, - 0x61, 0x36, 0x0e, 0x22, 0x9b, 0xb4, 0x48, 0xdb, 0x74, 0x95, 0x27, 0xe2, 0xc3, 0x6c, 0x34, 0x41, - 0x6e, 0x57, 0x8a, 0x78, 0xe1, 0x51, 0x0b, 0x8c, 0x09, 0xe6, 0xb6, 0x21, 0x83, 0xc2, 0xa4, 0x14, - 0xaa, 0xc3, 0xd8, 0xcf, 0xed, 0x6a, 0x8b, 0xb4, 0x1b, 0xae, 0xb4, 0xd9, 0x57, 0x38, 0x28, 0xca, - 0xf4, 0xae, 0x83, 0x94, 0xa7, 0x3b, 0x2b, 0xc6, 0x3a, 0x70, 0xa8, 0x03, 0xa7, 0xb3, 0x38, 0x4a, - 0x51, 0x20, 0xa0, 0x8c, 0x48, 0xe4, 0x7d, 0x57, 0x79, 0xec, 0x33, 0x58, 0x1f, 0x71, 0xd7, 0x2d, - 0xb3, 0x23, 0xd8, 0x7b, 0x9f, 0x73, 0x4c, 0x45, 0xef, 0xbe, 0xc7, 0x3d, 0x09, 0xd4, 0x70, 0xa5, - 0xcd, 0x7e, 0x13, 0xb8, 0xdf, 0x0f, 0x52, 0xde, 0x8d, 0xa7, 0xd3, 0x38, 0xba, 0x48, 0xf0, 0x32, - 0xb8, 0xc6, 0x5b, 0x8f, 0xe0, 0x01, 0x98, 0x3e, 0x86, 0xc1, 0x34, 0xe0, 0x98, 0x28, 0x0a, 0x8b, - 0x80, 0x44, 0x93, 0x05, 0xe4, 0xf4, 0x05, 0x9a, 0xf4, 0xd8, 0x29, 0x38, 0xeb, 0x28, 0xa8, 0x61, - 0x39, 0xb0, 0x3f, 0x53, 0x31, 0x9b, 0xb4, 0x8c, 0xb6, 0xe9, 0xce, 0x7d, 0xf6, 0x1d, 0xa8, 0xc8, - 0x2c, 0x26, 0x76, 0x6b, 0xd6, 0x0b, 0x5e, 0x86, 0xc6, 0xeb, 0x19, 0x1c, 0x68, 0xe8, 0x8a, 0x10, - 0x85, 0xea, 0x04, 0xf3, 0x1b, 0x32, 0xd2, 0x16, 0x9f, 0xd0, 0x07, 0x0c, 0x91, 0xe3, 0xae, 0x1f, - 0x2f, 0x84, 0x66, 0x37, 0x41, 0x8f, 0xe3, 0x20, 0x18, 0x47, 0xe8, 0x7f, 0x71, 0xfb, 0xbb, 0xd3, - 0x82, 0x05, 0x06, 0xe7, 0xa1, 0x7c, 0x0c, 0xc3, 0x15, 0x26, 0x7b, 0x0e, 0xf7, 0x56, 0xaa, 0xa9, - 0xae, 0x2d, 0x30, 0xb2, 0x24, 0x54, 0xb5, 0x84, 0xc9, 0xfe, 0x10, 0x68, 0x96, 0xf4, 0xfc, 0x29, - 0x0a, 0xb6, 0xf6, 0xdd, 0x83, 0xda, 0x28, 0x8e, 0x2e, 0x83, 0xb1, 0x5d, 0x69, 0x19, 0xed, 0xfa, - 0xc9, 0x8b, 0xce, 0x5c, 0xfd, 0x9d, 0xf5, 0x50, 0x9d, 0xae, 0xbc, 0xdf, 0x8b, 0x78, 0x92, 0xbb, - 0x2a, 0xd9, 0x79, 0x0d, 0xf5, 0x52, 0xf8, 0xa6, 0x33, 0xb2, 0xe8, 0xec, 0x10, 0xf6, 0x7e, 0x79, - 0x61, 0x86, 0x6a, 0x04, 0x85, 0xf3, 0xa6, 0x72, 0x4a, 0x4e, 0xfe, 0x56, 0xa1, 0x5e, 0xaa, 0x44, - 0xdf, 0x42, 0x55, 0x54, 0xa3, 0x8f, 0xb7, 0x32, 0x71, 0xac, 0xd2, 0x95, 0xde, 0x74, 0xc6, 0x73, - 0x7a, 0x06, 0xe6, 0x7c, 0x45, 0xd1, 0xa3, 0xd2, 0xf1, 0xf2, 0xe2, 0x5a, 0xcd, 0x6d, 0x13, 0x7a, - 0x0e, 0x8d, 0xf2, 0x76, 0xa0, 0xc7, 0x2b, 0x14, 0xb4, 0x7d, 0xe4, 0x3c, 0xda, 0x78, 0xae, 0x9e, - 0xe8, 0x0c, 0xcc, 0xf9, 0xfa, 0xd0, 0xe8, 0x2c, 0x2f, 0x15, 0x8d, 0x8e, 0xdc, 0x0d, 0x2f, 0x09, - 0xf5, 0x0a, 0x2d, 0xe9, 0x2a, 0xa4, 0x4f, 0x4a, 0x37, 0x37, 0xee, 0x09, 0xe7, 0xe9, 0x96, 0x5b, - 0x8a, 0x60, 0x1f, 0xea, 0x25, 0x41, 0xd1, 0x87, 0x4b, 0x59, 0xba, 0x8c, 0x9d, 0xe3, 0x4d, 0xc7, - 0x0a, 0xed, 0x1d, 0x34, 0xca, 0x9a, 0xd3, 0xe6, 0xb7, 0x46, 0x8c, 0x6b, 0xde, 0xef, 0x1b, 0xdc, - 0x59, 0xfa, 0xdc, 0xb5, 0xef, 0x60, 0xbd, 0xf0, 0x1c, 0xf6, 0xbf, 0x2b, 0x05, 0xb7, 0x61, 0x4d, - 0xfe, 0xc3, 0x5e, 0xfd, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x4e, 0x76, 0xa3, 0xf1, 
0x06, 0x00, - 0x00, -} diff --git a/pkg/plugin/generated/PluginLister.pb.go b/pkg/plugin/generated/PluginLister.pb.go index a2d7957d9c..85398bc86c 100644 --- a/pkg/plugin/generated/PluginLister.pb.go +++ b/pkg/plugin/generated/PluginLister.pb.go @@ -1,112 +1,298 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.14.0 // source: PluginLister.proto package generated -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 type PluginIdentifier struct { - Command string `protobuf:"bytes,1,opt,name=command" json:"command,omitempty"` - Kind string `protobuf:"bytes,2,opt,name=kind" json:"kind,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Command string `protobuf:"bytes,1,opt,name=command,proto3" json:"command,omitempty"` + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *PluginIdentifier) Reset() { + *x = PluginIdentifier{} + if protoimpl.UnsafeEnabled { + mi := &file_PluginLister_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PluginIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PluginIdentifier) ProtoMessage() {} + +func (x *PluginIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_PluginLister_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *PluginIdentifier) Reset() { *m = PluginIdentifier{} } -func (m *PluginIdentifier) String() string { return proto.CompactTextString(m) } -func (*PluginIdentifier) ProtoMessage() {} -func (*PluginIdentifier) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } +// Deprecated: Use PluginIdentifier.ProtoReflect.Descriptor instead. 
+func (*PluginIdentifier) Descriptor() ([]byte, []int) { + return file_PluginLister_proto_rawDescGZIP(), []int{0} +} -func (m *PluginIdentifier) GetCommand() string { - if m != nil { - return m.Command +func (x *PluginIdentifier) GetCommand() string { + if x != nil { + return x.Command } return "" } -func (m *PluginIdentifier) GetKind() string { - if m != nil { - return m.Kind +func (x *PluginIdentifier) GetKind() string { + if x != nil { + return x.Kind } return "" } -func (m *PluginIdentifier) GetName() string { - if m != nil { - return m.Name +func (x *PluginIdentifier) GetName() string { + if x != nil { + return x.Name } return "" } type ListPluginsResponse struct { - Plugins []*PluginIdentifier `protobuf:"bytes,1,rep,name=plugins" json:"plugins,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugins []*PluginIdentifier `protobuf:"bytes,1,rep,name=plugins,proto3" json:"plugins,omitempty"` +} + +func (x *ListPluginsResponse) Reset() { + *x = ListPluginsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_PluginLister_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListPluginsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListPluginsResponse) Reset() { *m = ListPluginsResponse{} } -func (m *ListPluginsResponse) String() string { return proto.CompactTextString(m) } -func (*ListPluginsResponse) ProtoMessage() {} -func (*ListPluginsResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } +func (*ListPluginsResponse) ProtoMessage() {} -func (m *ListPluginsResponse) GetPlugins() []*PluginIdentifier { - if m != nil { - return m.Plugins +func (x *ListPluginsResponse) ProtoReflect() protoreflect.Message { + mi := &file_PluginLister_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPluginsResponse.ProtoReflect.Descriptor instead. 
+func (*ListPluginsResponse) Descriptor() ([]byte, []int) { + return file_PluginLister_proto_rawDescGZIP(), []int{1} +} + +func (x *ListPluginsResponse) GetPlugins() []*PluginIdentifier { + if x != nil { + return x.Plugins } return nil } -func init() { - proto.RegisterType((*PluginIdentifier)(nil), "generated.PluginIdentifier") - proto.RegisterType((*ListPluginsResponse)(nil), "generated.ListPluginsResponse") +var File_PluginLister_proto protoreflect.FileDescriptor + +var file_PluginLister_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x1a, + 0x0c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x54, 0x0a, + 0x10, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6b, + 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x22, 0x4c, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x07, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x32, 0x4f, 0x0a, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x3f, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x12, 0x10, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x1e, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x76, 0x6d, 0x77, 0x61, 0x72, 0x65, 0x2d, 0x74, 0x61, 0x6e, 0x7a, 0x75, 0x2f, 0x76, 0x65, + 0x6c, 0x65, 0x72, 0x6f, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_PluginLister_proto_rawDescOnce sync.Once + file_PluginLister_proto_rawDescData = file_PluginLister_proto_rawDesc +) + +func file_PluginLister_proto_rawDescGZIP() []byte { + file_PluginLister_proto_rawDescOnce.Do(func() { + file_PluginLister_proto_rawDescData = protoimpl.X.CompressGZIP(file_PluginLister_proto_rawDescData) + }) + return file_PluginLister_proto_rawDescData +} + +var file_PluginLister_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_PluginLister_proto_goTypes = []interface{}{ + (*PluginIdentifier)(nil), // 0: generated.PluginIdentifier + (*ListPluginsResponse)(nil), // 1: generated.ListPluginsResponse + (*Empty)(nil), // 2: generated.Empty +} +var file_PluginLister_proto_depIdxs = []int32{ + 0, // 0: generated.ListPluginsResponse.plugins:type_name -> generated.PluginIdentifier + 2, // 1: 
generated.PluginLister.ListPlugins:input_type -> generated.Empty + 1, // 2: generated.PluginLister.ListPlugins:output_type -> generated.ListPluginsResponse + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_PluginLister_proto_init() } +func file_PluginLister_proto_init() { + if File_PluginLister_proto != nil { + return + } + file_Shared_proto_init() + if !protoimpl.UnsafeEnabled { + file_PluginLister_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginIdentifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_PluginLister_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPluginsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_PluginLister_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_PluginLister_proto_goTypes, + DependencyIndexes: file_PluginLister_proto_depIdxs, + MessageInfos: file_PluginLister_proto_msgTypes, + }.Build() + File_PluginLister_proto = out.File + file_PluginLister_proto_rawDesc = nil + file_PluginLister_proto_goTypes = nil + file_PluginLister_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for PluginLister service +const _ = grpc.SupportPackageIsVersion6 +// PluginListerClient is the client API for PluginLister service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type PluginListerClient interface { ListPlugins(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ListPluginsResponse, error) } type pluginListerClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewPluginListerClient(cc *grpc.ClientConn) PluginListerClient { +func NewPluginListerClient(cc grpc.ClientConnInterface) PluginListerClient { return &pluginListerClient{cc} } func (c *pluginListerClient) ListPlugins(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ListPluginsResponse, error) { out := new(ListPluginsResponse) - err := grpc.Invoke(ctx, "/generated.PluginLister/ListPlugins", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.PluginLister/ListPlugins", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for PluginLister service - +// PluginListerServer is the server API for PluginLister service. type PluginListerServer interface { ListPlugins(context.Context, *Empty) (*ListPluginsResponse, error) } +// UnimplementedPluginListerServer can be embedded to have forward compatible implementations. 
+type UnimplementedPluginListerServer struct { +} + +func (*UnimplementedPluginListerServer) ListPlugins(context.Context, *Empty) (*ListPluginsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListPlugins not implemented") +} + func RegisterPluginListerServer(s *grpc.Server, srv PluginListerServer) { s.RegisterService(&_PluginLister_serviceDesc, srv) } @@ -141,22 +327,3 @@ var _PluginLister_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "PluginLister.proto", } - -func init() { proto.RegisterFile("PluginLister.proto", fileDescriptor4) } - -var fileDescriptor4 = []byte{ - // 201 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x0a, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xf3, 0xc9, 0x2c, 0x2e, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, - 0x4c, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0x91, 0xe2, 0x09, 0xce, 0x48, 0x2c, 0x4a, - 0x4d, 0x81, 0x48, 0x28, 0x85, 0x70, 0x09, 0x40, 0x94, 0x7b, 0xa6, 0xa4, 0xe6, 0x95, 0x64, 0xa6, - 0x65, 0xa6, 0x16, 0x09, 0x49, 0x70, 0xb1, 0x27, 0xe7, 0xe7, 0xe6, 0x26, 0xe6, 0xa5, 0x48, 0x30, - 0x2a, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x42, 0x42, 0x5c, 0x2c, 0xd9, 0x99, 0x79, 0x29, 0x12, - 0x4c, 0x60, 0x61, 0x30, 0x1b, 0x24, 0x96, 0x97, 0x98, 0x9b, 0x2a, 0xc1, 0x0c, 0x11, 0x03, 0xb1, - 0x95, 0x7c, 0xb8, 0x84, 0x41, 0xd6, 0x43, 0x4c, 0x2e, 0x0e, 0x4a, 0x2d, 0x2e, 0xc8, 0xcf, 0x2b, - 0x4e, 0x15, 0x32, 0xe5, 0x62, 0x2f, 0x80, 0x08, 0x49, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0x49, - 0xeb, 0xc1, 0xdd, 0xa5, 0x87, 0xee, 0x8c, 0x20, 0x98, 0x5a, 0x23, 0x7f, 0x2e, 0x1e, 0x64, 0x2f, - 0x09, 0xd9, 0x73, 0x71, 0x23, 0x99, 0x2e, 0x24, 0x80, 0x64, 0x88, 0x6b, 0x6e, 0x41, 0x49, 0xa5, - 0x94, 0x1c, 0x92, 0x08, 0x16, 0x77, 0x24, 0xb1, 0x81, 0xfd, 0x6e, 0x0c, 0x08, 0x00, 0x00, 0xff, - 0xff, 0x0e, 0xb5, 0xe4, 0x0c, 0x2a, 0x01, 0x00, 0x00, -} diff --git a/pkg/plugin/generated/RestoreItemAction.pb.go b/pkg/plugin/generated/RestoreItemAction.pb.go index da06ffdac7..fc080853a5 100644 --- a/pkg/plugin/generated/RestoreItemAction.pb.go +++ b/pkg/plugin/generated/RestoreItemAction.pb.go @@ -1,167 +1,457 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.14.0 // source: RestoreItemAction.proto package generated -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 type RestoreItemActionExecuteRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` Item []byte `protobuf:"bytes,2,opt,name=item,proto3" json:"item,omitempty"` Restore []byte `protobuf:"bytes,3,opt,name=restore,proto3" json:"restore,omitempty"` ItemFromBackup []byte `protobuf:"bytes,4,opt,name=itemFromBackup,proto3" json:"itemFromBackup,omitempty"` } -func (m *RestoreItemActionExecuteRequest) Reset() { *m = RestoreItemActionExecuteRequest{} } -func (m *RestoreItemActionExecuteRequest) String() string { return proto.CompactTextString(m) } -func (*RestoreItemActionExecuteRequest) ProtoMessage() {} +func (x *RestoreItemActionExecuteRequest) Reset() { + *x = RestoreItemActionExecuteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_RestoreItemAction_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreItemActionExecuteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreItemActionExecuteRequest) ProtoMessage() {} + +func (x *RestoreItemActionExecuteRequest) ProtoReflect() protoreflect.Message { + mi := &file_RestoreItemAction_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreItemActionExecuteRequest.ProtoReflect.Descriptor instead. func (*RestoreItemActionExecuteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{0} + return file_RestoreItemAction_proto_rawDescGZIP(), []int{0} } -func (m *RestoreItemActionExecuteRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *RestoreItemActionExecuteRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *RestoreItemActionExecuteRequest) GetItem() []byte { - if m != nil { - return m.Item +func (x *RestoreItemActionExecuteRequest) GetItem() []byte { + if x != nil { + return x.Item } return nil } -func (m *RestoreItemActionExecuteRequest) GetRestore() []byte { - if m != nil { - return m.Restore +func (x *RestoreItemActionExecuteRequest) GetRestore() []byte { + if x != nil { + return x.Restore } return nil } -func (m *RestoreItemActionExecuteRequest) GetItemFromBackup() []byte { - if m != nil { - return m.ItemFromBackup +func (x *RestoreItemActionExecuteRequest) GetItemFromBackup() []byte { + if x != nil { + return x.ItemFromBackup } return nil } type RestoreItemActionExecuteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Item []byte `protobuf:"bytes,1,opt,name=item,proto3" json:"item,omitempty"` - AdditionalItems []*ResourceIdentifier `protobuf:"bytes,2,rep,name=additionalItems" json:"additionalItems,omitempty"` - SkipRestore bool `protobuf:"varint,3,opt,name=skipRestore" json:"skipRestore,omitempty"` + AdditionalItems []*ResourceIdentifier `protobuf:"bytes,2,rep,name=additionalItems,proto3" json:"additionalItems,omitempty"` + SkipRestore bool `protobuf:"varint,3,opt,name=skipRestore,proto3" json:"skipRestore,omitempty"` } -func (m *RestoreItemActionExecuteResponse) Reset() { *m = RestoreItemActionExecuteResponse{} } -func (m 
*RestoreItemActionExecuteResponse) String() string { return proto.CompactTextString(m) } -func (*RestoreItemActionExecuteResponse) ProtoMessage() {} +func (x *RestoreItemActionExecuteResponse) Reset() { + *x = RestoreItemActionExecuteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_RestoreItemAction_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreItemActionExecuteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreItemActionExecuteResponse) ProtoMessage() {} + +func (x *RestoreItemActionExecuteResponse) ProtoReflect() protoreflect.Message { + mi := &file_RestoreItemAction_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreItemActionExecuteResponse.ProtoReflect.Descriptor instead. func (*RestoreItemActionExecuteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{1} + return file_RestoreItemAction_proto_rawDescGZIP(), []int{1} } -func (m *RestoreItemActionExecuteResponse) GetItem() []byte { - if m != nil { - return m.Item +func (x *RestoreItemActionExecuteResponse) GetItem() []byte { + if x != nil { + return x.Item } return nil } -func (m *RestoreItemActionExecuteResponse) GetAdditionalItems() []*ResourceIdentifier { - if m != nil { - return m.AdditionalItems +func (x *RestoreItemActionExecuteResponse) GetAdditionalItems() []*ResourceIdentifier { + if x != nil { + return x.AdditionalItems } return nil } -func (m *RestoreItemActionExecuteResponse) GetSkipRestore() bool { - if m != nil { - return m.SkipRestore +func (x *RestoreItemActionExecuteResponse) GetSkipRestore() bool { + if x != nil { + return x.SkipRestore } return false } type RestoreItemActionAppliesToRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` } -func (m *RestoreItemActionAppliesToRequest) Reset() { *m = RestoreItemActionAppliesToRequest{} } -func (m *RestoreItemActionAppliesToRequest) String() string { return proto.CompactTextString(m) } -func (*RestoreItemActionAppliesToRequest) ProtoMessage() {} +func (x *RestoreItemActionAppliesToRequest) Reset() { + *x = RestoreItemActionAppliesToRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_RestoreItemAction_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreItemActionAppliesToRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreItemActionAppliesToRequest) ProtoMessage() {} + +func (x *RestoreItemActionAppliesToRequest) ProtoReflect() protoreflect.Message { + mi := &file_RestoreItemAction_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreItemActionAppliesToRequest.ProtoReflect.Descriptor instead. 
func (*RestoreItemActionAppliesToRequest) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{2} + return file_RestoreItemAction_proto_rawDescGZIP(), []int{2} } -func (m *RestoreItemActionAppliesToRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *RestoreItemActionAppliesToRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } type RestoreItemActionAppliesToResponse struct { - ResourceSelector *ResourceSelector `protobuf:"bytes,1,opt,name=ResourceSelector" json:"ResourceSelector,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResourceSelector *ResourceSelector `protobuf:"bytes,1,opt,name=ResourceSelector,proto3" json:"ResourceSelector,omitempty"` +} + +func (x *RestoreItemActionAppliesToResponse) Reset() { + *x = RestoreItemActionAppliesToResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_RestoreItemAction_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreItemActionAppliesToResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreItemActionAppliesToResponse) ProtoMessage() {} + +func (x *RestoreItemActionAppliesToResponse) ProtoReflect() protoreflect.Message { + mi := &file_RestoreItemAction_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *RestoreItemActionAppliesToResponse) Reset() { *m = RestoreItemActionAppliesToResponse{} } -func (m *RestoreItemActionAppliesToResponse) String() string { return proto.CompactTextString(m) } -func (*RestoreItemActionAppliesToResponse) ProtoMessage() {} +// Deprecated: Use RestoreItemActionAppliesToResponse.ProtoReflect.Descriptor instead. 
func (*RestoreItemActionAppliesToResponse) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{3} + return file_RestoreItemAction_proto_rawDescGZIP(), []int{3} } -func (m *RestoreItemActionAppliesToResponse) GetResourceSelector() *ResourceSelector { - if m != nil { - return m.ResourceSelector +func (x *RestoreItemActionAppliesToResponse) GetResourceSelector() *ResourceSelector { + if x != nil { + return x.ResourceSelector } return nil } -func init() { - proto.RegisterType((*RestoreItemActionExecuteRequest)(nil), "generated.RestoreItemActionExecuteRequest") - proto.RegisterType((*RestoreItemActionExecuteResponse)(nil), "generated.RestoreItemActionExecuteResponse") - proto.RegisterType((*RestoreItemActionAppliesToRequest)(nil), "generated.RestoreItemActionAppliesToRequest") - proto.RegisterType((*RestoreItemActionAppliesToResponse)(nil), "generated.RestoreItemActionAppliesToResponse") +var File_RestoreItemAction_proto protoreflect.FileDescriptor + +var file_RestoreItemAction_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x1a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x8f, 0x01, 0x0a, 0x1f, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x74, + 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, + 0x0a, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x69, 0x74, + 0x65, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x26, 0x0a, 0x0e, + 0x69, 0x74, 0x65, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x69, 0x74, 0x65, 0x6d, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x22, 0xa1, 0x01, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x74, 0x65, + 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x12, 0x47, 0x0a, + 0x0f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, + 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x22, 0x3b, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x70, 0x70, + 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x06, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, 0x6d, 0x0a, 0x22, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x65, + 0x73, 0x54, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x52, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x32, 0xe1, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, 0x0a, 0x09, 0x41, 0x70, + 0x70, 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x12, 0x2c, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x07, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x12, + 0x2a, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, + 0x74, 0x65, 0x6d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x76, 0x6d, 0x77, 0x61, 0x72, 0x65, 0x2d, 0x74, 0x61, + 0x6e, 0x7a, 0x75, 0x2f, 0x76, 0x65, 0x6c, 0x65, 0x72, 0x6f, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_RestoreItemAction_proto_rawDescOnce sync.Once + file_RestoreItemAction_proto_rawDescData = file_RestoreItemAction_proto_rawDesc +) + +func file_RestoreItemAction_proto_rawDescGZIP() []byte { + file_RestoreItemAction_proto_rawDescOnce.Do(func() { + file_RestoreItemAction_proto_rawDescData = protoimpl.X.CompressGZIP(file_RestoreItemAction_proto_rawDescData) + }) + return file_RestoreItemAction_proto_rawDescData +} + +var file_RestoreItemAction_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_RestoreItemAction_proto_goTypes = []interface{}{ + (*RestoreItemActionExecuteRequest)(nil), // 0: generated.RestoreItemActionExecuteRequest + (*RestoreItemActionExecuteResponse)(nil), // 1: generated.RestoreItemActionExecuteResponse + (*RestoreItemActionAppliesToRequest)(nil), // 2: generated.RestoreItemActionAppliesToRequest + (*RestoreItemActionAppliesToResponse)(nil), // 3: generated.RestoreItemActionAppliesToResponse + (*ResourceIdentifier)(nil), // 4: generated.ResourceIdentifier + (*ResourceSelector)(nil), 
// 5: generated.ResourceSelector +} +var file_RestoreItemAction_proto_depIdxs = []int32{ + 4, // 0: generated.RestoreItemActionExecuteResponse.additionalItems:type_name -> generated.ResourceIdentifier + 5, // 1: generated.RestoreItemActionAppliesToResponse.ResourceSelector:type_name -> generated.ResourceSelector + 2, // 2: generated.RestoreItemAction.AppliesTo:input_type -> generated.RestoreItemActionAppliesToRequest + 0, // 3: generated.RestoreItemAction.Execute:input_type -> generated.RestoreItemActionExecuteRequest + 3, // 4: generated.RestoreItemAction.AppliesTo:output_type -> generated.RestoreItemActionAppliesToResponse + 1, // 5: generated.RestoreItemAction.Execute:output_type -> generated.RestoreItemActionExecuteResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_RestoreItemAction_proto_init() } +func file_RestoreItemAction_proto_init() { + if File_RestoreItemAction_proto != nil { + return + } + file_Shared_proto_init() + if !protoimpl.UnsafeEnabled { + file_RestoreItemAction_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreItemActionExecuteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_RestoreItemAction_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreItemActionExecuteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_RestoreItemAction_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreItemActionAppliesToRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_RestoreItemAction_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreItemActionAppliesToResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_RestoreItemAction_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_RestoreItemAction_proto_goTypes, + DependencyIndexes: file_RestoreItemAction_proto_depIdxs, + MessageInfos: file_RestoreItemAction_proto_msgTypes, + }.Build() + File_RestoreItemAction_proto = out.File + file_RestoreItemAction_proto_rawDesc = nil + file_RestoreItemAction_proto_goTypes = nil + file_RestoreItemAction_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for RestoreItemAction service +const _ = grpc.SupportPackageIsVersion6 +// RestoreItemActionClient is the client API for RestoreItemAction service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type RestoreItemActionClient interface { AppliesTo(ctx context.Context, in *RestoreItemActionAppliesToRequest, opts ...grpc.CallOption) (*RestoreItemActionAppliesToResponse, error) Execute(ctx context.Context, in *RestoreItemActionExecuteRequest, opts ...grpc.CallOption) (*RestoreItemActionExecuteResponse, error) } type restoreItemActionClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewRestoreItemActionClient(cc *grpc.ClientConn) RestoreItemActionClient { +func NewRestoreItemActionClient(cc grpc.ClientConnInterface) RestoreItemActionClient { return &restoreItemActionClient{cc} } func (c *restoreItemActionClient) AppliesTo(ctx context.Context, in *RestoreItemActionAppliesToRequest, opts ...grpc.CallOption) (*RestoreItemActionAppliesToResponse, error) { out := new(RestoreItemActionAppliesToResponse) - err := grpc.Invoke(ctx, "/generated.RestoreItemAction/AppliesTo", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.RestoreItemAction/AppliesTo", in, out, opts...) if err != nil { return nil, err } @@ -170,20 +460,30 @@ func (c *restoreItemActionClient) AppliesTo(ctx context.Context, in *RestoreItem func (c *restoreItemActionClient) Execute(ctx context.Context, in *RestoreItemActionExecuteRequest, opts ...grpc.CallOption) (*RestoreItemActionExecuteResponse, error) { out := new(RestoreItemActionExecuteResponse) - err := grpc.Invoke(ctx, "/generated.RestoreItemAction/Execute", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.RestoreItemAction/Execute", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for RestoreItemAction service - +// RestoreItemActionServer is the server API for RestoreItemAction service. type RestoreItemActionServer interface { AppliesTo(context.Context, *RestoreItemActionAppliesToRequest) (*RestoreItemActionAppliesToResponse, error) Execute(context.Context, *RestoreItemActionExecuteRequest) (*RestoreItemActionExecuteResponse, error) } +// UnimplementedRestoreItemActionServer can be embedded to have forward compatible implementations. 
+type UnimplementedRestoreItemActionServer struct { +} + +func (*UnimplementedRestoreItemActionServer) AppliesTo(context.Context, *RestoreItemActionAppliesToRequest) (*RestoreItemActionAppliesToResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AppliesTo not implemented") +} +func (*UnimplementedRestoreItemActionServer) Execute(context.Context, *RestoreItemActionExecuteRequest) (*RestoreItemActionExecuteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Execute not implemented") +} + func RegisterRestoreItemActionServer(s *grpc.Server, srv RestoreItemActionServer) { s.RegisterService(&_RestoreItemAction_serviceDesc, srv) } @@ -240,30 +540,3 @@ var _RestoreItemAction_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "RestoreItemAction.proto", } - -func init() { proto.RegisterFile("RestoreItemAction.proto", fileDescriptor5) } - -var fileDescriptor5 = []byte{ - // 332 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xdd, 0x4e, 0xc2, 0x30, - 0x14, 0x4e, 0x81, 0x80, 0x1c, 0x88, 0x3f, 0xbd, 0xd0, 0x06, 0x63, 0x9c, 0xbb, 0x30, 0xc4, 0x1f, - 0x2e, 0xf0, 0xd2, 0x2b, 0x4c, 0x94, 0x70, 0x5b, 0x7c, 0x81, 0xb1, 0x1d, 0xa1, 0x61, 0x5b, 0x6b, - 0xdb, 0x25, 0xbe, 0x85, 0xcf, 0xe0, 0xa3, 0xf9, 0x26, 0x86, 0x31, 0x96, 0xc1, 0x74, 0x72, 0xd7, - 0x73, 0xfa, 0x7d, 0xe7, 0xfb, 0xbe, 0xf6, 0xc0, 0x19, 0x47, 0x63, 0xa5, 0xc6, 0x89, 0xc5, 0x68, - 0xe4, 0x5b, 0x21, 0xe3, 0x81, 0xd2, 0xd2, 0x4a, 0xda, 0x9e, 0x63, 0x8c, 0xda, 0xb3, 0x18, 0xf4, - 0xba, 0xd3, 0x85, 0xa7, 0x31, 0x58, 0x5f, 0xb8, 0x9f, 0x04, 0x2e, 0x4b, 0xa4, 0xe7, 0x0f, 0xf4, - 0x13, 0x8b, 0x1c, 0xdf, 0x13, 0x34, 0x96, 0x9e, 0x42, 0x53, 0x85, 0xc9, 0x5c, 0xc4, 0x8c, 0x38, - 0xa4, 0xdf, 0xe6, 0x59, 0x45, 0x29, 0x34, 0x84, 0xc5, 0x88, 0xd5, 0x1c, 0xd2, 0xef, 0xf2, 0xf4, - 0x4c, 0x19, 0xb4, 0xf4, 0x7a, 0x1c, 0xab, 0xa7, 0xed, 0x4d, 0x49, 0xaf, 0xe1, 0x70, 0x85, 0x78, - 0xd1, 0x32, 0x7a, 0xf2, 0xfc, 0x65, 0xa2, 0x58, 0x23, 0x05, 0xec, 0x74, 0xdd, 0x2f, 0x02, 0xce, - 0xdf, 0x8e, 0x8c, 0x92, 0xb1, 0xc1, 0x5c, 0x9a, 0x14, 0xa4, 0xc7, 0x70, 0xe4, 0x05, 0x81, 0x58, - 0xc1, 0xbd, 0x70, 0x45, 0x35, 0xac, 0xe6, 0xd4, 0xfb, 0x9d, 0xe1, 0xc5, 0x20, 0x4f, 0x3f, 0xe0, - 0x68, 0x64, 0xa2, 0x7d, 0x9c, 0x04, 0x18, 0x5b, 0xf1, 0x26, 0x50, 0xf3, 0x5d, 0x16, 0x75, 0xa0, - 0x63, 0x96, 0x42, 0xf1, 0x42, 0x8e, 0x03, 0x5e, 0x6c, 0xb9, 0x8f, 0x70, 0x55, 0xb2, 0x38, 0x52, - 0x2a, 0x14, 0x68, 0x5e, 0xe5, 0x3f, 0xcf, 0xe6, 0x46, 0xe0, 0x56, 0x91, 0xb3, 0x84, 0x63, 0x38, - 0xde, 0x78, 0x9d, 0x62, 0x88, 0xbe, 0x95, 0x3a, 0x9d, 0xd3, 0x19, 0x9e, 0xff, 0x12, 0x67, 0x03, - 0xe1, 0x25, 0xd2, 0xf0, 0x9b, 0xc0, 0x49, 0x49, 0x8f, 0x2e, 0xa0, 0x9d, 0x6b, 0xd2, 0xbb, 0xed, - 0x89, 0xd5, 0xb9, 0x7a, 0xf7, 0x7b, 0xa2, 0xb3, 0x20, 0x33, 0x68, 0x65, 0xbf, 0x47, 0x6f, 0xaa, - 0x98, 0xdb, 0x4b, 0xd7, 0xbb, 0xdd, 0x0b, 0xbb, 0xd6, 0x98, 0x35, 0xd3, 0x65, 0x7e, 0xf8, 0x09, - 0x00, 0x00, 0xff, 0xff, 0x1b, 0x4c, 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x00, -} diff --git a/pkg/plugin/generated/Shared.pb.go b/pkg/plugin/generated/Shared.pb.go index 8f2716da51..1c84177dc8 100644 --- a/pkg/plugin/generated/Shared.pb.go +++ b/pkg/plugin/generated/Shared.pb.go @@ -1,190 +1,483 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v3.14.0 // source: Shared.proto package generated -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_Shared_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_Shared_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_Shared_proto_rawDescGZIP(), []int{0} +} type Stack struct { - Frames []*StackFrame `protobuf:"bytes,1,rep,name=frames" json:"frames,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Frames []*StackFrame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` +} + +func (x *Stack) Reset() { + *x = Stack{} + if protoimpl.UnsafeEnabled { + mi := &file_Shared_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stack) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Stack) Reset() { *m = Stack{} } -func (m *Stack) String() string { return proto.CompactTextString(m) } -func (*Stack) ProtoMessage() {} -func (*Stack) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } +func (*Stack) ProtoMessage() {} -func (m *Stack) GetFrames() []*StackFrame { - if m != nil { - return m.Frames +func (x *Stack) ProtoReflect() protoreflect.Message { + mi := &file_Shared_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stack.ProtoReflect.Descriptor instead. 
+func (*Stack) Descriptor() ([]byte, []int) { + return file_Shared_proto_rawDescGZIP(), []int{1} +} + +func (x *Stack) GetFrames() []*StackFrame { + if x != nil { + return x.Frames } return nil } type StackFrame struct { - File string `protobuf:"bytes,1,opt,name=file" json:"file,omitempty"` - Line int32 `protobuf:"varint,2,opt,name=line" json:"line,omitempty"` - Function string `protobuf:"bytes,3,opt,name=function" json:"function,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + File string `protobuf:"bytes,1,opt,name=file,proto3" json:"file,omitempty"` + Line int32 `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"` + Function string `protobuf:"bytes,3,opt,name=function,proto3" json:"function,omitempty"` +} + +func (x *StackFrame) Reset() { + *x = StackFrame{} + if protoimpl.UnsafeEnabled { + mi := &file_Shared_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StackFrame) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StackFrame) ProtoMessage() {} + +func (x *StackFrame) ProtoReflect() protoreflect.Message { + mi := &file_Shared_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *StackFrame) Reset() { *m = StackFrame{} } -func (m *StackFrame) String() string { return proto.CompactTextString(m) } -func (*StackFrame) ProtoMessage() {} -func (*StackFrame) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{2} } +// Deprecated: Use StackFrame.ProtoReflect.Descriptor instead. +func (*StackFrame) Descriptor() ([]byte, []int) { + return file_Shared_proto_rawDescGZIP(), []int{2} +} -func (m *StackFrame) GetFile() string { - if m != nil { - return m.File +func (x *StackFrame) GetFile() string { + if x != nil { + return x.File } return "" } -func (m *StackFrame) GetLine() int32 { - if m != nil { - return m.Line +func (x *StackFrame) GetLine() int32 { + if x != nil { + return x.Line } return 0 } -func (m *StackFrame) GetFunction() string { - if m != nil { - return m.Function +func (x *StackFrame) GetFunction() string { + if x != nil { + return x.Function } return "" } type ResourceIdentifier struct { - Group string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Resource string `protobuf:"bytes,2,opt,name=resource" json:"resource,omitempty"` - Namespace string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Group string `protobuf:"bytes,1,opt,name=group,proto3" json:"group,omitempty"` + Resource string `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` } -func (m *ResourceIdentifier) Reset() { *m = ResourceIdentifier{} } -func (m *ResourceIdentifier) String() string { return proto.CompactTextString(m) } -func (*ResourceIdentifier) ProtoMessage() {} -func (*ResourceIdentifier) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{3} } +func (x *ResourceIdentifier) Reset() { + *x = ResourceIdentifier{} + if protoimpl.UnsafeEnabled { + mi 
:= &file_Shared_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *ResourceIdentifier) GetGroup() string { - if m != nil { - return m.Group +func (*ResourceIdentifier) ProtoMessage() {} + +func (x *ResourceIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_Shared_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceIdentifier.ProtoReflect.Descriptor instead. +func (*ResourceIdentifier) Descriptor() ([]byte, []int) { + return file_Shared_proto_rawDescGZIP(), []int{3} +} + +func (x *ResourceIdentifier) GetGroup() string { + if x != nil { + return x.Group } return "" } -func (m *ResourceIdentifier) GetResource() string { - if m != nil { - return m.Resource +func (x *ResourceIdentifier) GetResource() string { + if x != nil { + return x.Resource } return "" } -func (m *ResourceIdentifier) GetNamespace() string { - if m != nil { - return m.Namespace +func (x *ResourceIdentifier) GetNamespace() string { + if x != nil { + return x.Namespace } return "" } -func (m *ResourceIdentifier) GetName() string { - if m != nil { - return m.Name +func (x *ResourceIdentifier) GetName() string { + if x != nil { + return x.Name } return "" } type ResourceSelector struct { - IncludedNamespaces []string `protobuf:"bytes,1,rep,name=includedNamespaces" json:"includedNamespaces,omitempty"` - ExcludedNamespaces []string `protobuf:"bytes,2,rep,name=excludedNamespaces" json:"excludedNamespaces,omitempty"` - IncludedResources []string `protobuf:"bytes,3,rep,name=includedResources" json:"includedResources,omitempty"` - ExcludedResources []string `protobuf:"bytes,4,rep,name=excludedResources" json:"excludedResources,omitempty"` - Selector string `protobuf:"bytes,5,opt,name=selector" json:"selector,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludedNamespaces []string `protobuf:"bytes,1,rep,name=includedNamespaces,proto3" json:"includedNamespaces,omitempty"` + ExcludedNamespaces []string `protobuf:"bytes,2,rep,name=excludedNamespaces,proto3" json:"excludedNamespaces,omitempty"` + IncludedResources []string `protobuf:"bytes,3,rep,name=includedResources,proto3" json:"includedResources,omitempty"` + ExcludedResources []string `protobuf:"bytes,4,rep,name=excludedResources,proto3" json:"excludedResources,omitempty"` + Selector string `protobuf:"bytes,5,opt,name=selector,proto3" json:"selector,omitempty"` +} + +func (x *ResourceSelector) Reset() { + *x = ResourceSelector{} + if protoimpl.UnsafeEnabled { + mi := &file_Shared_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceSelector) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ResourceSelector) Reset() { *m = ResourceSelector{} } -func (m *ResourceSelector) String() string { return proto.CompactTextString(m) } -func (*ResourceSelector) ProtoMessage() {} -func (*ResourceSelector) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{4} } +func (*ResourceSelector) ProtoMessage() {} -func (m *ResourceSelector) GetIncludedNamespaces() []string { - if m != nil { - return m.IncludedNamespaces +func (x *ResourceSelector) 
ProtoReflect() protoreflect.Message { + mi := &file_Shared_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceSelector.ProtoReflect.Descriptor instead. +func (*ResourceSelector) Descriptor() ([]byte, []int) { + return file_Shared_proto_rawDescGZIP(), []int{4} +} + +func (x *ResourceSelector) GetIncludedNamespaces() []string { + if x != nil { + return x.IncludedNamespaces } return nil } -func (m *ResourceSelector) GetExcludedNamespaces() []string { - if m != nil { - return m.ExcludedNamespaces +func (x *ResourceSelector) GetExcludedNamespaces() []string { + if x != nil { + return x.ExcludedNamespaces } return nil } -func (m *ResourceSelector) GetIncludedResources() []string { - if m != nil { - return m.IncludedResources +func (x *ResourceSelector) GetIncludedResources() []string { + if x != nil { + return x.IncludedResources } return nil } -func (m *ResourceSelector) GetExcludedResources() []string { - if m != nil { - return m.ExcludedResources +func (x *ResourceSelector) GetExcludedResources() []string { + if x != nil { + return x.ExcludedResources } return nil } -func (m *ResourceSelector) GetSelector() string { - if m != nil { - return m.Selector +func (x *ResourceSelector) GetSelector() string { + if x != nil { + return x.Selector } return "" } -func init() { - proto.RegisterType((*Empty)(nil), "generated.Empty") - proto.RegisterType((*Stack)(nil), "generated.Stack") - proto.RegisterType((*StackFrame)(nil), "generated.StackFrame") - proto.RegisterType((*ResourceIdentifier)(nil), "generated.ResourceIdentifier") - proto.RegisterType((*ResourceSelector)(nil), "generated.ResourceSelector") -} - -func init() { proto.RegisterFile("Shared.proto", fileDescriptor6) } - -var fileDescriptor6 = []byte{ - // 294 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xc1, 0x4e, 0xb5, 0x30, - 0x10, 0x85, 0xc3, 0x05, 0xee, 0xff, 0x33, 0xba, 0xd0, 0x46, 0x93, 0xc6, 0xb8, 0x20, 0xac, 0x58, - 0x28, 0x0b, 0x4d, 0x7c, 0x03, 0x4d, 0xdc, 0x18, 0x53, 0x9e, 0x00, 0xcb, 0x70, 0x6d, 0xe4, 0xb6, - 0xa4, 0x94, 0xe4, 0xfa, 0xca, 0x3e, 0x85, 0x69, 0x4b, 0x61, 0x81, 0xbb, 0x39, 0x73, 0x3e, 0xce, - 0x0c, 0x93, 0xc2, 0x79, 0xfd, 0xd9, 0x68, 0x6c, 0xab, 0x41, 0x2b, 0xa3, 0x48, 0x76, 0x40, 0x89, - 0xba, 0x31, 0xd8, 0x16, 0xff, 0x20, 0x7d, 0x3e, 0x0e, 0xe6, 0xbb, 0x78, 0x82, 0xb4, 0x36, 0x0d, - 0xff, 0x22, 0xf7, 0xb0, 0xef, 0x74, 0x73, 0xc4, 0x91, 0x46, 0x79, 0x5c, 0x9e, 0x3d, 0x5c, 0x57, - 0x0b, 0x5d, 0x39, 0xe2, 0xc5, 0xba, 0x6c, 0x86, 0x8a, 0x77, 0x80, 0xb5, 0x4b, 0x08, 0x24, 0x9d, - 0xe8, 0x91, 0x46, 0x79, 0x54, 0x66, 0xcc, 0xd5, 0xb6, 0xd7, 0x0b, 0x89, 0x74, 0x97, 0x47, 0x65, - 0xca, 0x5c, 0x4d, 0x6e, 0xe0, 0x7f, 0x37, 0x49, 0x6e, 0x84, 0x92, 0x34, 0x76, 0xec, 0xa2, 0x8b, - 0x13, 0x10, 0x86, 0xa3, 0x9a, 0x34, 0xc7, 0xd7, 0x16, 0xa5, 0x11, 0x9d, 0x40, 0x4d, 0xae, 0x20, - 0x3d, 0x68, 0x35, 0x0d, 0x73, 0xb4, 0x17, 0x36, 0x47, 0xcf, 0xac, 0xcb, 0xcf, 0xd8, 0xa2, 0xc9, - 0x2d, 0x64, 0xd2, 0xae, 0x38, 0x34, 0x1c, 0xe7, 0x21, 0x6b, 0xc3, 0x6e, 0x65, 0x05, 0x4d, 0xfc, - 0xa6, 0xb6, 0x2e, 0x7e, 0x22, 0xb8, 0x08, 0xa3, 0x6b, 0xec, 0x91, 0x1b, 0xa5, 0x49, 0x05, 0x44, - 0x48, 0xde, 0x4f, 0x2d, 0xb6, 0x6f, 0xe1, 0x6b, 0x7f, 0x9b, 0x8c, 0xfd, 0xe1, 0x58, 0x1e, 0x4f, - 0x1b, 0x7e, 0xe7, 0xf9, 0xad, 0x43, 0xee, 0xe0, 0x32, 0xa4, 0x84, 0xd9, 0x23, 0x8d, 0x1d, 0xbe, - 0x35, 
0x2c, 0x1d, 0x32, 0x56, 0x3a, 0xf1, 0xf4, 0xc6, 0xb0, 0xe7, 0x19, 0xe7, 0xff, 0xa0, 0xa9, - 0x3f, 0x4f, 0xd0, 0x1f, 0x7b, 0xf7, 0x16, 0x1e, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x83, - 0xa1, 0x97, 0x1b, 0x02, 0x00, 0x00, +var File_Shared_proto protoreflect.FileDescriptor + +var file_Shared_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x36, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x2d, 0x0a, 0x06, 0x66, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x46, 0x72, 0x61, + 0x6d, 0x65, 0x52, 0x06, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x50, 0x0a, 0x0a, 0x53, 0x74, + 0x61, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x78, 0x0a, 0x12, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xea, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x2e, 0x0a, 0x12, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 
+ 0x74, 0x6f, 0x72, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x76, 0x6d, 0x77, 0x61, 0x72, 0x65, 0x2d, 0x74, 0x61, 0x6e, 0x7a, 0x75, 0x2f, 0x76, + 0x65, 0x6c, 0x65, 0x72, 0x6f, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_Shared_proto_rawDescOnce sync.Once + file_Shared_proto_rawDescData = file_Shared_proto_rawDesc +) + +func file_Shared_proto_rawDescGZIP() []byte { + file_Shared_proto_rawDescOnce.Do(func() { + file_Shared_proto_rawDescData = protoimpl.X.CompressGZIP(file_Shared_proto_rawDescData) + }) + return file_Shared_proto_rawDescData +} + +var file_Shared_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_Shared_proto_goTypes = []interface{}{ + (*Empty)(nil), // 0: generated.Empty + (*Stack)(nil), // 1: generated.Stack + (*StackFrame)(nil), // 2: generated.StackFrame + (*ResourceIdentifier)(nil), // 3: generated.ResourceIdentifier + (*ResourceSelector)(nil), // 4: generated.ResourceSelector +} +var file_Shared_proto_depIdxs = []int32{ + 2, // 0: generated.Stack.frames:type_name -> generated.StackFrame + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_Shared_proto_init() } +func file_Shared_proto_init() { + if File_Shared_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_Shared_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_Shared_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stack); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_Shared_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StackFrame); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_Shared_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceIdentifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_Shared_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceSelector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_Shared_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_Shared_proto_goTypes, + DependencyIndexes: file_Shared_proto_depIdxs, + MessageInfos: file_Shared_proto_msgTypes, + }.Build() + File_Shared_proto = out.File + file_Shared_proto_rawDesc = nil + file_Shared_proto_goTypes = nil + file_Shared_proto_depIdxs = nil } diff --git a/pkg/plugin/generated/VolumeSnapshotter.pb.go b/pkg/plugin/generated/VolumeSnapshotter.pb.go index 
ddb8a2d743..f6b754de78 100644 --- a/pkg/plugin/generated/VolumeSnapshotter.pb.go +++ b/pkg/plugin/generated/VolumeSnapshotter.pb.go @@ -1,359 +1,1084 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.14.0 // source: VolumeSnapshotter.proto package generated -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 type CreateVolumeRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - SnapshotID string `protobuf:"bytes,2,opt,name=snapshotID" json:"snapshotID,omitempty"` - VolumeType string `protobuf:"bytes,3,opt,name=volumeType" json:"volumeType,omitempty"` - VolumeAZ string `protobuf:"bytes,4,opt,name=volumeAZ" json:"volumeAZ,omitempty"` - Iops int64 `protobuf:"varint,5,opt,name=iops" json:"iops,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + SnapshotID string `protobuf:"bytes,2,opt,name=snapshotID,proto3" json:"snapshotID,omitempty"` + VolumeType string `protobuf:"bytes,3,opt,name=volumeType,proto3" json:"volumeType,omitempty"` + VolumeAZ string `protobuf:"bytes,4,opt,name=volumeAZ,proto3" json:"volumeAZ,omitempty"` + Iops int64 `protobuf:"varint,5,opt,name=iops,proto3" json:"iops,omitempty"` +} + +func (x *CreateVolumeRequest) Reset() { + *x = CreateVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateVolumeRequest) ProtoMessage() {} + +func (x *CreateVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } -func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*CreateVolumeRequest) ProtoMessage() {} -func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} } +// Deprecated: Use CreateVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{0} +} -func (m *CreateVolumeRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *CreateVolumeRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *CreateVolumeRequest) GetSnapshotID() string { - if m != nil { - return m.SnapshotID +func (x *CreateVolumeRequest) GetSnapshotID() string { + if x != nil { + return x.SnapshotID } return "" } -func (m *CreateVolumeRequest) GetVolumeType() string { - if m != nil { - return m.VolumeType +func (x *CreateVolumeRequest) GetVolumeType() string { + if x != nil { + return x.VolumeType } return "" } -func (m *CreateVolumeRequest) GetVolumeAZ() string { - if m != nil { - return m.VolumeAZ +func (x *CreateVolumeRequest) GetVolumeAZ() string { + if x != nil { + return x.VolumeAZ } return "" } -func (m *CreateVolumeRequest) GetIops() int64 { - if m != nil { - return m.Iops +func (x *CreateVolumeRequest) GetIops() int64 { + if x != nil { + return x.Iops } return 0 } type CreateVolumeResponse struct { - VolumeID string `protobuf:"bytes,1,opt,name=volumeID" json:"volumeID,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeID string `protobuf:"bytes,1,opt,name=volumeID,proto3" json:"volumeID,omitempty"` +} + +func (x *CreateVolumeResponse) Reset() { + *x = CreateVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } -func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*CreateVolumeResponse) ProtoMessage() {} -func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{1} } +func (*CreateVolumeResponse) ProtoMessage() {} -func (m *CreateVolumeResponse) GetVolumeID() string { - if m != nil { - return m.VolumeID +func (x *CreateVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateVolumeResponse.ProtoReflect.Descriptor instead. 
+func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateVolumeResponse) GetVolumeID() string { + if x != nil { + return x.VolumeID } return "" } type GetVolumeInfoRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - VolumeID string `protobuf:"bytes,2,opt,name=volumeID" json:"volumeID,omitempty"` - VolumeAZ string `protobuf:"bytes,3,opt,name=volumeAZ" json:"volumeAZ,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + VolumeID string `protobuf:"bytes,2,opt,name=volumeID,proto3" json:"volumeID,omitempty"` + VolumeAZ string `protobuf:"bytes,3,opt,name=volumeAZ,proto3" json:"volumeAZ,omitempty"` +} + +func (x *GetVolumeInfoRequest) Reset() { + *x = GetVolumeInfoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetVolumeInfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVolumeInfoRequest) ProtoMessage() {} + +func (x *GetVolumeInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *GetVolumeInfoRequest) Reset() { *m = GetVolumeInfoRequest{} } -func (m *GetVolumeInfoRequest) String() string { return proto.CompactTextString(m) } -func (*GetVolumeInfoRequest) ProtoMessage() {} -func (*GetVolumeInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{2} } +// Deprecated: Use GetVolumeInfoRequest.ProtoReflect.Descriptor instead. 
+func (*GetVolumeInfoRequest) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{2} +} -func (m *GetVolumeInfoRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *GetVolumeInfoRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *GetVolumeInfoRequest) GetVolumeID() string { - if m != nil { - return m.VolumeID +func (x *GetVolumeInfoRequest) GetVolumeID() string { + if x != nil { + return x.VolumeID } return "" } -func (m *GetVolumeInfoRequest) GetVolumeAZ() string { - if m != nil { - return m.VolumeAZ +func (x *GetVolumeInfoRequest) GetVolumeAZ() string { + if x != nil { + return x.VolumeAZ } return "" } type GetVolumeInfoResponse struct { - VolumeType string `protobuf:"bytes,1,opt,name=volumeType" json:"volumeType,omitempty"` - Iops int64 `protobuf:"varint,2,opt,name=iops" json:"iops,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeType string `protobuf:"bytes,1,opt,name=volumeType,proto3" json:"volumeType,omitempty"` + Iops int64 `protobuf:"varint,2,opt,name=iops,proto3" json:"iops,omitempty"` +} + +func (x *GetVolumeInfoResponse) Reset() { + *x = GetVolumeInfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetVolumeInfoResponse) Reset() { *m = GetVolumeInfoResponse{} } -func (m *GetVolumeInfoResponse) String() string { return proto.CompactTextString(m) } -func (*GetVolumeInfoResponse) ProtoMessage() {} -func (*GetVolumeInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{3} } +func (x *GetVolumeInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVolumeInfoResponse) ProtoMessage() {} + +func (x *GetVolumeInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -func (m *GetVolumeInfoResponse) GetVolumeType() string { - if m != nil { - return m.VolumeType +// Deprecated: Use GetVolumeInfoResponse.ProtoReflect.Descriptor instead. 
+func (*GetVolumeInfoResponse) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{3} +} + +func (x *GetVolumeInfoResponse) GetVolumeType() string { + if x != nil { + return x.VolumeType } return "" } -func (m *GetVolumeInfoResponse) GetIops() int64 { - if m != nil { - return m.Iops +func (x *GetVolumeInfoResponse) GetIops() int64 { + if x != nil { + return x.Iops } return 0 } type CreateSnapshotRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - VolumeID string `protobuf:"bytes,2,opt,name=volumeID" json:"volumeID,omitempty"` - VolumeAZ string `protobuf:"bytes,3,opt,name=volumeAZ" json:"volumeAZ,omitempty"` - Tags map[string]string `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + VolumeID string `protobuf:"bytes,2,opt,name=volumeID,proto3" json:"volumeID,omitempty"` + VolumeAZ string `protobuf:"bytes,3,opt,name=volumeAZ,proto3" json:"volumeAZ,omitempty"` + Tags map[string]string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CreateSnapshotRequest) Reset() { + *x = CreateSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } -func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*CreateSnapshotRequest) ProtoMessage() {} -func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{4} } +func (x *CreateSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSnapshotRequest) ProtoMessage() {} + +func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -func (m *CreateSnapshotRequest) GetPlugin() string { - if m != nil { - return m.Plugin +// Deprecated: Use CreateSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{4} +} + +func (x *CreateSnapshotRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *CreateSnapshotRequest) GetVolumeID() string { - if m != nil { - return m.VolumeID +func (x *CreateSnapshotRequest) GetVolumeID() string { + if x != nil { + return x.VolumeID } return "" } -func (m *CreateSnapshotRequest) GetVolumeAZ() string { - if m != nil { - return m.VolumeAZ +func (x *CreateSnapshotRequest) GetVolumeAZ() string { + if x != nil { + return x.VolumeAZ } return "" } -func (m *CreateSnapshotRequest) GetTags() map[string]string { - if m != nil { - return m.Tags +func (x *CreateSnapshotRequest) GetTags() map[string]string { + if x != nil { + return x.Tags } return nil } type CreateSnapshotResponse struct { - SnapshotID string `protobuf:"bytes,1,opt,name=snapshotID" json:"snapshotID,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SnapshotID string `protobuf:"bytes,1,opt,name=snapshotID,proto3" json:"snapshotID,omitempty"` +} + +func (x *CreateSnapshotResponse) Reset() { + *x = CreateSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSnapshotResponse) ProtoMessage() {} + +func (x *CreateSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *CreateSnapshotResponse) Reset() { *m = CreateSnapshotResponse{} } -func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*CreateSnapshotResponse) ProtoMessage() {} -func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{5} } +// Deprecated: Use CreateSnapshotResponse.ProtoReflect.Descriptor instead. 
+func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{5} +} -func (m *CreateSnapshotResponse) GetSnapshotID() string { - if m != nil { - return m.SnapshotID +func (x *CreateSnapshotResponse) GetSnapshotID() string { + if x != nil { + return x.SnapshotID } return "" } type DeleteSnapshotRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - SnapshotID string `protobuf:"bytes,2,opt,name=snapshotID" json:"snapshotID,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + SnapshotID string `protobuf:"bytes,2,opt,name=snapshotID,proto3" json:"snapshotID,omitempty"` +} + +func (x *DeleteSnapshotRequest) Reset() { + *x = DeleteSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteSnapshotRequest) ProtoMessage() {} + +func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } -func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteSnapshotRequest) ProtoMessage() {} -func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{6} } +// Deprecated: Use DeleteSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{6} +} -func (m *DeleteSnapshotRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *DeleteSnapshotRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *DeleteSnapshotRequest) GetSnapshotID() string { - if m != nil { - return m.SnapshotID +func (x *DeleteSnapshotRequest) GetSnapshotID() string { + if x != nil { + return x.SnapshotID } return "" } type GetVolumeIDRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` PersistentVolume []byte `protobuf:"bytes,2,opt,name=persistentVolume,proto3" json:"persistentVolume,omitempty"` } -func (m *GetVolumeIDRequest) Reset() { *m = GetVolumeIDRequest{} } -func (m *GetVolumeIDRequest) String() string { return proto.CompactTextString(m) } -func (*GetVolumeIDRequest) ProtoMessage() {} -func (*GetVolumeIDRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{7} } +func (x *GetVolumeIDRequest) Reset() { + *x = GetVolumeIDRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetVolumeIDRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVolumeIDRequest) ProtoMessage() {} + +func (x *GetVolumeIDRequest) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetVolumeIDRequest.ProtoReflect.Descriptor instead. 
+func (*GetVolumeIDRequest) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{7} +} -func (m *GetVolumeIDRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *GetVolumeIDRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *GetVolumeIDRequest) GetPersistentVolume() []byte { - if m != nil { - return m.PersistentVolume +func (x *GetVolumeIDRequest) GetPersistentVolume() []byte { + if x != nil { + return x.PersistentVolume } return nil } type GetVolumeIDResponse struct { - VolumeID string `protobuf:"bytes,1,opt,name=volumeID" json:"volumeID,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeID string `protobuf:"bytes,1,opt,name=volumeID,proto3" json:"volumeID,omitempty"` +} + +func (x *GetVolumeIDResponse) Reset() { + *x = GetVolumeIDResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetVolumeIDResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVolumeIDResponse) ProtoMessage() {} + +func (x *GetVolumeIDResponse) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *GetVolumeIDResponse) Reset() { *m = GetVolumeIDResponse{} } -func (m *GetVolumeIDResponse) String() string { return proto.CompactTextString(m) } -func (*GetVolumeIDResponse) ProtoMessage() {} -func (*GetVolumeIDResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{8} } +// Deprecated: Use GetVolumeIDResponse.ProtoReflect.Descriptor instead. 
+func (*GetVolumeIDResponse) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{8} +} -func (m *GetVolumeIDResponse) GetVolumeID() string { - if m != nil { - return m.VolumeID +func (x *GetVolumeIDResponse) GetVolumeID() string { + if x != nil { + return x.VolumeID } return "" } type SetVolumeIDRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` PersistentVolume []byte `protobuf:"bytes,2,opt,name=persistentVolume,proto3" json:"persistentVolume,omitempty"` - VolumeID string `protobuf:"bytes,3,opt,name=volumeID" json:"volumeID,omitempty"` + VolumeID string `protobuf:"bytes,3,opt,name=volumeID,proto3" json:"volumeID,omitempty"` +} + +func (x *SetVolumeIDRequest) Reset() { + *x = SetVolumeIDRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetVolumeIDRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetVolumeIDRequest) ProtoMessage() {} + +func (x *SetVolumeIDRequest) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *SetVolumeIDRequest) Reset() { *m = SetVolumeIDRequest{} } -func (m *SetVolumeIDRequest) String() string { return proto.CompactTextString(m) } -func (*SetVolumeIDRequest) ProtoMessage() {} -func (*SetVolumeIDRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{9} } +// Deprecated: Use SetVolumeIDRequest.ProtoReflect.Descriptor instead. 
+func (*SetVolumeIDRequest) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{9} +} -func (m *SetVolumeIDRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *SetVolumeIDRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *SetVolumeIDRequest) GetPersistentVolume() []byte { - if m != nil { - return m.PersistentVolume +func (x *SetVolumeIDRequest) GetPersistentVolume() []byte { + if x != nil { + return x.PersistentVolume } return nil } -func (m *SetVolumeIDRequest) GetVolumeID() string { - if m != nil { - return m.VolumeID +func (x *SetVolumeIDRequest) GetVolumeID() string { + if x != nil { + return x.VolumeID } return "" } type SetVolumeIDResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + PersistentVolume []byte `protobuf:"bytes,1,opt,name=persistentVolume,proto3" json:"persistentVolume,omitempty"` } -func (m *SetVolumeIDResponse) Reset() { *m = SetVolumeIDResponse{} } -func (m *SetVolumeIDResponse) String() string { return proto.CompactTextString(m) } -func (*SetVolumeIDResponse) ProtoMessage() {} -func (*SetVolumeIDResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{10} } +func (x *SetVolumeIDResponse) Reset() { + *x = SetVolumeIDResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetVolumeIDResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetVolumeIDResponse) ProtoMessage() {} + +func (x *SetVolumeIDResponse) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetVolumeIDResponse.ProtoReflect.Descriptor instead. 
+func (*SetVolumeIDResponse) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{10} +} -func (m *SetVolumeIDResponse) GetPersistentVolume() []byte { - if m != nil { - return m.PersistentVolume +func (x *SetVolumeIDResponse) GetPersistentVolume() []byte { + if x != nil { + return x.PersistentVolume } return nil } type VolumeSnapshotterInitRequest struct { - Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` - Config map[string]string `protobuf:"bytes,2,rep,name=config" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugin string `protobuf:"bytes,1,opt,name=plugin,proto3" json:"plugin,omitempty"` + Config map[string]string `protobuf:"bytes,2,rep,name=config,proto3" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *VolumeSnapshotterInitRequest) Reset() { + *x = VolumeSnapshotterInitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_VolumeSnapshotter_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeSnapshotterInitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeSnapshotterInitRequest) ProtoMessage() {} + +func (x *VolumeSnapshotterInitRequest) ProtoReflect() protoreflect.Message { + mi := &file_VolumeSnapshotter_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeSnapshotterInitRequest) Reset() { *m = VolumeSnapshotterInitRequest{} } -func (m *VolumeSnapshotterInitRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeSnapshotterInitRequest) ProtoMessage() {} -func (*VolumeSnapshotterInitRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{11} } +// Deprecated: Use VolumeSnapshotterInitRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeSnapshotterInitRequest) Descriptor() ([]byte, []int) { + return file_VolumeSnapshotter_proto_rawDescGZIP(), []int{11} +} -func (m *VolumeSnapshotterInitRequest) GetPlugin() string { - if m != nil { - return m.Plugin +func (x *VolumeSnapshotterInitRequest) GetPlugin() string { + if x != nil { + return x.Plugin } return "" } -func (m *VolumeSnapshotterInitRequest) GetConfig() map[string]string { - if m != nil { - return m.Config +func (x *VolumeSnapshotterInitRequest) GetConfig() map[string]string { + if x != nil { + return x.Config } return nil } -func init() { - proto.RegisterType((*CreateVolumeRequest)(nil), "generated.CreateVolumeRequest") - proto.RegisterType((*CreateVolumeResponse)(nil), "generated.CreateVolumeResponse") - proto.RegisterType((*GetVolumeInfoRequest)(nil), "generated.GetVolumeInfoRequest") - proto.RegisterType((*GetVolumeInfoResponse)(nil), "generated.GetVolumeInfoResponse") - proto.RegisterType((*CreateSnapshotRequest)(nil), "generated.CreateSnapshotRequest") - proto.RegisterType((*CreateSnapshotResponse)(nil), "generated.CreateSnapshotResponse") - proto.RegisterType((*DeleteSnapshotRequest)(nil), "generated.DeleteSnapshotRequest") - proto.RegisterType((*GetVolumeIDRequest)(nil), "generated.GetVolumeIDRequest") - proto.RegisterType((*GetVolumeIDResponse)(nil), "generated.GetVolumeIDResponse") - proto.RegisterType((*SetVolumeIDRequest)(nil), "generated.SetVolumeIDRequest") - proto.RegisterType((*SetVolumeIDResponse)(nil), "generated.SetVolumeIDResponse") - proto.RegisterType((*VolumeSnapshotterInitRequest)(nil), "generated.VolumeSnapshotterInitRequest") +var File_VolumeSnapshotter_proto protoreflect.FileDescriptor + +var file_VolumeSnapshotter_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x1a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x9d, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x44, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x49, 0x44, 0x12, 0x1e, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x41, 0x5a, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x41, 0x5a, 0x12, 0x12, + 0x0a, 0x04, 0x69, 0x6f, 0x70, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x69, 0x6f, + 0x70, 0x73, 0x22, 0x32, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x22, 0x66, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x41, 0x5a, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x41, 0x5a, 0x22, 0x4b, + 0x0a, 0x15, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x6f, 0x70, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x69, 0x6f, 0x70, 0x73, 0x22, 0xe0, 0x01, 0x0a, 0x15, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x41, 0x5a, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x41, 0x5a, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x38, + 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x44, 0x22, 0x4f, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x44, 0x22, 0x58, 0x0a, 0x12, 0x47, 0x65, 0x74, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x70, 0x65, 0x72, 0x73, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, + 0x0c, 0x52, 0x10, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x22, 0x31, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x22, 0x74, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, + 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x22, 0x41, 0x0a, 0x13, + 0x53, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x10, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, + 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x22, + 0xbe, 0x01, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x4b, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x32, 0xc0, 0x04, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x27, + 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x69, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5b, 0x0a, 0x18, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x1e, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x2e, 
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1f, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x20, 0x2e, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x44, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x12, 0x20, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4c, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x1d, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x44, 0x12, 0x1d, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, + 0x2e, 0x53, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, + 0x53, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x76, 0x6d, 0x77, 0x61, 0x72, 0x65, 0x2d, 0x74, 0x61, 0x6e, 0x7a, 0x75, 0x2f, 0x76, + 0x65, 0x6c, 0x65, 0x72, 0x6f, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_VolumeSnapshotter_proto_rawDescOnce sync.Once + file_VolumeSnapshotter_proto_rawDescData = file_VolumeSnapshotter_proto_rawDesc +) + +func file_VolumeSnapshotter_proto_rawDescGZIP() []byte { + file_VolumeSnapshotter_proto_rawDescOnce.Do(func() { + file_VolumeSnapshotter_proto_rawDescData = protoimpl.X.CompressGZIP(file_VolumeSnapshotter_proto_rawDescData) + }) + return 
file_VolumeSnapshotter_proto_rawDescData +} + +var file_VolumeSnapshotter_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_VolumeSnapshotter_proto_goTypes = []interface{}{ + (*CreateVolumeRequest)(nil), // 0: generated.CreateVolumeRequest + (*CreateVolumeResponse)(nil), // 1: generated.CreateVolumeResponse + (*GetVolumeInfoRequest)(nil), // 2: generated.GetVolumeInfoRequest + (*GetVolumeInfoResponse)(nil), // 3: generated.GetVolumeInfoResponse + (*CreateSnapshotRequest)(nil), // 4: generated.CreateSnapshotRequest + (*CreateSnapshotResponse)(nil), // 5: generated.CreateSnapshotResponse + (*DeleteSnapshotRequest)(nil), // 6: generated.DeleteSnapshotRequest + (*GetVolumeIDRequest)(nil), // 7: generated.GetVolumeIDRequest + (*GetVolumeIDResponse)(nil), // 8: generated.GetVolumeIDResponse + (*SetVolumeIDRequest)(nil), // 9: generated.SetVolumeIDRequest + (*SetVolumeIDResponse)(nil), // 10: generated.SetVolumeIDResponse + (*VolumeSnapshotterInitRequest)(nil), // 11: generated.VolumeSnapshotterInitRequest + nil, // 12: generated.CreateSnapshotRequest.TagsEntry + nil, // 13: generated.VolumeSnapshotterInitRequest.ConfigEntry + (*Empty)(nil), // 14: generated.Empty +} +var file_VolumeSnapshotter_proto_depIdxs = []int32{ + 12, // 0: generated.CreateSnapshotRequest.tags:type_name -> generated.CreateSnapshotRequest.TagsEntry + 13, // 1: generated.VolumeSnapshotterInitRequest.config:type_name -> generated.VolumeSnapshotterInitRequest.ConfigEntry + 11, // 2: generated.VolumeSnapshotter.Init:input_type -> generated.VolumeSnapshotterInitRequest + 0, // 3: generated.VolumeSnapshotter.CreateVolumeFromSnapshot:input_type -> generated.CreateVolumeRequest + 2, // 4: generated.VolumeSnapshotter.GetVolumeInfo:input_type -> generated.GetVolumeInfoRequest + 4, // 5: generated.VolumeSnapshotter.CreateSnapshot:input_type -> generated.CreateSnapshotRequest + 6, // 6: generated.VolumeSnapshotter.DeleteSnapshot:input_type -> generated.DeleteSnapshotRequest + 7, // 7: generated.VolumeSnapshotter.GetVolumeID:input_type -> generated.GetVolumeIDRequest + 9, // 8: generated.VolumeSnapshotter.SetVolumeID:input_type -> generated.SetVolumeIDRequest + 14, // 9: generated.VolumeSnapshotter.Init:output_type -> generated.Empty + 1, // 10: generated.VolumeSnapshotter.CreateVolumeFromSnapshot:output_type -> generated.CreateVolumeResponse + 3, // 11: generated.VolumeSnapshotter.GetVolumeInfo:output_type -> generated.GetVolumeInfoResponse + 5, // 12: generated.VolumeSnapshotter.CreateSnapshot:output_type -> generated.CreateSnapshotResponse + 14, // 13: generated.VolumeSnapshotter.DeleteSnapshot:output_type -> generated.Empty + 8, // 14: generated.VolumeSnapshotter.GetVolumeID:output_type -> generated.GetVolumeIDResponse + 10, // 15: generated.VolumeSnapshotter.SetVolumeID:output_type -> generated.SetVolumeIDResponse + 9, // [9:16] is the sub-list for method output_type + 2, // [2:9] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_VolumeSnapshotter_proto_init() } +func file_VolumeSnapshotter_proto_init() { + if File_VolumeSnapshotter_proto != nil { + return + } + file_Shared_proto_init() + if !protoimpl.UnsafeEnabled { + file_VolumeSnapshotter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_VolumeSnapshotter_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_VolumeSnapshotter_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVolumeInfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_VolumeSnapshotter_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVolumeInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_VolumeSnapshotter_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_VolumeSnapshotter_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_VolumeSnapshotter_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_VolumeSnapshotter_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVolumeIDRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_VolumeSnapshotter_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVolumeIDResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_VolumeSnapshotter_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetVolumeIDRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_VolumeSnapshotter_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetVolumeIDResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_VolumeSnapshotter_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeSnapshotterInitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_VolumeSnapshotter_proto_rawDesc, + NumEnums: 0, + NumMessages: 14, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_VolumeSnapshotter_proto_goTypes, + DependencyIndexes: file_VolumeSnapshotter_proto_depIdxs, + MessageInfos: file_VolumeSnapshotter_proto_msgTypes, + }.Build() + File_VolumeSnapshotter_proto = out.File + file_VolumeSnapshotter_proto_rawDesc = nil + file_VolumeSnapshotter_proto_goTypes = 
nil + file_VolumeSnapshotter_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for VolumeSnapshotter service +const _ = grpc.SupportPackageIsVersion6 +// VolumeSnapshotterClient is the client API for VolumeSnapshotter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type VolumeSnapshotterClient interface { Init(ctx context.Context, in *VolumeSnapshotterInitRequest, opts ...grpc.CallOption) (*Empty, error) CreateVolumeFromSnapshot(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) @@ -365,16 +1090,16 @@ type VolumeSnapshotterClient interface { } type volumeSnapshotterClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewVolumeSnapshotterClient(cc *grpc.ClientConn) VolumeSnapshotterClient { +func NewVolumeSnapshotterClient(cc grpc.ClientConnInterface) VolumeSnapshotterClient { return &volumeSnapshotterClient{cc} } func (c *volumeSnapshotterClient) Init(ctx context.Context, in *VolumeSnapshotterInitRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := grpc.Invoke(ctx, "/generated.VolumeSnapshotter/Init", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.VolumeSnapshotter/Init", in, out, opts...) if err != nil { return nil, err } @@ -383,7 +1108,7 @@ func (c *volumeSnapshotterClient) Init(ctx context.Context, in *VolumeSnapshotte func (c *volumeSnapshotterClient) CreateVolumeFromSnapshot(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { out := new(CreateVolumeResponse) - err := grpc.Invoke(ctx, "/generated.VolumeSnapshotter/CreateVolumeFromSnapshot", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.VolumeSnapshotter/CreateVolumeFromSnapshot", in, out, opts...) if err != nil { return nil, err } @@ -392,7 +1117,7 @@ func (c *volumeSnapshotterClient) CreateVolumeFromSnapshot(ctx context.Context, func (c *volumeSnapshotterClient) GetVolumeInfo(ctx context.Context, in *GetVolumeInfoRequest, opts ...grpc.CallOption) (*GetVolumeInfoResponse, error) { out := new(GetVolumeInfoResponse) - err := grpc.Invoke(ctx, "/generated.VolumeSnapshotter/GetVolumeInfo", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.VolumeSnapshotter/GetVolumeInfo", in, out, opts...) if err != nil { return nil, err } @@ -401,7 +1126,7 @@ func (c *volumeSnapshotterClient) GetVolumeInfo(ctx context.Context, in *GetVolu func (c *volumeSnapshotterClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) { out := new(CreateSnapshotResponse) - err := grpc.Invoke(ctx, "/generated.VolumeSnapshotter/CreateSnapshot", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.VolumeSnapshotter/CreateSnapshot", in, out, opts...) 
if err != nil { return nil, err } @@ -410,7 +1135,7 @@ func (c *volumeSnapshotterClient) CreateSnapshot(ctx context.Context, in *Create func (c *volumeSnapshotterClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := grpc.Invoke(ctx, "/generated.VolumeSnapshotter/DeleteSnapshot", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.VolumeSnapshotter/DeleteSnapshot", in, out, opts...) if err != nil { return nil, err } @@ -419,7 +1144,7 @@ func (c *volumeSnapshotterClient) DeleteSnapshot(ctx context.Context, in *Delete func (c *volumeSnapshotterClient) GetVolumeID(ctx context.Context, in *GetVolumeIDRequest, opts ...grpc.CallOption) (*GetVolumeIDResponse, error) { out := new(GetVolumeIDResponse) - err := grpc.Invoke(ctx, "/generated.VolumeSnapshotter/GetVolumeID", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.VolumeSnapshotter/GetVolumeID", in, out, opts...) if err != nil { return nil, err } @@ -428,15 +1153,14 @@ func (c *volumeSnapshotterClient) GetVolumeID(ctx context.Context, in *GetVolume func (c *volumeSnapshotterClient) SetVolumeID(ctx context.Context, in *SetVolumeIDRequest, opts ...grpc.CallOption) (*SetVolumeIDResponse, error) { out := new(SetVolumeIDResponse) - err := grpc.Invoke(ctx, "/generated.VolumeSnapshotter/SetVolumeID", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/generated.VolumeSnapshotter/SetVolumeID", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for VolumeSnapshotter service - +// VolumeSnapshotterServer is the server API for VolumeSnapshotter service. type VolumeSnapshotterServer interface { Init(context.Context, *VolumeSnapshotterInitRequest) (*Empty, error) CreateVolumeFromSnapshot(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) @@ -447,6 +1171,32 @@ type VolumeSnapshotterServer interface { SetVolumeID(context.Context, *SetVolumeIDRequest) (*SetVolumeIDResponse, error) } +// UnimplementedVolumeSnapshotterServer can be embedded to have forward compatible implementations. 
+type UnimplementedVolumeSnapshotterServer struct { +} + +func (*UnimplementedVolumeSnapshotterServer) Init(context.Context, *VolumeSnapshotterInitRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") +} +func (*UnimplementedVolumeSnapshotterServer) CreateVolumeFromSnapshot(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateVolumeFromSnapshot not implemented") +} +func (*UnimplementedVolumeSnapshotterServer) GetVolumeInfo(context.Context, *GetVolumeInfoRequest) (*GetVolumeInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVolumeInfo not implemented") +} +func (*UnimplementedVolumeSnapshotterServer) CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented") +} +func (*UnimplementedVolumeSnapshotterServer) DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented") +} +func (*UnimplementedVolumeSnapshotterServer) GetVolumeID(context.Context, *GetVolumeIDRequest) (*GetVolumeIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVolumeID not implemented") +} +func (*UnimplementedVolumeSnapshotterServer) SetVolumeID(context.Context, *SetVolumeIDRequest) (*SetVolumeIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetVolumeID not implemented") +} + func RegisterVolumeSnapshotterServer(s *grpc.Server, srv VolumeSnapshotterServer) { s.RegisterService(&_VolumeSnapshotter_serviceDesc, srv) } @@ -613,45 +1363,3 @@ var _VolumeSnapshotter_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "VolumeSnapshotter.proto", } - -func init() { proto.RegisterFile("VolumeSnapshotter.proto", fileDescriptor7) } - -var fileDescriptor7 = []byte{ - // 566 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0xd5, 0xda, 0x6e, 0x44, 0x26, 0xa5, 0x0a, 0x9b, 0xa4, 0x58, 0x16, 0x04, 0xe3, 0x0b, 0x51, - 0x0f, 0x96, 0x48, 0x0f, 0x14, 0x0e, 0x48, 0x51, 0x5d, 0x50, 0xd4, 0x4a, 0x48, 0x76, 0x41, 0x08, - 0x4e, 0x86, 0x6e, 0x5c, 0x8b, 0xc4, 0x36, 0xde, 0x4d, 0xa5, 0x7c, 0x0c, 0xbf, 0x82, 0xf8, 0x14, - 0x3e, 0x05, 0xc5, 0xde, 0x24, 0xbb, 0xb1, 0x1d, 0x87, 0x43, 0x6f, 0xde, 0x99, 0x9d, 0x37, 0x6f, - 0x66, 0xdf, 0x8c, 0xe1, 0xf1, 0xa7, 0x78, 0x3a, 0x9f, 0x11, 0x2f, 0xf2, 0x13, 0x7a, 0x1b, 0x33, - 0x46, 0x52, 0x3b, 0x49, 0x63, 0x16, 0xe3, 0x66, 0x40, 0x22, 0x92, 0xfa, 0x8c, 0xdc, 0x18, 0x87, - 0xde, 0xad, 0x9f, 0x92, 0x9b, 0xdc, 0x61, 0xfd, 0x42, 0xd0, 0x39, 0x4f, 0x89, 0xcf, 0x48, 0x1e, - 0xea, 0x92, 0x9f, 0x73, 0x42, 0x19, 0x3e, 0x86, 0x46, 0x32, 0x9d, 0x07, 0x61, 0xa4, 0x23, 0x13, - 0x0d, 0x9a, 0x2e, 0x3f, 0xe1, 0x3e, 0x00, 0xe5, 0xe8, 0x63, 0x47, 0x57, 0x32, 0x9f, 0x60, 0x59, - 0xfa, 0xef, 0x32, 0xa0, 0xeb, 0x45, 0x42, 0x74, 0x35, 0xf7, 0x6f, 0x2c, 0xd8, 0x80, 0x07, 0xf9, - 0x69, 0xf4, 0x45, 0xd7, 0x32, 0xef, 0xfa, 0x8c, 0x31, 0x68, 0x61, 0x9c, 0x50, 0xfd, 0xc0, 0x44, - 0x03, 0xd5, 0xcd, 0xbe, 0xad, 0x21, 0x74, 0x65, 0x7a, 0x34, 0x89, 0x23, 0x2a, 0xe0, 0x8c, 0x1d, - 0xce, 0x70, 0x7d, 0xb6, 0x26, 0xd0, 0x7d, 0x4f, 0x58, 0x1e, 0x30, 0x8e, 0x26, 0x71, 0x5d, 0x4d, - 0x22, 0x96, 0x22, 0x63, 0x49, 0x7c, 0x55, 0x99, 0xaf, 0x75, 0x09, 0xbd, 0xad, 0x3c, 0x9c, 0x9c, - 0xdc, 0x04, 0x54, 
0x68, 0xc2, 0xaa, 0x50, 0x45, 0x28, 0xf4, 0x2f, 0x82, 0x5e, 0x5e, 0xe9, 0xea, - 0xf5, 0xee, 0x89, 0x36, 0x7e, 0x0b, 0x1a, 0xf3, 0x03, 0xaa, 0x6b, 0xa6, 0x3a, 0x68, 0x0d, 0x4f, - 0xec, 0xb5, 0x34, 0xec, 0xd2, 0xfc, 0xf6, 0xb5, 0x1f, 0xd0, 0x8b, 0x88, 0xa5, 0x0b, 0x37, 0x8b, - 0x33, 0x5e, 0x41, 0x73, 0x6d, 0xc2, 0x6d, 0x50, 0x7f, 0x90, 0x05, 0x67, 0xb6, 0xfc, 0xc4, 0x5d, - 0x38, 0xb8, 0xf3, 0xa7, 0x73, 0xc2, 0x39, 0xe5, 0x87, 0x37, 0xca, 0x19, 0xb2, 0xce, 0xe0, 0x78, - 0x3b, 0xc3, 0xa6, 0x61, 0x82, 0xaa, 0xd0, 0xb6, 0xaa, 0xac, 0x0f, 0xd0, 0x73, 0xc8, 0x94, 0xec, - 0xdf, 0x9b, 0x1a, 0x99, 0x5a, 0x9f, 0x01, 0x6f, 0x9e, 0xce, 0xa9, 0x43, 0x3b, 0x81, 0x76, 0x42, - 0x52, 0x1a, 0x52, 0x46, 0x22, 0x1e, 0x94, 0x61, 0x1e, 0xba, 0x05, 0xbb, 0xf5, 0x12, 0x3a, 0x12, - 0xf2, 0x1e, 0x7a, 0x65, 0x80, 0xbd, 0x7b, 0x21, 0x23, 0x65, 0x55, 0xb7, 0xb2, 0x8e, 0xa0, 0xe3, - 0x95, 0x10, 0x2d, 0x83, 0x47, 0x15, 0xb5, 0xfe, 0x46, 0xf0, 0xa4, 0xb0, 0x71, 0xc6, 0x51, 0x58, - 0xfb, 0x3c, 0x97, 0xd0, 0xf8, 0x1e, 0x47, 0x93, 0x30, 0xd0, 0x95, 0x4c, 0x84, 0xa7, 0x82, 0x08, - 0x77, 0x01, 0xda, 0xe7, 0x59, 0x54, 0xae, 0x46, 0x0e, 0x61, 0xbc, 0x86, 0x96, 0x60, 0xfe, 0x1f, - 0x45, 0x0e, 0xff, 0x68, 0xf0, 0xa8, 0x90, 0x0f, 0x8f, 0x40, 0x5b, 0xe6, 0xc4, 0x2f, 0xf6, 0x64, - 0x65, 0xb4, 0x85, 0x8b, 0x17, 0xb3, 0x84, 0x2d, 0xf0, 0x57, 0xd0, 0xc5, 0xb5, 0xf5, 0x2e, 0x8d, - 0x67, 0xab, 0x58, 0xdc, 0x2f, 0x4c, 0x9c, 0xb4, 0x7a, 0x8d, 0x67, 0x95, 0x7e, 0xfe, 0x44, 0x2e, - 0x3c, 0x94, 0xf6, 0x0e, 0x16, 0x23, 0xca, 0x36, 0x9f, 0x61, 0x56, 0x5f, 0xe0, 0x98, 0x1f, 0xe1, - 0x48, 0x9e, 0x4d, 0x6c, 0xd6, 0x2d, 0x06, 0xe3, 0xf9, 0x8e, 0x1b, 0x1c, 0xd6, 0x81, 0x23, 0x79, - 0x70, 0x25, 0xd8, 0xd2, 0x99, 0x2e, 0xe9, 0xe6, 0x15, 0xb4, 0x84, 0x99, 0xc2, 0x4f, 0x4b, 0xab, - 0x59, 0x0d, 0x8e, 0xd1, 0xaf, 0x72, 0x73, 0x4e, 0x57, 0xd0, 0xf2, 0x2a, 0xd0, 0xbc, 0xdd, 0x68, - 0x25, 0xf3, 0xf2, 0xad, 0x91, 0xfd, 0x47, 0x4f, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc3, 0xf2, - 0xb8, 0x2f, 0x7b, 0x07, 0x00, 0x00, -} diff --git a/pkg/plugin/mocks/manager.go b/pkg/plugin/mocks/manager.go index 588f9b74d3..c99e1bf288 100644 --- a/pkg/plugin/mocks/manager.go +++ b/pkg/plugin/mocks/manager.go @@ -1,11 +1,18 @@ -// Code generated by mockery v2.1.0. DO NOT EDIT. +// Code generated by mockery v1.0.0. DO NOT EDIT. 
package mocks import ( mock "github.com/stretchr/testify/mock" + item_snapshotterv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" + + restoreitemactionv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" + + v1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v1" + velero "github.com/vmware-tanzu/velero/pkg/plugin/velero" - isv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" + + volumesnapshotterv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" ) // Manager is an autogenerated mock type for the Manager type @@ -19,15 +26,15 @@ func (_m *Manager) CleanupClients() { } // GetBackupItemAction provides a mock function with given fields: name -func (_m *Manager) GetBackupItemAction(name string) (velero.BackupItemAction, error) { +func (_m *Manager) GetBackupItemAction(name string) (v1.BackupItemAction, error) { ret := _m.Called(name) - var r0 velero.BackupItemAction - if rf, ok := ret.Get(0).(func(string) velero.BackupItemAction); ok { + var r0 v1.BackupItemAction + if rf, ok := ret.Get(0).(func(string) v1.BackupItemAction); ok { r0 = rf(name) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(velero.BackupItemAction) + r0 = ret.Get(0).(v1.BackupItemAction) } } @@ -42,15 +49,15 @@ func (_m *Manager) GetBackupItemAction(name string) (velero.BackupItemAction, er } // GetBackupItemActions provides a mock function with given fields: -func (_m *Manager) GetBackupItemActions() ([]velero.BackupItemAction, error) { +func (_m *Manager) GetBackupItemActions() ([]v1.BackupItemAction, error) { ret := _m.Called() - var r0 []velero.BackupItemAction - if rf, ok := ret.Get(0).(func() []velero.BackupItemAction); ok { + var r0 []v1.BackupItemAction + if rf, ok := ret.Get(0).(func() []v1.BackupItemAction); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]velero.BackupItemAction) + r0 = ret.Get(0).([]v1.BackupItemAction) } } @@ -110,16 +117,16 @@ func (_m *Manager) GetDeleteItemActions() ([]velero.DeleteItemAction, error) { return r0, r1 } -// GetObjectStore provides a mock function with given fields: name -func (_m *Manager) GetObjectStore(name string) (velero.ObjectStore, error) { +// GetItemSnapshotter provides a mock function with given fields: name +func (_m *Manager) GetItemSnapshotter(name string) (item_snapshotterv1.ItemSnapshotter, error) { ret := _m.Called(name) - var r0 velero.ObjectStore - if rf, ok := ret.Get(0).(func(string) velero.ObjectStore); ok { + var r0 item_snapshotterv1.ItemSnapshotter + if rf, ok := ret.Get(0).(func(string) item_snapshotterv1.ItemSnapshotter); ok { r0 = rf(name) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(velero.ObjectStore) + r0 = ret.Get(0).(item_snapshotterv1.ItemSnapshotter) } } @@ -133,22 +140,22 @@ func (_m *Manager) GetObjectStore(name string) (velero.ObjectStore, error) { return r0, r1 } -// GetRestoreItemAction provides a mock function with given fields: name -func (_m *Manager) GetRestoreItemAction(name string) (velero.RestoreItemAction, error) { - ret := _m.Called(name) +// GetItemSnapshotters provides a mock function with given fields: +func (_m *Manager) GetItemSnapshotters() ([]item_snapshotterv1.ItemSnapshotter, error) { + ret := _m.Called() - var r0 velero.RestoreItemAction - if rf, ok := ret.Get(0).(func(string) velero.RestoreItemAction); ok { - r0 = rf(name) + var r0 []item_snapshotterv1.ItemSnapshotter + if rf, ok := ret.Get(0).(func() []item_snapshotterv1.ItemSnapshotter); ok { + r0 = rf() } else { if ret.Get(0) != nil 
{ - r0 = ret.Get(0).(velero.RestoreItemAction) + r0 = ret.Get(0).([]item_snapshotterv1.ItemSnapshotter) } } var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(name) + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() } else { r1 = ret.Error(1) } @@ -156,22 +163,22 @@ func (_m *Manager) GetRestoreItemAction(name string) (velero.RestoreItemAction, return r0, r1 } -// GetRestoreItemActions provides a mock function with given fields: -func (_m *Manager) GetRestoreItemActions() ([]velero.RestoreItemAction, error) { - ret := _m.Called() +// GetObjectStore provides a mock function with given fields: name +func (_m *Manager) GetObjectStore(name string) (velero.ObjectStore, error) { + ret := _m.Called(name) - var r0 []velero.RestoreItemAction - if rf, ok := ret.Get(0).(func() []velero.RestoreItemAction); ok { - r0 = rf() + var r0 velero.ObjectStore + if rf, ok := ret.Get(0).(func(string) velero.ObjectStore); ok { + r0 = rf(name) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]velero.RestoreItemAction) + r0 = ret.Get(0).(velero.ObjectStore) } } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(name) } else { r1 = ret.Error(1) } @@ -179,16 +186,16 @@ func (_m *Manager) GetRestoreItemActions() ([]velero.RestoreItemAction, error) { return r0, r1 } -// GetVolumeSnapshotter provides a mock function with given fields: name -func (_m *Manager) GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) { +// GetRestoreItemAction provides a mock function with given fields: name +func (_m *Manager) GetRestoreItemAction(name string) (restoreitemactionv1.RestoreItemAction, error) { ret := _m.Called(name) - var r0 velero.VolumeSnapshotter - if rf, ok := ret.Get(0).(func(string) velero.VolumeSnapshotter); ok { + var r0 restoreitemactionv1.RestoreItemAction + if rf, ok := ret.Get(0).(func(string) restoreitemactionv1.RestoreItemAction); ok { r0 = rf(name) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(velero.VolumeSnapshotter) + r0 = ret.Get(0).(restoreitemactionv1.RestoreItemAction) } } @@ -202,47 +209,48 @@ func (_m *Manager) GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, return r0, r1 } -// GetItemSnapshotter provides a mock function with given fields: name -func (_m *Manager) GetItemSnapshotter(name string) (isv1.ItemSnapshotter, error) { - ret := _m.Called(name) +// GetRestoreItemActions provides a mock function with given fields: +func (_m *Manager) GetRestoreItemActions() ([]restoreitemactionv1.RestoreItemAction, error) { + ret := _m.Called() - var r0 isv1.ItemSnapshotter - if rf, ok := ret.Get(0).(func(string) isv1.ItemSnapshotter); ok { - r0 = rf(name) + var r0 []restoreitemactionv1.RestoreItemAction + if rf, ok := ret.Get(0).(func() []restoreitemactionv1.RestoreItemAction); ok { + r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(isv1.ItemSnapshotter) + r0 = ret.Get(0).([]restoreitemactionv1.RestoreItemAction) } } var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(name) + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() } else { r1 = ret.Error(1) } return r0, r1 } -// GetItemSnapshotters provides a mock function with given fields: -func (_m *Manager) GetItemSnapshotters() ([]isv1.ItemSnapshotter, error) { - ret := _m.Called() - var r0 []isv1.ItemSnapshotter - if rf, ok := ret.Get(0).(func() []isv1.ItemSnapshotter); ok { - r0 = rf() +// GetVolumeSnapshotter provides a mock function with given fields: name +func (_m 
*Manager) GetVolumeSnapshotter(name string) (volumesnapshotterv1.VolumeSnapshotter, error) { + ret := _m.Called(name) + + var r0 volumesnapshotterv1.VolumeSnapshotter + if rf, ok := ret.Get(0).(func(string) volumesnapshotterv1.VolumeSnapshotter); ok { + r0 = rf(name) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]isv1.ItemSnapshotter) + r0 = ret.Get(0).(volumesnapshotterv1.VolumeSnapshotter) } } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(name) } else { r1 = ret.Error(1) } return r0, r1 -} \ No newline at end of file +} diff --git a/pkg/plugin/mocks/process_factory.go b/pkg/plugin/mocks/process_factory.go index 22abc81fc1..586e311216 100644 --- a/pkg/plugin/mocks/process_factory.go +++ b/pkg/plugin/mocks/process_factory.go @@ -1,5 +1,5 @@ /* -Copyright 2018 the Velero contributors. +Copyright the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,11 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ // Code generated by mockery v1.0.0. DO NOT EDIT. + package mocks -import logrus "github.com/sirupsen/logrus" -import mock "github.com/stretchr/testify/mock" -import "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" +import ( + logrus "github.com/sirupsen/logrus" + mock "github.com/stretchr/testify/mock" + + process "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt/process" +) // ProcessFactory is an autogenerated mock type for the ProcessFactory type type ProcessFactory struct { @@ -26,15 +30,15 @@ type ProcessFactory struct { } // newProcess provides a mock function with given fields: command, logger, logLevel -func (_m *ProcessFactory) newProcess(command string, logger logrus.FieldLogger, logLevel logrus.Level) (clientmgmt.Process, error) { +func (_m *ProcessFactory) newProcess(command string, logger logrus.FieldLogger, logLevel logrus.Level) (process.Process, error) { ret := _m.Called(command, logger, logLevel) - var r0 clientmgmt.Process - if rf, ok := ret.Get(0).(func(string, logrus.FieldLogger, logrus.Level) clientmgmt.Process); ok { + var r0 process.Process + if rf, ok := ret.Get(0).(func(string, logrus.FieldLogger, logrus.Level) process.Process); ok { r0 = rf(command, logger, logLevel) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(clientmgmt.Process) + r0 = ret.Get(0).(process.Process) } } diff --git a/pkg/plugin/proto/BackupItemAction.proto b/pkg/plugin/proto/BackupItemAction.proto index c2422c90bd..66111300a7 100644 --- a/pkg/plugin/proto/BackupItemAction.proto +++ b/pkg/plugin/proto/BackupItemAction.proto @@ -1,5 +1,6 @@ syntax = "proto3"; package generated; +option go_package = "github.com/vmware-tanzu/velero/pkg/plugin/generated"; import "Shared.proto"; diff --git a/pkg/plugin/proto/DeleteItemAction.proto b/pkg/plugin/proto/DeleteItemAction.proto index 48c38817ea..f2838fe0c5 100644 --- a/pkg/plugin/proto/DeleteItemAction.proto +++ b/pkg/plugin/proto/DeleteItemAction.proto @@ -1,5 +1,6 @@ syntax = "proto3"; package generated; +option go_package = "github.com/vmware-tanzu/velero/pkg/plugin/generated"; import "Shared.proto"; diff --git a/pkg/plugin/proto/ItemSnapshotter.proto b/pkg/plugin/proto/ItemSnapshotter.proto index 8aa62a8999..a5f6455860 100644 --- a/pkg/plugin/proto/ItemSnapshotter.proto +++ b/pkg/plugin/proto/ItemSnapshotter.proto @@ -1,5 +1,6 @@ syntax = "proto3"; package generated; +option go_package 
= "github.com/vmware-tanzu/velero/pkg/plugin/generated"; import "Shared.proto"; diff --git a/pkg/plugin/proto/ObjectStore.proto b/pkg/plugin/proto/ObjectStore.proto index f418568fe4..4487a0cef3 100644 --- a/pkg/plugin/proto/ObjectStore.proto +++ b/pkg/plugin/proto/ObjectStore.proto @@ -1,5 +1,6 @@ syntax = "proto3"; package generated; +option go_package = "github.com/vmware-tanzu/velero/pkg/plugin/generated"; import "Shared.proto"; diff --git a/pkg/plugin/proto/PluginLister.proto b/pkg/plugin/proto/PluginLister.proto index caa8b02aad..55cfe62de8 100644 --- a/pkg/plugin/proto/PluginLister.proto +++ b/pkg/plugin/proto/PluginLister.proto @@ -1,5 +1,6 @@ syntax = "proto3"; package generated; +option go_package = "github.com/vmware-tanzu/velero/pkg/plugin/generated"; import "Shared.proto"; diff --git a/pkg/plugin/proto/RestoreItemAction.proto b/pkg/plugin/proto/RestoreItemAction.proto index 49b2568255..ae9c3ddd99 100644 --- a/pkg/plugin/proto/RestoreItemAction.proto +++ b/pkg/plugin/proto/RestoreItemAction.proto @@ -1,5 +1,6 @@ syntax = "proto3"; package generated; +option go_package = "github.com/vmware-tanzu/velero/pkg/plugin/generated"; import "Shared.proto"; diff --git a/pkg/plugin/proto/Shared.proto b/pkg/plugin/proto/Shared.proto index a58f411249..c1c298e3a4 100644 --- a/pkg/plugin/proto/Shared.proto +++ b/pkg/plugin/proto/Shared.proto @@ -1,5 +1,6 @@ syntax = "proto3"; package generated; +option go_package = "github.com/vmware-tanzu/velero/pkg/plugin/generated"; message Empty {} diff --git a/pkg/plugin/proto/VolumeSnapshotter.proto b/pkg/plugin/proto/VolumeSnapshotter.proto index 970fc1909c..affb762bcb 100644 --- a/pkg/plugin/proto/VolumeSnapshotter.proto +++ b/pkg/plugin/proto/VolumeSnapshotter.proto @@ -1,5 +1,6 @@ syntax = "proto3"; package generated; +option go_package = "github.com/vmware-tanzu/velero/pkg/plugin/generated"; import "Shared.proto"; diff --git a/pkg/plugin/velero/backup_item_action.go b/pkg/plugin/velero/backupitemaction/v1/backup_item_action.go similarity index 81% rename from pkg/plugin/velero/backup_item_action.go rename to pkg/plugin/velero/backupitemaction/v1/backup_item_action.go index 70d28555f7..35566d98a4 100644 --- a/pkg/plugin/velero/backup_item_action.go +++ b/pkg/plugin/velero/backupitemaction/v1/backup_item_action.go @@ -14,13 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package velero +package v1 import ( "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/plugin/velero" ) // BackupItemAction is an actor that performs an operation on an individual item being backed up. @@ -28,18 +28,11 @@ type BackupItemAction interface { // AppliesTo returns information about which resources this action should be invoked for. // A BackupItemAction's Execute function will only be invoked on items that match the returned // selector. A zero-valued ResourceSelector matches all resources. - AppliesTo() (ResourceSelector, error) + AppliesTo() (velero.ResourceSelector, error) // Execute allows the ItemAction to perform arbitrary logic with the item being backed up, // including mutating the item itself prior to backup. The item (unmodified or modified) // should be returned, along with an optional slice of ResourceIdentifiers specifying // additional related items that should be backed up. 
- Execute(item runtime.Unstructured, backup *api.Backup) (runtime.Unstructured, []ResourceIdentifier, error) -} - -// ResourceIdentifier describes a single item by its group, resource, namespace, and name. -type ResourceIdentifier struct { - schema.GroupResource - Namespace string - Name string + Execute(item runtime.Unstructured, backup *api.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) } diff --git a/pkg/plugin/velero/item_snapshotter/v1/item_snapshotter.go b/pkg/plugin/velero/item_snapshotter/v1/item_snapshotter.go index 7d9ddcd00c..e43760b8e8 100644 --- a/pkg/plugin/velero/item_snapshotter/v1/item_snapshotter.go +++ b/pkg/plugin/velero/item_snapshotter/v1/item_snapshotter.go @@ -21,11 +21,10 @@ import ( "fmt" "time" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" - "k8s.io/apimachinery/pkg/runtime" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/plugin/velero" ) type AlsoHandlesInput struct { diff --git a/pkg/plugin/velero/item_snapshotter/v1/mocks/item_snapshotter.go b/pkg/plugin/velero/item_snapshotter/v1/mocks/item_snapshotter.go index 940b854261..5d2b76c1df 100644 --- a/pkg/plugin/velero/item_snapshotter/v1/mocks/item_snapshotter.go +++ b/pkg/plugin/velero/item_snapshotter/v1/mocks/item_snapshotter.go @@ -5,9 +5,9 @@ package mocks import ( context "context" mock "github.com/stretchr/testify/mock" - v1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" velero "github.com/vmware-tanzu/velero/pkg/plugin/velero" + v1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/item_snapshotter/v1" ) // ItemSnapshotter is an autogenerated mock type for the ItemSnapshotter type diff --git a/pkg/backup/mocks/item_action.go b/pkg/plugin/velero/mocks/backupitemaction/v1/BackupItemAction.go similarity index 58% rename from pkg/backup/mocks/item_action.go rename to pkg/plugin/velero/mocks/backupitemaction/v1/BackupItemAction.go index 5d4bd8db55..cfca2ef6d2 100644 --- a/pkg/backup/mocks/item_action.go +++ b/pkg/plugin/velero/mocks/backupitemaction/v1/BackupItemAction.go @@ -1,5 +1,5 @@ /* -Copyright 2018 the Velero contributors. +Copyright the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,23 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. */ // Code generated by mockery v1.0.0. DO NOT EDIT. 
-package mocks + +package v1 import ( mock "github.com/stretchr/testify/mock" runtime "k8s.io/apimachinery/pkg/runtime" - v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + velero "github.com/vmware-tanzu/velero/pkg/plugin/velero" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" ) -// ItemAction is an autogenerated mock type for the ItemAction type -type ItemAction struct { +// BackupItemAction is an autogenerated mock type for the BackupItemAction type +type BackupItemAction struct { mock.Mock } // AppliesTo provides a mock function with given fields: -func (_m *ItemAction) AppliesTo() (velero.ResourceSelector, error) { +func (_m *BackupItemAction) AppliesTo() (velero.ResourceSelector, error) { ret := _m.Called() var r0 velero.ResourceSelector @@ -50,13 +52,13 @@ func (_m *ItemAction) AppliesTo() (velero.ResourceSelector, error) { return r0, r1 } -// Execute provides a mock function with given fields: item, _a1 -func (_m *ItemAction) Execute(item runtime.Unstructured, _a1 *v1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - ret := _m.Called(item, _a1) +// Execute provides a mock function with given fields: item, backup +func (_m *BackupItemAction) Execute(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { + ret := _m.Called(item, backup) var r0 runtime.Unstructured - if rf, ok := ret.Get(0).(func(runtime.Unstructured, *v1.Backup) runtime.Unstructured); ok { - r0 = rf(item, _a1) + if rf, ok := ret.Get(0).(func(runtime.Unstructured, *velerov1.Backup) runtime.Unstructured); ok { + r0 = rf(item, backup) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(runtime.Unstructured) @@ -64,8 +66,8 @@ func (_m *ItemAction) Execute(item runtime.Unstructured, _a1 *v1.Backup) (runtim } var r1 []velero.ResourceIdentifier - if rf, ok := ret.Get(1).(func(runtime.Unstructured, *v1.Backup) []velero.ResourceIdentifier); ok { - r1 = rf(item, _a1) + if rf, ok := ret.Get(1).(func(runtime.Unstructured, *velerov1.Backup) []velero.ResourceIdentifier); ok { + r1 = rf(item, backup) } else { if ret.Get(1) != nil { r1 = ret.Get(1).([]velero.ResourceIdentifier) @@ -73,8 +75,8 @@ func (_m *ItemAction) Execute(item runtime.Unstructured, _a1 *v1.Backup) (runtim } var r2 error - if rf, ok := ret.Get(2).(func(runtime.Unstructured, *v1.Backup) error); ok { - r2 = rf(item, _a1) + if rf, ok := ret.Get(2).(func(runtime.Unstructured, *velerov1.Backup) error); ok { + r2 = rf(item, backup) } else { r2 = ret.Error(2) } diff --git a/pkg/restore/mocks/item_action.go b/pkg/plugin/velero/mocks/restoreitemaction/v1/RestoreItemAction.go similarity index 60% rename from pkg/restore/mocks/item_action.go rename to pkg/plugin/velero/mocks/restoreitemaction/v1/RestoreItemAction.go index 8bdf3bbbc9..5bf33ed851 100644 --- a/pkg/restore/mocks/item_action.go +++ b/pkg/plugin/velero/mocks/restoreitemaction/v1/RestoreItemAction.go @@ -1,5 +1,5 @@ /* -Copyright 2018 the Velero contributors. +Copyright the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,21 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ // Code generated by mockery v1.0.0. DO NOT EDIT. 
-package mocks + +package v1 import ( mock "github.com/stretchr/testify/mock" - - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + velero "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) -// ItemAction is an autogenerated mock type for the ItemAction type -type ItemAction struct { +// RestoreItemAction is an autogenerated mock type for the RestoreItemAction type +type RestoreItemAction struct { mock.Mock } // AppliesTo provides a mock function with given fields: -func (_m *ItemAction) AppliesTo() (velero.ResourceSelector, error) { +func (_m *RestoreItemAction) AppliesTo() (velero.ResourceSelector, error) { ret := _m.Called() var r0 velero.ResourceSelector @@ -49,20 +50,20 @@ func (_m *ItemAction) AppliesTo() (velero.ResourceSelector, error) { } // Execute provides a mock function with given fields: input -func (_m *ItemAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (_m *RestoreItemAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { ret := _m.Called(input) - var r0 *velero.RestoreItemActionExecuteOutput - if rf, ok := ret.Get(0).(func(*velero.RestoreItemActionExecuteInput) *velero.RestoreItemActionExecuteOutput); ok { + var r0 *riav1.RestoreItemActionExecuteOutput + if rf, ok := ret.Get(0).(func(*riav1.RestoreItemActionExecuteInput) *riav1.RestoreItemActionExecuteOutput); ok { r0 = rf(input) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*velero.RestoreItemActionExecuteOutput) + r0 = ret.Get(0).(*riav1.RestoreItemActionExecuteOutput) } } var r1 error - if rf, ok := ret.Get(1).(func(*velero.RestoreItemActionExecuteInput) error); ok { + if rf, ok := ret.Get(1).(func(*riav1.RestoreItemActionExecuteInput) error); ok { r1 = rf(input) } else { r1 = ret.Error(1) diff --git a/pkg/plugin/velero/mocks/volume_snapshotter.go b/pkg/plugin/velero/mocks/volumesnapshotter/v1/VolumeSnapshotter.go similarity index 100% rename from pkg/plugin/velero/mocks/volume_snapshotter.go rename to pkg/plugin/velero/mocks/volumesnapshotter/v1/VolumeSnapshotter.go diff --git a/pkg/plugin/velero/restore_item_action.go b/pkg/plugin/velero/restoreitemaction/v1/restore_item_action.go similarity index 94% rename from pkg/plugin/velero/restore_item_action.go rename to pkg/plugin/velero/restoreitemaction/v1/restore_item_action.go index ea758c93a5..a50f3021f0 100644 --- a/pkg/plugin/velero/restore_item_action.go +++ b/pkg/plugin/velero/restoreitemaction/v1/restore_item_action.go @@ -1,5 +1,5 @@ /* -Copyright 2017, 2019 the Velero contributors. +Copyright the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package velero +package v1 import ( "k8s.io/apimachinery/pkg/runtime" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/plugin/velero" ) // RestoreItemAction is an actor that performs an operation on an individual item being restored. @@ -27,7 +28,7 @@ type RestoreItemAction interface { // AppliesTo returns information about which resources this action should be invoked for. // A RestoreItemAction's Execute function will only be invoked on items that match the returned // selector. A zero-valued ResourceSelector matches all resources. 
- AppliesTo() (ResourceSelector, error) + AppliesTo() (velero.ResourceSelector, error) // Execute allows the ItemAction to perform arbitrary logic with the item being restored, // including mutating the item itself prior to restore. The item (unmodified or modified) @@ -56,7 +57,7 @@ type RestoreItemActionExecuteOutput struct { // AdditionalItems is a list of additional related items that should // be restored. - AdditionalItems []ResourceIdentifier + AdditionalItems []velero.ResourceIdentifier // SkipRestore tells velero to stop executing further actions // on this item, and skip the restore step. When this field's diff --git a/pkg/plugin/velero/shared.go b/pkg/plugin/velero/shared.go index 76c7464ad0..a0d3d884c2 100644 --- a/pkg/plugin/velero/shared.go +++ b/pkg/plugin/velero/shared.go @@ -20,6 +20,8 @@ limitations under the License. // plugins of any type can be implemented. package velero +import "k8s.io/apimachinery/pkg/runtime/schema" + // ResourceSelector is a collection of included/excluded namespaces, // included/excluded resources, and a label-selector that can be used // to match a set of items from a cluster. @@ -54,3 +56,10 @@ type Applicable interface { // AppliesTo returns information about which resources this Responder should be invoked for. AppliesTo() (ResourceSelector, error) } + +// ResourceIdentifier describes a single item by its group, resource, namespace, and name. +type ResourceIdentifier struct { + schema.GroupResource + Namespace string + Name string +} diff --git a/pkg/plugin/velero/volume_snapshotter.go b/pkg/plugin/velero/volumesnapshotter/v1/volume_snapshotter.go similarity index 97% rename from pkg/plugin/velero/volume_snapshotter.go rename to pkg/plugin/velero/volumesnapshotter/v1/volume_snapshotter.go index 1a4e38bc0b..2c52b753f9 100644 --- a/pkg/plugin/velero/volume_snapshotter.go +++ b/pkg/plugin/velero/volumesnapshotter/v1/volume_snapshotter.go @@ -1,5 +1,5 @@ /* -Copyright 2017, 2019 the Velero contributors. +Copyright the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package velero +package v1 import ( "k8s.io/apimachinery/pkg/runtime" diff --git a/pkg/podvolume/backupper.go b/pkg/podvolume/backupper.go index 116a5c4e77..3a5db6ceb6 100644 --- a/pkg/podvolume/backupper.go +++ b/pkg/podvolume/backupper.go @@ -32,8 +32,10 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" "github.com/vmware-tanzu/velero/pkg/label" + "github.com/vmware-tanzu/velero/pkg/nodeagent" "github.com/vmware-tanzu/velero/pkg/repository" "github.com/vmware-tanzu/velero/pkg/util/boolptr" + "github.com/vmware-tanzu/velero/pkg/util/kube" ) // Backupper can execute restic backups of volumes in a pod. 
@@ -49,6 +51,8 @@ type backupper struct { veleroClient clientset.Interface pvcClient corev1client.PersistentVolumeClaimsGetter pvClient corev1client.PersistentVolumesGetter + podClient corev1client.PodsGetter + uploaderType string results map[string]chan *velerov1api.PodVolumeBackup resultsLock sync.Mutex @@ -62,6 +66,8 @@ func newBackupper( veleroClient clientset.Interface, pvcClient corev1client.PersistentVolumeClaimsGetter, pvClient corev1client.PersistentVolumesGetter, + podClient corev1client.PodsGetter, + uploaderType string, log logrus.FieldLogger, ) *backupper { b := &backupper{ @@ -71,6 +77,8 @@ func newBackupper( veleroClient: veleroClient, pvcClient: pvcClient, pvClient: pvClient, + podClient: podClient, + uploaderType: uploaderType, results: make(map[string]chan *velerov1api.PodVolumeBackup), } @@ -107,7 +115,23 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. return nil, nil } - repo, err := b.repoEnsurer.EnsureRepo(b.ctx, backup.Namespace, pod.Namespace, backup.Spec.StorageLocation) + repositoryType := getRepositoryType(b.uploaderType) + if repositoryType == "" { + err := errors.Errorf("empty repository type, uploader %s", b.uploaderType) + return nil, []error{err} + } + + repo, err := b.repoEnsurer.EnsureRepo(b.ctx, backup.Namespace, pod.Namespace, backup.Spec.StorageLocation, repositoryType) + if err != nil { + return nil, []error{err} + } + + err = kube.IsPodRunning(pod) + if err != nil { + return nil, []error{err} + } + + err = nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.podClient) if err != nil { return nil, []error{err} } @@ -158,11 +182,6 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. } } - // ignore non-running pods - if pod.Status.Phase != corev1api.PodRunning { - log.Warnf("Skipping volume %s in pod %s/%s - pod not running", volumeName, pod.Namespace, pod.Name) - continue - } // hostPath volumes are not supported because they're not mounted into /var/lib/kubelet/pods, so our // daemonset pod has no way to access their data. isHostPath, err := isHostPathVolume(&volume, pvc, b.pvClient.PersistentVolumes()) @@ -182,8 +201,7 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. continue } - // TODO: Remove the hard-coded uploader type before v1.10 FC - volumeBackup := newPodVolumeBackup(backup, pod, volume, repo.Spec.ResticIdentifier, "restic", pvc) + volumeBackup := newPodVolumeBackup(backup, pod, volume, repo.Spec.ResticIdentifier, b.uploaderType, pvc) if volumeBackup, err = b.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(context.TODO(), volumeBackup, metav1.CreateOptions{}); err != nil { errs = append(errs, err) continue diff --git a/pkg/podvolume/backupper_factory.go b/pkg/podvolume/backupper_factory.go index aaaa5e2ac1..7b87865e22 100644 --- a/pkg/podvolume/backupper_factory.go +++ b/pkg/podvolume/backupper_factory.go @@ -35,7 +35,7 @@ import ( // BackupperFactory can construct pod volumes backuppers. type BackupperFactory interface { // NewBackupper returns a pod volumes backupper for use during a single Velero backup. 
- NewBackupper(context.Context, *velerov1api.Backup) (Backupper, error) + NewBackupper(context.Context, *velerov1api.Backup, string) (Backupper, error) } func NewBackupperFactory(repoLocker *repository.RepoLocker, @@ -43,6 +43,7 @@ func NewBackupperFactory(repoLocker *repository.RepoLocker, veleroClient clientset.Interface, pvcClient corev1client.PersistentVolumeClaimsGetter, pvClient corev1client.PersistentVolumesGetter, + podClient corev1client.PodsGetter, repoInformerSynced cache.InformerSynced, log logrus.FieldLogger) BackupperFactory { return &backupperFactory{ @@ -51,6 +52,7 @@ func NewBackupperFactory(repoLocker *repository.RepoLocker, veleroClient: veleroClient, pvcClient: pvcClient, pvClient: pvClient, + podClient: podClient, repoInformerSynced: repoInformerSynced, log: log, } @@ -62,11 +64,12 @@ type backupperFactory struct { veleroClient clientset.Interface pvcClient corev1client.PersistentVolumeClaimsGetter pvClient corev1client.PersistentVolumesGetter + podClient corev1client.PodsGetter repoInformerSynced cache.InformerSynced log logrus.FieldLogger } -func (bf *backupperFactory) NewBackupper(ctx context.Context, backup *velerov1api.Backup) (Backupper, error) { +func (bf *backupperFactory) NewBackupper(ctx context.Context, backup *velerov1api.Backup, uploaderType string) (Backupper, error) { informer := velerov1informers.NewFilteredPodVolumeBackupInformer( bf.veleroClient, backup.Namespace, @@ -77,7 +80,7 @@ func (bf *backupperFactory) NewBackupper(ctx context.Context, backup *velerov1ap }, ) - b := newBackupper(ctx, bf.repoLocker, bf.repoEnsurer, informer, bf.veleroClient, bf.pvcClient, bf.pvClient, bf.log) + b := newBackupper(ctx, bf.repoLocker, bf.repoEnsurer, informer, bf.veleroClient, bf.pvcClient, bf.pvClient, bf.podClient, uploaderType, bf.log) go informer.Run(ctx.Done()) if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced, bf.repoInformerSynced) { diff --git a/pkg/podvolume/restorer.go b/pkg/podvolume/restorer.go index daa3a630d7..09bb3a790f 100644 --- a/pkg/podvolume/restorer.go +++ b/pkg/podvolume/restorer.go @@ -56,6 +56,7 @@ type restorer struct { resultsLock sync.Mutex results map[string]chan *velerov1api.PodVolumeRestore + log logrus.FieldLogger } func newRestorer( @@ -75,6 +76,7 @@ func newRestorer( pvcClient: pvcClient, results: make(map[string]chan *velerov1api.PodVolumeRestore), + log: log, } podVolumeRestoreInformer.AddEventHandler( @@ -101,12 +103,17 @@ func newRestorer( } func (r *restorer) RestorePodVolumes(data RestoreData) []error { - volumesToRestore := GetVolumeBackupsForPod(data.PodVolumeBackups, data.Pod, data.SourceNamespace) + volumesToRestore := getVolumeBackupInfoForPod(data.PodVolumeBackups, data.Pod, data.SourceNamespace) if len(volumesToRestore) == 0 { return nil } - repo, err := r.repoEnsurer.EnsureRepo(r.ctx, data.Restore.Namespace, data.SourceNamespace, data.BackupLocation) + repositoryType, err := getVolumesRepositoryType(volumesToRestore) + if err != nil { + return []error{err} + } + + repo, err := r.repoEnsurer.EnsureRepo(r.ctx, data.Restore.Namespace, data.SourceNamespace, data.BackupLocation, repositoryType) if err != nil { return []error{err} } @@ -132,7 +139,7 @@ func (r *restorer) RestorePodVolumes(data RestoreData) []error { for _, podVolume := range data.Pod.Spec.Volumes { podVolumes[podVolume.Name] = podVolume } - for volume, snapshot := range volumesToRestore { + for volume, backupInfo := range volumesToRestore { volumeObj, ok := podVolumes[volume] var pvc *corev1api.PersistentVolumeClaim if ok { @@ -144,8 +151,8 @@ func 
(r *restorer) RestorePodVolumes(data RestoreData) []error { } } } - // TODO: Remove the hard-coded uploader type before v1.10 FC - volumeRestore := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, snapshot, repo.Spec.ResticIdentifier, "restic", pvc) + + volumeRestore := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, backupInfo.snapshotID, repo.Spec.ResticIdentifier, backupInfo.uploaderType, data.SourceNamespace, pvc) if err := errorOnly(r.veleroClient.VeleroV1().PodVolumeRestores(volumeRestore.Namespace).Create(context.TODO(), volumeRestore, metav1.CreateOptions{})); err != nil { errs = append(errs, errors.WithStack(err)) @@ -174,7 +181,7 @@ ForEachVolume: return errs } -func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backupLocation, volume, snapshot, repoIdentifier, uploaderType string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeRestore { +func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backupLocation, volume, snapshot, repoIdentifier, uploaderType, sourceNamespace string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeRestore { pvr := &velerov1api.PodVolumeRestore{ ObjectMeta: metav1.ObjectMeta{ Namespace: restore.Namespace, @@ -205,6 +212,8 @@ func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backu SnapshotID: snapshot, BackupStorageLocation: backupLocation, RepoIdentifier: repoIdentifier, + UploaderType: uploaderType, + SourceNamespace: sourceNamespace, }, } if pvc != nil { @@ -213,3 +222,29 @@ func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backu } return pvr } + +func getVolumesRepositoryType(volumes map[string]volumeBackupInfo) (string, error) { + if len(volumes) == 0 { + return "", errors.New("empty volume list") + } + + // the podVolumeBackups list come from one backup. In one backup, it is impossible that volumes are + // backed up by different uploaders or to different repositories. Asserting this ensures one repo only, + // which will simplify the following logics + repositoryType := "" + for _, backupInfo := range volumes { + if backupInfo.repositoryType == "" { + return "", errors.Errorf("empty repository type found among volume snapshots, snapshot ID %s, uploader %s", + backupInfo.snapshotID, backupInfo.uploaderType) + } + + if repositoryType == "" { + repositoryType = backupInfo.repositoryType + } else if repositoryType != backupInfo.repositoryType { + return "", errors.Errorf("multiple repository type in one backup, current type %s, differential one [type %s, snapshot ID %s, uploader %s]", + repositoryType, backupInfo.repositoryType, backupInfo.snapshotID, backupInfo.uploaderType) + } + } + + return repositoryType, nil +} diff --git a/pkg/podvolume/restorer_test.go b/pkg/podvolume/restorer_test.go new file mode 100644 index 0000000000..19e3ec44bd --- /dev/null +++ b/pkg/podvolume/restorer_test.go @@ -0,0 +1,102 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podvolume + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetVolumesRepositoryType(t *testing.T) { + testCases := []struct { + name string + volumes map[string]volumeBackupInfo + expected string + expectedErr string + prefixOnly bool + }{ + { + name: "empty volume", + expectedErr: "empty volume list", + }, + { + name: "empty repository type, first one", + volumes: map[string]volumeBackupInfo{ + "volume1": {"fake-snapshot-id-1", "fake-uploader-1", ""}, + "volume2": {"", "", "fake-type"}, + }, + expectedErr: "empty repository type found among volume snapshots, snapshot ID fake-snapshot-id-1, uploader fake-uploader-1", + }, + { + name: "empty repository type, last one", + volumes: map[string]volumeBackupInfo{ + "volume1": {"", "", "fake-type"}, + "volume2": {"", "", "fake-type"}, + "volume3": {"fake-snapshot-id-3", "fake-uploader-3", ""}, + }, + expectedErr: "empty repository type found among volume snapshots, snapshot ID fake-snapshot-id-3, uploader fake-uploader-3", + }, + { + name: "empty repository type, middle one", + volumes: map[string]volumeBackupInfo{ + "volume1": {"", "", "fake-type"}, + "volume2": {"fake-snapshot-id-2", "fake-uploader-2", ""}, + "volume3": {"", "", "fake-type"}, + }, + expectedErr: "empty repository type found among volume snapshots, snapshot ID fake-snapshot-id-2, uploader fake-uploader-2", + }, + { + name: "mismatch repository type", + volumes: map[string]volumeBackupInfo{ + "volume1": {"", "", "fake-type1"}, + "volume2": {"fake-snapshot-id-2", "fake-uploader-2", "fake-type2"}, + }, + prefixOnly: true, + expectedErr: "multiple repository type in one backup", + }, + { + name: "success", + volumes: map[string]volumeBackupInfo{ + "volume1": {"", "", "fake-type"}, + "volume2": {"", "", "fake-type"}, + "volume3": {"", "", "fake-type"}, + }, + expected: "fake-type", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + actual, err := getVolumesRepositoryType(tc.volumes) + assert.Equal(t, tc.expected, actual) + + if err != nil { + if tc.prefixOnly { + errMsg := err.Error() + if len(errMsg) >= len(tc.expectedErr) { + errMsg = errMsg[0:len(tc.expectedErr)] + } + + assert.Equal(t, tc.expectedErr, errMsg) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + } + }) + } +} diff --git a/pkg/podvolume/util.go b/pkg/podvolume/util.go index 57baacc106..d75b393153 100644 --- a/pkg/podvolume/util.go +++ b/pkg/podvolume/util.go @@ -23,6 +23,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/repository" + "github.com/vmware-tanzu/velero/pkg/uploader" ) const ( @@ -42,12 +44,53 @@ const ( // VolumesToExcludeAnnotation is the annotation on a pod whose mounted volumes // should be excluded from restic backup. VolumesToExcludeAnnotation = "backup.velero.io/backup-volumes-excludes" + + // InitContainer is the name of the init container added + // to workload pods to help with restores. + InitContainer = "restic-wait" + + // DefaultVolumesToFsBackup specifies whether pod volume backup should be used, by default, to + // take backup of all pod volumes. 
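TestGetVolumesRepositoryType above pins down the invariant the restorer now relies on: every PodVolumeBackup being restored for a pod must resolve to one and the same repository type. A dependency-free sketch of that invariant, together with the uploader-to-repository-type mapping it builds on, follows; the plain "restic"/"kopia" strings and the volumeInfo struct are stand-ins for uploader.ResticType/KopiaType, velerov1api.BackupRepositoryTypeRestic/Kopia and the volumeBackupInfo type, not the actual Velero identifiers.

package podvolumesketch

import "fmt"

type volumeInfo struct {
	snapshotID     string
	uploaderType   string
	repositoryType string
}

// repositoryTypeFor mirrors getRepositoryType: an empty or restic uploader maps to the
// restic repository, the kopia uploader maps to the kopia (unified) repository, and any
// other value is unknown.
func repositoryTypeFor(uploaderType string) string {
	switch uploaderType {
	case "", "restic":
		return "restic"
	case "kopia":
		return "kopia"
	default:
		return ""
	}
}

// singleRepositoryType returns the repository type shared by all volumes, or an error if
// the map is empty, a type is missing, or two volumes disagree.
func singleRepositoryType(volumes map[string]volumeInfo) (string, error) {
	if len(volumes) == 0 {
		return "", fmt.Errorf("empty volume list")
	}
	repoType := ""
	for name, v := range volumes {
		switch {
		case v.repositoryType == "":
			return "", fmt.Errorf("volume %s has no repository type (snapshot %s, uploader %s)", name, v.snapshotID, v.uploaderType)
		case repoType == "":
			repoType = v.repositoryType
		case repoType != v.repositoryType:
			return "", fmt.Errorf("volume %s uses repository type %s, expected %s", name, v.repositoryType, repoType)
		}
	}
	return repoType, nil
}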
+ DefaultVolumesToFsBackup = false ) +// volumeBackupInfo describes the backup info of a volume backed up by PodVolumeBackups +type volumeBackupInfo struct { + snapshotID string + uploaderType string + repositoryType string +} + // GetVolumeBackupsForPod returns a map, of volume name -> snapshot id, // of the PodVolumeBackups that exist for the provided pod. func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod *corev1api.Pod, sourcePodNs string) map[string]string { + volumeBkInfo := getVolumeBackupInfoForPod(podVolumeBackups, pod, sourcePodNs) + if volumeBkInfo == nil { + return nil + } + volumes := make(map[string]string) + for k, v := range volumeBkInfo { + volumes[k] = v.snapshotID + } + + return volumes +} + +// GetPvbRepositoryType returns the repositoryType according to the PVB information +func GetPvbRepositoryType(pvb *velerov1api.PodVolumeBackup) string { + return getRepositoryType(pvb.Spec.UploaderType) +} + +// GetPvrRepositoryType returns the repositoryType according to the PVR information +func GetPvrRepositoryType(pvr *velerov1api.PodVolumeRestore) string { + return getRepositoryType(pvr.Spec.UploaderType) +} + +// getVolumeBackupInfoForPod returns a map, of volume name -> VolumeBackupInfo, +// of the PodVolumeBackups that exist for the provided pod. +func getVolumeBackupInfoForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod *corev1api.Pod, sourcePodNs string) map[string]volumeBackupInfo { + volumes := make(map[string]volumeBackupInfo) for _, pvb := range podVolumeBackups { if !isPVBMatchPod(pvb, pod.GetName(), sourcePodNs) { @@ -67,14 +110,74 @@ func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod continue } - volumes[pvb.Spec.Volume] = pvb.Status.SnapshotID + volumes[pvb.Spec.Volume] = volumeBackupInfo{ + snapshotID: pvb.Status.SnapshotID, + uploaderType: getUploaderTypeOrDefault(pvb.Spec.UploaderType), + repositoryType: getRepositoryType(pvb.Spec.UploaderType), + } } if len(volumes) > 0 { return volumes } - return getPodSnapshotAnnotations(pod) + fromAnnntation := getPodSnapshotAnnotations(pod) + if fromAnnntation == nil { + return nil + } + + for k, v := range fromAnnntation { + volumes[k] = volumeBackupInfo{v, uploader.ResticType, velerov1api.BackupRepositoryTypeRestic} + } + + return volumes +} + +// GetSnapshotIdentifier returns the snapshots represented by SnapshotIdentifier for the given PVBs +func GetSnapshotIdentifier(podVolumeBackups *velerov1api.PodVolumeBackupList) []repository.SnapshotIdentifier { + var res []repository.SnapshotIdentifier + for _, item := range podVolumeBackups.Items { + if item.Status.SnapshotID == "" { + continue + } + + res = append(res, repository.SnapshotIdentifier{ + VolumeNamespace: item.Spec.Pod.Namespace, + BackupStorageLocation: item.Spec.BackupStorageLocation, + SnapshotID: item.Status.SnapshotID, + RepositoryType: getRepositoryType(item.Spec.UploaderType), + }) + } + + return res +} + +func getUploaderTypeOrDefault(uploaderType string) string { + if uploaderType != "" { + return uploaderType + } else { + return uploader.ResticType + } +} + +// getRepositoryType returns the hardcode repositoryType for different backup methods - Restic or Kopia,uploaderType +// indicates the method. +// For Restic backup method, it is always hardcode to BackupRepositoryTypeRestic, never changed. +// For Kopia backup method, this means we hardcode repositoryType as BackupRepositoryTypeKopia for Unified Repo, +// at present (Kopia backup method is using Unified Repo). 
However, it doesn't mean we could deduce repositoryType +// from uploaderType for Unified Repo. +// TODO: post v1.10, refactor this function for Kopia backup method. In future, when we have multiple implementations of +// Unified Repo (besides Kopia), we will add the repositoryType to BSL, because by then, we are not able to hardcode +// the repositoryType to BackupRepositoryTypeKopia for Unified Repo. +func getRepositoryType(uploaderType string) string { + switch uploaderType { + case "", uploader.ResticType: + return velerov1api.BackupRepositoryTypeRestic + case uploader.KopiaType: + return velerov1api.BackupRepositoryTypeKopia + default: + return "" + } } func isPVBMatchPod(pvb *velerov1api.PodVolumeBackup, podName string, namespace string) bool { @@ -154,9 +257,9 @@ func contains(list []string, k string) bool { return false } -// GetPodVolumesUsingRestic returns a list of volume names to backup for the provided pod. -func GetPodVolumesUsingRestic(pod *corev1api.Pod, defaultVolumesToRestic bool) []string { - if !defaultVolumesToRestic { +// GetVolumesByPod returns a list of volume names to backup for the provided pod. +func GetVolumesByPod(pod *corev1api.Pod, defaultVolumesToFsBackup bool) []string { + if !defaultVolumesToFsBackup { return GetVolumesToBackup(pod) } diff --git a/pkg/podvolume/util_test.go b/pkg/podvolume/util_test.go index 88b5746685..936ee26d37 100644 --- a/pkg/podvolume/util_test.go +++ b/pkg/podvolume/util_test.go @@ -348,16 +348,16 @@ func TestGetVolumesToBackup(t *testing.T) { } } -func TestGetPodVolumesUsingRestic(t *testing.T) { +func TestGetVolumesByPod(t *testing.T) { testCases := []struct { - name string - pod *corev1api.Pod - expected []string - defaultVolumesToRestic bool + name string + pod *corev1api.Pod + expected []string + defaultVolumesToFsBackup bool }{ { - name: "should get PVs from VolumesToBackupAnnotation when defaultVolumesToRestic is false", - defaultVolumesToRestic: false, + name: "should get PVs from VolumesToBackupAnnotation when defaultVolumesToFsBackup is false", + defaultVolumesToFsBackup: false, pod: &corev1api.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -368,8 +368,8 @@ func TestGetPodVolumesUsingRestic(t *testing.T) { expected: []string{"resticPV1", "resticPV2", "resticPV3"}, }, { - name: "should get all pod volumes when defaultVolumesToRestic is true and no PVs are excluded", - defaultVolumesToRestic: true, + name: "should get all pod volumes when defaultVolumesToFsBackup is true and no PVs are excluded", + defaultVolumesToFsBackup: true, pod: &corev1api.Pod{ Spec: corev1api.PodSpec{ Volumes: []corev1api.Volume{ @@ -381,8 +381,8 @@ func TestGetPodVolumesUsingRestic(t *testing.T) { expected: []string{"resticPV1", "resticPV2", "resticPV3"}, }, { - name: "should get all pod volumes except ones excluded when defaultVolumesToRestic is true", - defaultVolumesToRestic: true, + name: "should get all pod volumes except ones excluded when defaultVolumesToFsBackup is true", + defaultVolumesToFsBackup: true, pod: &corev1api.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -401,8 +401,8 @@ func TestGetPodVolumesUsingRestic(t *testing.T) { expected: []string{"resticPV1", "resticPV2", "resticPV3"}, }, { - name: "should exclude default service account token from restic backup", - defaultVolumesToRestic: true, + name: "should exclude default service account token from restic backup", + defaultVolumesToFsBackup: true, pod: &corev1api.Pod{ Spec: corev1api.PodSpec{ Volumes: []corev1api.Volume{ @@ -416,8 
+416,8 @@ func TestGetPodVolumesUsingRestic(t *testing.T) { expected: []string{"resticPV1", "resticPV2", "resticPV3"}, }, { - name: "should exclude host path volumes from restic backups", - defaultVolumesToRestic: true, + name: "should exclude host path volumes from restic backups", + defaultVolumesToFsBackup: true, pod: &corev1api.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -438,8 +438,8 @@ func TestGetPodVolumesUsingRestic(t *testing.T) { expected: []string{"resticPV1", "resticPV2", "resticPV3"}, }, { - name: "should exclude volumes mounting secrets", - defaultVolumesToRestic: true, + name: "should exclude volumes mounting secrets", + defaultVolumesToFsBackup: true, pod: &corev1api.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -460,8 +460,8 @@ func TestGetPodVolumesUsingRestic(t *testing.T) { expected: []string{"resticPV1", "resticPV2", "resticPV3"}, }, { - name: "should exclude volumes mounting config maps", - defaultVolumesToRestic: true, + name: "should exclude volumes mounting config maps", + defaultVolumesToFsBackup: true, pod: &corev1api.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -482,8 +482,8 @@ func TestGetPodVolumesUsingRestic(t *testing.T) { expected: []string{"resticPV1", "resticPV2", "resticPV3"}, }, { - name: "should exclude projected volumes", - defaultVolumesToRestic: true, + name: "should exclude projected volumes", + defaultVolumesToFsBackup: true, pod: &corev1api.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -517,8 +517,8 @@ func TestGetPodVolumesUsingRestic(t *testing.T) { expected: []string{"resticPV1", "resticPV2", "resticPV3"}, }, { - name: "should exclude DownwardAPI volumes", - defaultVolumesToRestic: true, + name: "should exclude DownwardAPI volumes", + defaultVolumesToFsBackup: true, pod: &corev1api.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -553,7 +553,7 @@ func TestGetPodVolumesUsingRestic(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - actual := GetPodVolumesUsingRestic(tc.pod, tc.defaultVolumesToRestic) + actual := GetVolumesByPod(tc.pod, tc.defaultVolumesToFsBackup) sort.Strings(tc.expected) sort.Strings(actual) diff --git a/pkg/repository/backup_repo_op.go b/pkg/repository/backup_repo_op.go new file mode 100644 index 0000000000..3ae0f28121 --- /dev/null +++ b/pkg/repository/backup_repo_op.go @@ -0,0 +1,115 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package repository + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/label" +) + +// A BackupRepositoryKey uniquely identify a backup repository +type BackupRepositoryKey struct { + VolumeNamespace string + BackupLocation string + RepositoryType string +} + +var ( + backupRepoNotFoundError = errors.New("backup repository not found") + backupRepoNotProvisionedError = errors.New("backup repository not provisioned") +) + +func repoLabelsFromKey(key BackupRepositoryKey) labels.Set { + return map[string]string{ + velerov1api.VolumeNamespaceLabel: label.GetValidName(key.VolumeNamespace), + velerov1api.StorageLocationLabel: label.GetValidName(key.BackupLocation), + velerov1api.RepositoryTypeLabel: label.GetValidName(key.RepositoryType), + } +} + +// GetBackupRepository gets a backup repository through BackupRepositoryKey and ensure ready if required. +func GetBackupRepository(ctx context.Context, cli client.Client, namespace string, key BackupRepositoryKey, options ...bool) (*velerov1api.BackupRepository, error) { + var ensureReady = true + if len(options) > 0 { + ensureReady = options[0] + } + + selector := labels.SelectorFromSet(repoLabelsFromKey(key)) + + backupRepoList := &velerov1api.BackupRepositoryList{} + err := cli.List(ctx, backupRepoList, &client.ListOptions{ + Namespace: namespace, + LabelSelector: selector, + }) + + if err != nil { + return nil, errors.Wrap(err, "error getting backup repository list") + } + + if len(backupRepoList.Items) == 0 { + return nil, backupRepoNotFoundError + } + + if len(backupRepoList.Items) > 1 { + return nil, errors.Errorf("more than one BackupRepository found for workload namespace %q, backup storage location %q, repository type %q", key.VolumeNamespace, key.BackupLocation, key.RepositoryType) + } + + repo := &backupRepoList.Items[0] + + if ensureReady { + if repo.Status.Phase == velerov1api.BackupRepositoryPhaseNotReady { + return nil, errors.Errorf("backup repository is not ready: %s", repo.Status.Message) + } + + if repo.Status.Phase == "" || repo.Status.Phase == velerov1api.BackupRepositoryPhaseNew { + return nil, backupRepoNotProvisionedError + } + } + + return repo, nil +} + +func newBackupRepository(namespace string, key BackupRepositoryKey) *velerov1api.BackupRepository { + return &velerov1api.BackupRepository{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + GenerateName: fmt.Sprintf("%s-%s-%s-", key.VolumeNamespace, key.BackupLocation, key.RepositoryType), + Labels: repoLabelsFromKey(key), + }, + Spec: velerov1api.BackupRepositorySpec{ + VolumeNamespace: key.VolumeNamespace, + BackupStorageLocation: key.BackupLocation, + RepositoryType: key.RepositoryType, + }, + } +} + +func isBackupRepositoryNotFoundError(err error) bool { + return (err == backupRepoNotFoundError) +} + +func isBackupRepositoryNotProvisionedError(err error) bool { + return (err == backupRepoNotProvisionedError) +} diff --git a/pkg/repository/backup_repo_op_test.go b/pkg/repository/backup_repo_op_test.go new file mode 100644 index 0000000000..a317e22c2b --- /dev/null +++ b/pkg/repository/backup_repo_op_test.go @@ -0,0 +1,169 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repository + +import ( + "context" + "fmt" + + "github.com/stretchr/testify/assert" + + "testing" + + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + + velerotest "github.com/vmware-tanzu/velero/pkg/test" +) + +func buildBackupRepo(key BackupRepositoryKey, phase velerov1api.BackupRepositoryPhase, seqNum string) velerov1api.BackupRepository { + return velerov1api.BackupRepository{ + Spec: velerov1api.BackupRepositorySpec{ResticIdentifier: ""}, + TypeMeta: metav1.TypeMeta{ + APIVersion: velerov1api.SchemeGroupVersion.String(), + Kind: "BackupRepository", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1api.DefaultNamespace, + Name: fmt.Sprintf("%s-%s-%s-%s", key.VolumeNamespace, key.BackupLocation, key.RepositoryType, seqNum), + Labels: map[string]string{ + velerov1api.StorageLocationLabel: key.BackupLocation, + velerov1api.VolumeNamespaceLabel: key.VolumeNamespace, + velerov1api.RepositoryTypeLabel: key.RepositoryType, + }, + }, + Status: velerov1api.BackupRepositoryStatus{ + Phase: phase, + }, + } +} + +func buildBackupRepoPointer(key BackupRepositoryKey, phase velerov1api.BackupRepositoryPhase, seqNum string) *velerov1api.BackupRepository { + value := buildBackupRepo(key, phase, seqNum) + return &value +} + +func TestGetBackupRepository(t *testing.T) { + testCases := []struct { + name string + backupRepositories []velerov1api.BackupRepository + ensureReady bool + backupRepositoryKey BackupRepositoryKey + expected *velerov1api.BackupRepository + expectedErr string + }{ + { + name: "repository not found", + expectedErr: "backup repository not found", + }, + { + name: "found more than one repository", + backupRepositories: []velerov1api.BackupRepository{ + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns", "fake-bsl", "fake-repository-type"}, velerov1api.BackupRepositoryPhaseReady, "01"), + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns", "fake-bsl", "fake-repository-type"}, velerov1api.BackupRepositoryPhaseReady, "02")}, + backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns", "fake-bsl", "fake-repository-type"}, + expectedErr: "more than one BackupRepository found for workload namespace \"fake-volume-ns\", backup storage location \"fake-bsl\", repository type \"fake-repository-type\"", + }, + { + name: "repository not ready, not expect ready", + backupRepositories: []velerov1api.BackupRepository{ + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNotReady, "02")}, + backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, + expected: buildBackupRepoPointer(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNotReady, "02"), + }, + { + name: "repository is new, not expect ready", + backupRepositories: 
[]velerov1api.BackupRepository{ + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNew, "02")}, + backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, + expected: buildBackupRepoPointer(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNew, "02"), + }, + { + name: "repository state is empty, not expect ready", + backupRepositories: []velerov1api.BackupRepository{ + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, "", "02")}, + backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, + expected: buildBackupRepoPointer(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, "", "02"), + }, + { + name: "repository not ready, expect ready", + backupRepositories: []velerov1api.BackupRepository{ + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNotReady, "02")}, + backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, + ensureReady: true, + expectedErr: "backup repository is not ready: ", + }, + { + name: "repository is new, expect ready", + backupRepositories: []velerov1api.BackupRepository{ + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNew, "02")}, + backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, + ensureReady: true, + expectedErr: "backup repository not provisioned", + }, + { + name: "repository state is empty, expect ready", + backupRepositories: []velerov1api.BackupRepository{ + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, "", "02")}, + backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, + ensureReady: true, + expectedErr: "backup repository not provisioned", + }, + { + name: "repository ready, expect ready", + backupRepositories: []velerov1api.BackupRepository{ + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseNotReady, "01"), + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseReady, "02")}, + backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, + ensureReady: true, + expected: 
buildBackupRepoPointer(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseReady, "02"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + clientBuilder := velerotest.NewFakeControllerRuntimeClientBuilder(t) + clientBuilder.WithLists(&velerov1api.BackupRepositoryList{ + Items: tc.backupRepositories, + }) + fakeClient := clientBuilder.Build() + + backupRepo, err := GetBackupRepository(context.Background(), fakeClient, velerov1api.DefaultNamespace, tc.backupRepositoryKey, tc.ensureReady) + + if backupRepo != nil && tc.expected != nil { + backupRepo.ResourceVersion = tc.expected.ResourceVersion + require.Equal(t, *tc.expected, *backupRepo) + } else { + require.Equal(t, tc.expected, backupRepo) + } + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} diff --git a/pkg/repository/ensurer.go b/pkg/repository/ensurer.go index 15aa107014..5527ac7422 100644 --- a/pkg/repository/ensurer.go +++ b/pkg/repository/ensurer.go @@ -18,198 +18,116 @@ package repository import ( "context" - "fmt" "sync" "time" "github.com/pkg/errors" "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + "k8s.io/apimachinery/pkg/util/wait" + + "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1" - velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1" - velerov1listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" - "github.com/vmware-tanzu/velero/pkg/label" ) // RepositoryEnsurer ensures that backup repositories are created and ready. type RepositoryEnsurer struct { log logrus.FieldLogger - repoLister velerov1listers.BackupRepositoryLister - repoClient velerov1client.BackupRepositoriesGetter - - repoChansLock sync.Mutex - repoChans map[string]chan *velerov1api.BackupRepository + repoClient client.Client // repoLocksMu synchronizes reads/writes to the repoLocks map itself // since maps are not threadsafe. 
repoLocksMu sync.Mutex - repoLocks map[repoKey]*sync.Mutex + repoLocks map[BackupRepositoryKey]*sync.Mutex } -type repoKey struct { - volumeNamespace string - backupLocation string -} - -func NewRepositoryEnsurer(repoInformer velerov1informers.BackupRepositoryInformer, repoClient velerov1client.BackupRepositoriesGetter, log logrus.FieldLogger) *RepositoryEnsurer { - r := &RepositoryEnsurer{ +func NewRepositoryEnsurer(repoClient client.Client, log logrus.FieldLogger) *RepositoryEnsurer { + return &RepositoryEnsurer{ log: log, - repoLister: repoInformer.Lister(), repoClient: repoClient, - repoChans: make(map[string]chan *velerov1api.BackupRepository), - repoLocks: make(map[repoKey]*sync.Mutex), + repoLocks: make(map[BackupRepositoryKey]*sync.Mutex), } - - repoInformer.Informer().AddEventHandler( - cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(old, upd interface{}) { - oldObj := old.(*velerov1api.BackupRepository) - newObj := upd.(*velerov1api.BackupRepository) - - // we're only interested in phase-changing updates - if oldObj.Status.Phase == newObj.Status.Phase { - return - } - - // we're only interested in updates where the updated object is either Ready or NotReady - if newObj.Status.Phase != velerov1api.BackupRepositoryPhaseReady && newObj.Status.Phase != velerov1api.BackupRepositoryPhaseNotReady { - return - } - - r.repoChansLock.Lock() - defer r.repoChansLock.Unlock() - - key := repoLabels(newObj.Spec.VolumeNamespace, newObj.Spec.BackupStorageLocation).String() - repoChan, ok := r.repoChans[key] - if !ok { - log.Debugf("No ready channel found for repository %s/%s", newObj.Namespace, newObj.Name) - return - } - - repoChan <- newObj - }, - }, - ) - - return r } -func repoLabels(volumeNamespace, backupLocation string) labels.Set { - return map[string]string{ - velerov1api.ResticVolumeNamespaceLabel: label.GetValidName(volumeNamespace), - velerov1api.StorageLocationLabel: label.GetValidName(backupLocation), +func (r *RepositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNamespace, backupLocation, repositoryType string) (*velerov1api.BackupRepository, error) { + if volumeNamespace == "" || backupLocation == "" || repositoryType == "" { + return nil, errors.Errorf("wrong parameters, namespace %q, backup storage location %q, repository type %q", volumeNamespace, backupLocation, repositoryType) } -} -func (r *RepositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNamespace, backupLocation string) (*velerov1api.BackupRepository, error) { - log := r.log.WithField("volumeNamespace", volumeNamespace).WithField("backupLocation", backupLocation) + backupRepoKey := BackupRepositoryKey{volumeNamespace, backupLocation, repositoryType} + + log := r.log.WithField("volumeNamespace", volumeNamespace).WithField("backupLocation", backupLocation).WithField("repositoryType", repositoryType) // It's only safe to have one instance of this method executing concurrently for a - // given volumeNamespace + backupLocation, so synchronize based on that. It's fine - // to run concurrently for *different* namespaces/locations. If you had 2 goroutines - // running this for the same inputs, both might find no ResticRepository exists, then - // both would create new ones for the same namespace/location. + // given BackupRepositoryKey, so synchronize based on that. It's fine + // to run concurrently for *different* BackupRepositoryKey. 
If you had 2 goroutines + // running this for the same inputs, both might find no BackupRepository exists, then + // both would create new ones for the same BackupRepositoryKey. // // This issue could probably be avoided if we had a deterministic name for - // each restic repository, and we just tried to create it, checked for an + // each BackupRepository and we just tried to create it, checked for an // AlreadyExists err, and then waited for it to be ready. However, there are // already repositories in the wild with non-deterministic names (i.e. using // GenerateName) which poses a backwards compatibility problem. log.Debug("Acquiring lock") - repoMu := r.repoLock(volumeNamespace, backupLocation) + repoMu := r.repoLock(backupRepoKey) repoMu.Lock() defer func() { repoMu.Unlock() log.Debug("Released lock") }() - log.Debug("Acquired lock") - - selector := labels.SelectorFromSet(repoLabels(volumeNamespace, backupLocation)) - - repos, err := r.repoLister.BackupRepositories(namespace).List(selector) - if err != nil { - return nil, errors.WithStack(err) - } - if len(repos) > 1 { - return nil, errors.Errorf("more than one ResticRepository found for workload namespace %q, backup storage location %q", volumeNamespace, backupLocation) + repo, err := GetBackupRepository(ctx, r.repoClient, namespace, backupRepoKey, true) + if err == nil { + log.Debug("Ready repository found") + return repo, nil } - if len(repos) == 1 { - if repos[0].Status.Phase != velerov1api.BackupRepositoryPhaseReady { - return nil, errors.Errorf("restic repository is not ready: %s", repos[0].Status.Message) - } - log.Debug("Ready repository found") - return repos[0], nil + if !isBackupRepositoryNotFoundError(err) { + return nil, errors.WithStack(err) } log.Debug("No repository found, creating one") // no repo found: create one and wait for it to be ready - repo := &velerov1api.BackupRepository{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - GenerateName: fmt.Sprintf("%s-%s-", volumeNamespace, backupLocation), - Labels: repoLabels(volumeNamespace, backupLocation), - }, - Spec: velerov1api.BackupRepositorySpec{ - VolumeNamespace: volumeNamespace, - BackupStorageLocation: backupLocation, - }, - } - - repoChan := r.getRepoChan(selector.String()) - defer func() { - delete(r.repoChans, selector.String()) - close(repoChan) - }() - - if _, err := r.repoClient.BackupRepositories(namespace).Create(context.TODO(), repo, metav1.CreateOptions{}); err != nil { - return nil, errors.Wrapf(err, "unable to create restic repository resource") - } - - select { - // repositories should become either ready or not ready quickly if they're - // newly created. 
- case <-time.After(time.Minute): - return nil, errors.New("timed out waiting for restic repository to become ready") - case <-ctx.Done(): - return nil, errors.New("timed out waiting for restic repository to become ready") - case res := <-repoChan: - - if res.Status.Phase == velerov1api.BackupRepositoryPhaseNotReady { - return nil, errors.Errorf("restic repository is not ready: %s", res.Status.Message) - } - - return res, nil - } + return r.createBackupRepositoryAndWait(ctx, namespace, backupRepoKey) } -func (r *RepositoryEnsurer) getRepoChan(name string) chan *velerov1api.BackupRepository { - r.repoChansLock.Lock() - defer r.repoChansLock.Unlock() - - r.repoChans[name] = make(chan *velerov1api.BackupRepository) - return r.repoChans[name] -} - -func (r *RepositoryEnsurer) repoLock(volumeNamespace, backupLocation string) *sync.Mutex { +func (r *RepositoryEnsurer) repoLock(key BackupRepositoryKey) *sync.Mutex { r.repoLocksMu.Lock() defer r.repoLocksMu.Unlock() - key := repoKey{ - volumeNamespace: volumeNamespace, - backupLocation: backupLocation, - } - if r.repoLocks[key] == nil { r.repoLocks[key] = new(sync.Mutex) } return r.repoLocks[key] } + +func (r *RepositoryEnsurer) createBackupRepositoryAndWait(ctx context.Context, namespace string, backupRepoKey BackupRepositoryKey) (*velerov1api.BackupRepository, error) { + toCreate := newBackupRepository(namespace, backupRepoKey) + if err := r.repoClient.Create(ctx, toCreate, &client.CreateOptions{}); err != nil { + return nil, errors.Wrap(err, "unable to create backup repository resource") + } + + var repo *velerov1api.BackupRepository + checkFunc := func(ctx context.Context) (bool, error) { + found, err := GetBackupRepository(ctx, r.repoClient, namespace, backupRepoKey, true) + if err == nil { + repo = found + return true, nil + } else if isBackupRepositoryNotFoundError(err) || isBackupRepositoryNotProvisionedError(err) { + return false, nil + } else { + return false, err + } + } + + err := wait.PollWithContext(ctx, time.Millisecond*500, time.Minute, checkFunc) + if err != nil { + return nil, errors.Wrap(err, "failed to wait BackupRepository") + } else { + return repo, nil + } +} diff --git a/pkg/repository/manager.go b/pkg/repository/manager.go index eb700d1062..2ffa147805 100644 --- a/pkg/repository/manager.go +++ b/pkg/repository/manager.go @@ -19,6 +19,7 @@ package repository import ( "context" "fmt" + "time" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -27,21 +28,41 @@ import ( "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/repository/provider" - "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) +// SnapshotIdentifier uniquely identifies a restic snapshot +// taken by Velero. +type SnapshotIdentifier struct { + // VolumeNamespace is the namespace of the pod/volume that + // the restic snapshot is for. + VolumeNamespace string + + // BackupStorageLocation is the backup's storage location + // name. + BackupStorageLocation string + + // SnapshotID is the short ID of the restic snapshot. + SnapshotID string + + // RepositoryType is the type of the repository where the + // snapshot is stored + RepositoryType string +} + // Manager manages backup repositories. type Manager interface { // InitRepo initializes a repo with the specified name and identifier. 
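createBackupRepositoryAndWait above replaces the old informer-plus-channel wait with a plain poll loop: create the BackupRepository, then re-read it until it becomes ready, treating not-found and not-provisioned as "keep waiting". A minimal sketch of that create-then-poll pattern follows; repoIsReady is a hypothetical callback standing in for GetBackupRepository with ensureReady set, and the 500ms/1-minute values simply echo the intervals in the diff.

package repositorysketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitUntilReady polls repoIsReady every 500ms for up to one minute. Returning
// (false, nil) from the callback means "not ready yet, keep polling"; any non-nil
// error aborts the wait immediately.
func waitUntilReady(ctx context.Context, repoIsReady func(ctx context.Context) (bool, error)) error {
	return wait.PollWithContext(ctx, 500*time.Millisecond, time.Minute, repoIsReady)
}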
InitRepo(repo *velerov1api.BackupRepository) error - // ConnectToRepo runs the 'restic snapshots' command against the - // specified repo, and returns an error if it fails. This is - // intended to be used to ensure that the repo exists/can be - // authenticated to. + // ConnectToRepo tries to connect to the specified repo, and returns an error if it fails. + // This is intended to be used to ensure that the repo exists/can be authenticated to. ConnectToRepo(repo *velerov1api.BackupRepository) error + // PrepareRepo tries to connect to the specific repo first, if it fails because of the + // repo is not initialized, it turns to initialize the repo + PrepareRepo(repo *velerov1api.BackupRepository) error + // PruneRepo deletes unused data from a repo. PruneRepo(repo *velerov1api.BackupRepository) error @@ -50,7 +71,9 @@ type Manager interface { // Forget removes a snapshot from the list of // available snapshots in a repo. - Forget(context.Context, restic.SnapshotIdentifier) error + Forget(context.Context, SnapshotIdentifier) error + // DefaultMaintenanceFrequency returns the default maintenance frequency from the specific repo + DefaultMaintenanceFrequency(repo *velerov1api.BackupRepository) (time.Duration, error) } type manager struct { @@ -70,6 +93,7 @@ func NewManager( repoLocker *RepoLocker, repoEnsurer *RepositoryEnsurer, credentialFileStore credentials.FileStore, + credentialSecretStore credentials.SecretStore, log logrus.FieldLogger, ) Manager { mgr := &manager{ @@ -83,6 +107,10 @@ func NewManager( } mgr.providers[velerov1api.BackupRepositoryTypeRestic] = provider.NewResticRepositoryProvider(credentialFileStore, mgr.fileSystem, mgr.log) + mgr.providers[velerov1api.BackupRepositoryTypeKopia] = provider.NewUnifiedRepoProvider(credentials.CredentialGetter{ + FromFile: credentialFileStore, + FromSecret: credentialSecretStore, + }, velerov1api.BackupRepositoryTypeKopia, mgr.log) return mgr } @@ -117,6 +145,21 @@ func (m *manager) ConnectToRepo(repo *velerov1api.BackupRepository) error { return prd.ConnectToRepo(context.Background(), param) } +func (m *manager) PrepareRepo(repo *velerov1api.BackupRepository) error { + m.repoLocker.Lock(repo.Name) + defer m.repoLocker.Unlock(repo.Name) + + prd, err := m.getRepositoryProvider(repo) + if err != nil { + return errors.WithStack(err) + } + param, err := m.assembleRepoParam(repo) + if err != nil { + return errors.WithStack(err) + } + return prd.PrepareRepo(context.Background(), param) +} + func (m *manager) PruneRepo(repo *velerov1api.BackupRepository) error { m.repoLocker.LockExclusive(repo.Name) defer m.repoLocker.UnlockExclusive(repo.Name) @@ -147,8 +190,8 @@ func (m *manager) UnlockRepo(repo *velerov1api.BackupRepository) error { return prd.EnsureUnlockRepo(context.Background(), param) } -func (m *manager) Forget(ctx context.Context, snapshot restic.SnapshotIdentifier) error { - repo, err := m.repoEnsurer.EnsureRepo(ctx, m.namespace, snapshot.VolumeNamespace, snapshot.BackupStorageLocation) +func (m *manager) Forget(ctx context.Context, snapshot SnapshotIdentifier) error { + repo, err := m.repoEnsurer.EnsureRepo(ctx, m.namespace, snapshot.VolumeNamespace, snapshot.BackupStorageLocation, snapshot.RepositoryType) if err != nil { return err } @@ -167,10 +210,26 @@ func (m *manager) Forget(ctx context.Context, snapshot restic.SnapshotIdentifier return prd.Forget(context.Background(), snapshot.SnapshotID, param) } +func (m *manager) DefaultMaintenanceFrequency(repo *velerov1api.BackupRepository) (time.Duration, error) { + prd, err := 
m.getRepositoryProvider(repo) + if err != nil { + return 0, errors.WithStack(err) + } + + param, err := m.assembleRepoParam(repo) + if err != nil { + return 0, errors.WithStack(err) + } + + return prd.DefaultMaintenanceFrequency(context.Background(), param), nil +} + func (m *manager) getRepositoryProvider(repo *velerov1api.BackupRepository) (provider.Provider, error) { switch repo.Spec.RepositoryType { case "", velerov1api.BackupRepositoryTypeRestic: return m.providers[velerov1api.BackupRepositoryTypeRestic], nil + case velerov1api.BackupRepositoryTypeKopia: + return m.providers[velerov1api.BackupRepositoryTypeKopia], nil default: return nil, fmt.Errorf("failed to get provider for repository %s", repo.Spec.RepositoryType) } diff --git a/pkg/repository/manager_test.go b/pkg/repository/manager_test.go index 7692a8b219..4d84919d2a 100644 --- a/pkg/repository/manager_test.go +++ b/pkg/repository/manager_test.go @@ -26,7 +26,7 @@ import ( ) func TestGetRepositoryProvider(t *testing.T) { - mgr := NewManager("", nil, nil, nil, nil, nil).(*manager) + mgr := NewManager("", nil, nil, nil, nil, nil, nil).(*manager) repo := &velerov1.BackupRepository{} // empty repository type diff --git a/pkg/repository/mocks/Manager.go b/pkg/repository/mocks/Manager.go new file mode 100644 index 0000000000..5508ce9581 --- /dev/null +++ b/pkg/repository/mocks/Manager.go @@ -0,0 +1,139 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + repository "github.com/vmware-tanzu/velero/pkg/repository" + + time "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +// Manager is an autogenerated mock type for the Manager type +type Manager struct { + mock.Mock +} + +// ConnectToRepo provides a mock function with given fields: repo +func (_m *Manager) ConnectToRepo(repo *v1.BackupRepository) error { + ret := _m.Called(repo) + + var r0 error + if rf, ok := ret.Get(0).(func(*v1.BackupRepository) error); ok { + r0 = rf(repo) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DefaultMaintenanceFrequency provides a mock function with given fields: repo +func (_m *Manager) DefaultMaintenanceFrequency(repo *v1.BackupRepository) (time.Duration, error) { + ret := _m.Called(repo) + + var r0 time.Duration + if rf, ok := ret.Get(0).(func(*v1.BackupRepository) time.Duration); ok { + r0 = rf(repo) + } else { + r0 = ret.Get(0).(time.Duration) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*v1.BackupRepository) error); ok { + r1 = rf(repo) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Forget provides a mock function with given fields: _a0, _a1 +func (_m *Manager) Forget(_a0 context.Context, _a1 repository.SnapshotIdentifier) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, repository.SnapshotIdentifier) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InitRepo provides a mock function with given fields: repo +func (_m *Manager) InitRepo(repo *v1.BackupRepository) error { + ret := _m.Called(repo) + + var r0 error + if rf, ok := ret.Get(0).(func(*v1.BackupRepository) error); ok { + r0 = rf(repo) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PrepareRepo provides a mock function with given fields: repo +func (_m *Manager) PrepareRepo(repo *v1.BackupRepository) error { + ret := _m.Called(repo) + + var r0 error + if rf, ok := ret.Get(0).(func(*v1.BackupRepository) error); ok { + r0 = rf(repo) + } 
else { + r0 = ret.Error(0) + } + + return r0 +} + +// PruneRepo provides a mock function with given fields: repo +func (_m *Manager) PruneRepo(repo *v1.BackupRepository) error { + ret := _m.Called(repo) + + var r0 error + if rf, ok := ret.Get(0).(func(*v1.BackupRepository) error); ok { + r0 = rf(repo) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UnlockRepo provides a mock function with given fields: repo +func (_m *Manager) UnlockRepo(repo *v1.BackupRepository) error { + ret := _m.Called(repo) + + var r0 error + if rf, ok := ret.Get(0).(func(*v1.BackupRepository) error); ok { + r0 = rf(repo) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type mockConstructorTestingTNewManager interface { + mock.TestingT + Cleanup(func()) +} + +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewManager(t mockConstructorTestingTNewManager) *Manager { + mock := &Manager{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/repository/mocks/repository_manager.go b/pkg/repository/mocks/repository_manager.go deleted file mode 100644 index 5533706741..0000000000 --- a/pkg/repository/mocks/repository_manager.go +++ /dev/null @@ -1,148 +0,0 @@ -/* -Copyright the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.*/ -// Code generated by mockery v2.10.2. DO NOT EDIT. 
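The mockery-generated Manager mock above supersedes the hand-written RepositoryManager mock deleted just below. A short usage sketch, assuming the usual testify/mockery workflow; the test itself is hypothetical and not taken from the repository.

package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/repository/mocks"
)

func TestPrepareRepoStub(t *testing.T) {
	// NewManager registers a cleanup hook that asserts every expectation was met.
	mgr := mocks.NewManager(t)
	mgr.On("PrepareRepo", mock.AnythingOfType("*v1.BackupRepository")).Return(nil)

	require.NoError(t, mgr.PrepareRepo(&velerov1api.BackupRepository{}))
}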
- -package mocks - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - restic "github.com/vmware-tanzu/velero/pkg/restic" - - v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/podvolume" -) - -// RepositoryManager is an autogenerated mock type for the RepositoryManager type -type RepositoryManager struct { - mock.Mock -} - -// ConnectToRepo provides a mock function with given fields: repo -func (_m *RepositoryManager) ConnectToRepo(repo *v1.BackupRepository) error { - ret := _m.Called(repo) - - var r0 error - if rf, ok := ret.Get(0).(func(*v1.BackupRepository) error); ok { - r0 = rf(repo) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Forget provides a mock function with given fields: _a0, _a1 -func (_m *RepositoryManager) Forget(_a0 context.Context, _a1 restic.SnapshotIdentifier) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, restic.SnapshotIdentifier) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// InitRepo provides a mock function with given fields: repo -func (_m *RepositoryManager) InitRepo(repo *v1.BackupRepository) error { - ret := _m.Called(repo) - - var r0 error - if rf, ok := ret.Get(0).(func(repository *v1.BackupRepository) error); ok { - r0 = rf(repo) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// NewBackupper provides a mock function with given fields: _a0, _a1 -func (_m *RepositoryManager) NewBackupper(_a0 context.Context, _a1 *v1.Backup) (podvolume.Backupper, error) { - ret := _m.Called(_a0, _a1) - - var r0 podvolume.Backupper - if rf, ok := ret.Get(0).(func(context.Context, *v1.Backup) podvolume.Backupper); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(podvolume.Backupper) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *v1.Backup) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NewRestorer provides a mock function with given fields: _a0, _a1 -func (_m *RepositoryManager) NewRestorer(_a0 context.Context, _a1 *v1.Restore) (podvolume.Restorer, error) { - ret := _m.Called(_a0, _a1) - - var r0 podvolume.Restorer - if rf, ok := ret.Get(0).(func(context.Context, *v1.Restore) podvolume.Restorer); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(podvolume.Restorer) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *v1.Restore) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PruneRepo provides a mock function with given fields: repo -func (_m *RepositoryManager) PruneRepo(repo *v1.BackupRepository) error { - ret := _m.Called(repo) - - var r0 error - if rf, ok := ret.Get(0).(func(repository *v1.BackupRepository) error); ok { - r0 = rf(repo) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UnlockRepo provides a mock function with given fields: repo -func (_m *RepositoryManager) UnlockRepo(repo *v1.BackupRepository) error { - ret := _m.Called(repo) - - var r0 error - if rf, ok := ret.Get(0).(func(repository *v1.BackupRepository) error); ok { - r0 = rf(repo) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/pkg/repository/mocks/repository_writer.go b/pkg/repository/mocks/repository_writer.go new file mode 100644 index 0000000000..c3e9964bef --- /dev/null +++ b/pkg/repository/mocks/repository_writer.go @@ -0,0 +1,349 @@ +/* +Copyright The Velero Contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mocks + +import ( + context "context" + + index "github.com/kopia/kopia/repo/content/index" + manifest "github.com/kopia/kopia/repo/manifest" + + mock "github.com/stretchr/testify/mock" + + object "github.com/kopia/kopia/repo/object" + + repo "github.com/kopia/kopia/repo" + + time "time" +) + +// RepositoryWriter is an autogenerated mock type for the RepositoryWriter type +type RepositoryWriter struct { + mock.Mock +} + +// ClientOptions provides a mock function with given fields: +func (_m *RepositoryWriter) ClientOptions() repo.ClientOptions { + ret := _m.Called() + + var r0 repo.ClientOptions + if rf, ok := ret.Get(0).(func() repo.ClientOptions); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(repo.ClientOptions) + } + + return r0 +} + +// Close provides a mock function with given fields: ctx +func (_m *RepositoryWriter) Close(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ContentInfo provides a mock function with given fields: ctx, contentID +func (_m *RepositoryWriter) ContentInfo(ctx context.Context, contentID index.ID) (index.Info, error) { + ret := _m.Called(ctx, contentID) + + var r0 index.Info + if rf, ok := ret.Get(0).(func(context.Context, index.ID) index.Info); ok { + r0 = rf(ctx, contentID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(index.Info) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, index.ID) error); ok { + r1 = rf(ctx, contentID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteManifest provides a mock function with given fields: ctx, id +func (_m *RepositoryWriter) DeleteManifest(ctx context.Context, id manifest.ID) error { + ret := _m.Called(ctx, id) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, manifest.ID) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FindManifests provides a mock function with given fields: ctx, labels +func (_m *RepositoryWriter) FindManifests(ctx context.Context, labels map[string]string) ([]*manifest.EntryMetadata, error) { + ret := _m.Called(ctx, labels) + + var r0 []*manifest.EntryMetadata + if rf, ok := ret.Get(0).(func(context.Context, map[string]string) []*manifest.EntryMetadata); ok { + r0 = rf(ctx, labels) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*manifest.EntryMetadata) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, map[string]string) error); ok { + r1 = rf(ctx, labels) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Flush provides a mock function with given fields: ctx +func (_m *RepositoryWriter) Flush(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetManifest provides a mock function with given fields: ctx, 
id, data +func (_m *RepositoryWriter) GetManifest(ctx context.Context, id manifest.ID, data interface{}) (*manifest.EntryMetadata, error) { + ret := _m.Called(ctx, id, data) + + var r0 *manifest.EntryMetadata + if rf, ok := ret.Get(0).(func(context.Context, manifest.ID, interface{}) *manifest.EntryMetadata); ok { + r0 = rf(ctx, id, data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*manifest.EntryMetadata) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, manifest.ID, interface{}) error); ok { + r1 = rf(ctx, id, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewObjectWriter provides a mock function with given fields: ctx, opt +func (_m *RepositoryWriter) NewObjectWriter(ctx context.Context, opt object.WriterOptions) object.Writer { + ret := _m.Called(ctx, opt) + + var r0 object.Writer + if rf, ok := ret.Get(0).(func(context.Context, object.WriterOptions) object.Writer); ok { + r0 = rf(ctx, opt) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(object.Writer) + } + } + + return r0 +} + +// NewWriter provides a mock function with given fields: ctx, opt +func (_m *RepositoryWriter) NewWriter(ctx context.Context, opt repo.WriteSessionOptions) (context.Context, repo.RepositoryWriter, error) { + ret := _m.Called(ctx, opt) + + var r0 context.Context + if rf, ok := ret.Get(0).(func(context.Context, repo.WriteSessionOptions) context.Context); ok { + r0 = rf(ctx, opt) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(context.Context) + } + } + + var r1 repo.RepositoryWriter + if rf, ok := ret.Get(1).(func(context.Context, repo.WriteSessionOptions) repo.RepositoryWriter); ok { + r1 = rf(ctx, opt) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(repo.RepositoryWriter) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, repo.WriteSessionOptions) error); ok { + r2 = rf(ctx, opt) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// OpenObject provides a mock function with given fields: ctx, id +func (_m *RepositoryWriter) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) { + ret := _m.Called(ctx, id) + + var r0 object.Reader + if rf, ok := ret.Get(0).(func(context.Context, object.ID) object.Reader); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(object.Reader) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PrefetchContents provides a mock function with given fields: ctx, contentIDs, hint +func (_m *RepositoryWriter) PrefetchContents(ctx context.Context, contentIDs []index.ID, hint string) []index.ID { + ret := _m.Called(ctx, contentIDs, hint) + + var r0 []index.ID + if rf, ok := ret.Get(0).(func(context.Context, []index.ID, string) []index.ID); ok { + r0 = rf(ctx, contentIDs, hint) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]index.ID) + } + } + + return r0 +} + +// PrefetchObjects provides a mock function with given fields: ctx, objectIDs, hint +func (_m *RepositoryWriter) PrefetchObjects(ctx context.Context, objectIDs []object.ID, hint string) ([]index.ID, error) { + ret := _m.Called(ctx, objectIDs, hint) + + var r0 []index.ID + if rf, ok := ret.Get(0).(func(context.Context, []object.ID, string) []index.ID); ok { + r0 = rf(ctx, objectIDs, hint) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]index.ID) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []object.ID, string) 
error); ok { + r1 = rf(ctx, objectIDs, hint) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PutManifest provides a mock function with given fields: ctx, labels, payload +func (_m *RepositoryWriter) PutManifest(ctx context.Context, labels map[string]string, payload interface{}) (manifest.ID, error) { + ret := _m.Called(ctx, labels, payload) + + var r0 manifest.ID + if rf, ok := ret.Get(0).(func(context.Context, map[string]string, interface{}) manifest.ID); ok { + r0 = rf(ctx, labels, payload) + } else { + r0 = ret.Get(0).(manifest.ID) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, map[string]string, interface{}) error); ok { + r1 = rf(ctx, labels, payload) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Refresh provides a mock function with given fields: ctx +func (_m *RepositoryWriter) Refresh(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Time provides a mock function with given fields: +func (_m *RepositoryWriter) Time() time.Time { + ret := _m.Called() + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// UpdateDescription provides a mock function with given fields: d +func (_m *RepositoryWriter) UpdateDescription(d string) { + _m.Called(d) +} + +// VerifyObject provides a mock function with given fields: ctx, id +func (_m *RepositoryWriter) VerifyObject(ctx context.Context, id object.ID) ([]index.ID, error) { + ret := _m.Called(ctx, id) + + var r0 []index.ID + if rf, ok := ret.Get(0).(func(context.Context, object.ID) []index.ID); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]index.ID) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/pkg/repository/provider/provider.go b/pkg/repository/provider/provider.go index 8e6a639a4a..6579386d6e 100644 --- a/pkg/repository/provider/provider.go +++ b/pkg/repository/provider/provider.go @@ -18,6 +18,7 @@ package provider import ( "context" + "time" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" ) @@ -28,28 +29,29 @@ type RepoParam struct { BackupRepo *velerov1api.BackupRepository } +// Provider defines the methods to manipulate a backup repository type Provider interface { - //InitRepo is to initialize a repository from a new storage place + // InitRepo is to initialize a repository from a new storage place InitRepo(ctx context.Context, param RepoParam) error - //ConnectToRepo is to establish the connection to a - //storage place that a repository is already initialized + // ConnectToRepo is to establish the connection to a + // storage place that a repository is already initialized ConnectToRepo(ctx context.Context, param RepoParam) error - //PrepareRepo is a combination of InitRepo and ConnectToRepo, - //it may do initializing + connecting, connecting only if the repository - //is already initialized, or do nothing if the repository is already connected + // PrepareRepo is a combination of InitRepo and ConnectToRepo, + // it may do initializing + connecting, connecting only if the repository + // is already initialized, or do nothing if the repository is already connected PrepareRepo(ctx context.Context, param RepoParam) error - //PruneRepo does a full 
prune/maintenance of the repository + // PruneRepo does a full prune/maintenance of the repository PruneRepo(ctx context.Context, param RepoParam) error - //PruneRepoQuick does a quick prune/maintenance of the repository if available - PruneRepoQuick(ctx context.Context, param RepoParam) error - - //EnsureUnlockRepo esures to remove any stale file locks in the storage + // EnsureUnlockRepo ensures any stale file locks in the storage are removed EnsureUnlockRepo(ctx context.Context, param RepoParam) error - //Forget is to delete a snapshot from the repository + // Forget is to delete a snapshot from the repository Forget(ctx context.Context, snapshotID string, param RepoParam) error + + // DefaultMaintenanceFrequency returns the default frequency to run maintenance + DefaultMaintenanceFrequency(ctx context.Context, param RepoParam) time.Duration } diff --git a/pkg/repository/provider/restic.go b/pkg/repository/provider/restic.go index 3659f1be76..65038f0f22 100644 --- a/pkg/repository/provider/restic.go +++ b/pkg/repository/provider/restic.go @@ -18,6 +18,8 @@ package provider import ( "context" + "strings" + "time" "github.com/sirupsen/logrus" @@ -45,21 +47,25 @@ func (r *resticRepositoryProvider) ConnectToRepo(ctx context.Context, param Repo } func (r *resticRepositoryProvider) PrepareRepo(ctx context.Context, param RepoParam) error { - if err := r.InitRepo(ctx, param); err != nil { + if err := r.ConnectToRepo(ctx, param); err != nil { + // If the repository has not yet been initialized, the error message will always include + // the following string. This is the only scenario where we should try to initialize it. + // Other errors (e.g. "already locked") should be returned as-is since the repository + // already exists but cannot be connected to.
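+		// A minimal sketch of the resulting decision flow (illustrative; it assumes the
+		// prompt above is the only reliable "repository not found" signal available here,
+		// since the underlying repository service shells out to the restic CLI rather
+		// than returning typed errors):
+		//   connect succeeds                                                        -> repository is ready, return nil
+		//   connect fails with "Is there a repository at the following location?"   -> run InitRepo to create it
+		//   connect fails with any other error (e.g. "already locked")              -> return the error unchanged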
+ if strings.Contains(err.Error(), "Is there a repository at the following location?") { + return r.InitRepo(ctx, param) + } + return err } - return r.ConnectToRepo(ctx, param) + + return nil } func (r *resticRepositoryProvider) PruneRepo(ctx context.Context, param RepoParam) error { return r.svc.PruneRepo(param.BackupLocation, param.BackupRepo) } -func (r *resticRepositoryProvider) PruneRepoQuick(ctx context.Context, param RepoParam) error { - // restic doesn't support this operation - return nil -} - func (r *resticRepositoryProvider) EnsureUnlockRepo(ctx context.Context, param RepoParam) error { return r.svc.UnlockRepo(param.BackupLocation, param.BackupRepo) } @@ -67,3 +73,7 @@ func (r *resticRepositoryProvider) EnsureUnlockRepo(ctx context.Context, param R func (r *resticRepositoryProvider) Forget(ctx context.Context, snapshotID string, param RepoParam) error { return r.svc.Forget(param.BackupLocation, param.BackupRepo, snapshotID) } + +func (r *resticRepositoryProvider) DefaultMaintenanceFrequency(ctx context.Context, param RepoParam) time.Duration { + return r.svc.DefaultMaintenanceFrequency() +} diff --git a/pkg/repository/provider/unified_repo.go b/pkg/repository/provider/unified_repo.go index 994d0a5ca2..6bb4f38445 100644 --- a/pkg/repository/provider/unified_repo.go +++ b/pkg/repository/provider/unified_repo.go @@ -19,8 +19,11 @@ package provider import ( "context" "fmt" + "net/url" "path" + "strconv" "strings" + "time" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -37,6 +40,7 @@ type unifiedRepoProvider struct { credentialGetter credentials.CredentialGetter workPath string repoService udmrepo.BackupRepoService + repoBackend string log logrus.FieldLogger } @@ -49,7 +53,7 @@ var getS3BucketRegion = repoconfig.GetAWSBucketRegion var getAzureStorageDomain = repoconfig.GetAzureStorageDomain type localFuncTable struct { - getStorageVariables func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) + getStorageVariables func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) getStorageCredentials func(*velerov1api.BackupStorageLocation, credentials.FileStore) (map[string]string, error) } @@ -59,41 +63,49 @@ var funcTable = localFuncTable{ } const ( - repoOpDescFullMaintain = "full maintenance" - repoOpDescQuickMaintain = "quick maintenance" - repoOpDescForget = "forget" + repoOpDescMaintain = "repo maintenance" + repoOpDescForget = "forget" + + repoConnectDesc = "unfied repo" ) // NewUnifiedRepoProvider creates the service provider for Unified Repo func NewUnifiedRepoProvider( credentialGetter credentials.CredentialGetter, + repoBackend string, log logrus.FieldLogger, -) (Provider, error) { +) Provider { repo := unifiedRepoProvider{ credentialGetter: credentialGetter, + repoBackend: repoBackend, log: log, } repo.repoService = createRepoService(log) - log.Debug("Finished create unified repo service") - - return &repo, nil + return &repo } func (urp *unifiedRepoProvider) InitRepo(ctx context.Context, param RepoParam) error { log := urp.log.WithFields(logrus.Fields{ - "BSL name": param.BackupLocation.Name, - "BSL UID": param.BackupLocation.UID, + "BSL name": param.BackupLocation.Name, + "repo name": param.BackupRepo.Name, + "repo UID": param.BackupRepo.UID, }) log.Debug("Start to init repo") repoOption, err := udmrepo.NewRepoOptions( udmrepo.WithPassword(urp, param), - udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)), + udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)), + 
udmrepo.WithGenOptions( + map[string]string{ + udmrepo.GenOptionOwnerName: udmrepo.GetRepoUser(), + udmrepo.GenOptionOwnerDomain: udmrepo.GetRepoDomain(), + }, + ), udmrepo.WithStoreOptions(urp, param), - udmrepo.WithDescription(repoOpDescFullMaintain), + udmrepo.WithDescription(repoConnectDesc), ) if err != nil { @@ -112,17 +124,24 @@ func (urp *unifiedRepoProvider) InitRepo(ctx context.Context, param RepoParam) e func (urp *unifiedRepoProvider) ConnectToRepo(ctx context.Context, param RepoParam) error { log := urp.log.WithFields(logrus.Fields{ - "BSL name": param.BackupLocation.Name, - "BSL UID": param.BackupLocation.UID, + "BSL name": param.BackupLocation.Name, + "repo name": param.BackupRepo.Name, + "repo UID": param.BackupRepo.UID, }) log.Debug("Start to connect repo") repoOption, err := udmrepo.NewRepoOptions( udmrepo.WithPassword(urp, param), - udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)), + udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)), + udmrepo.WithGenOptions( + map[string]string{ + udmrepo.GenOptionOwnerName: udmrepo.GetRepoUser(), + udmrepo.GenOptionOwnerDomain: udmrepo.GetRepoDomain(), + }, + ), udmrepo.WithStoreOptions(urp, param), - udmrepo.WithDescription(repoOpDescFullMaintain), + udmrepo.WithDescription(repoConnectDesc), ) if err != nil { @@ -141,17 +160,24 @@ func (urp *unifiedRepoProvider) ConnectToRepo(ctx context.Context, param RepoPar func (urp *unifiedRepoProvider) PrepareRepo(ctx context.Context, param RepoParam) error { log := urp.log.WithFields(logrus.Fields{ - "BSL name": param.BackupLocation.Name, - "BSL UID": param.BackupLocation.UID, + "BSL name": param.BackupLocation.Name, + "repo name": param.BackupRepo.Name, + "repo UID": param.BackupRepo.UID, }) log.Debug("Start to prepare repo") repoOption, err := udmrepo.NewRepoOptions( udmrepo.WithPassword(urp, param), - udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)), + udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)), + udmrepo.WithGenOptions( + map[string]string{ + udmrepo.GenOptionOwnerName: udmrepo.GetRepoUser(), + udmrepo.GenOptionOwnerDomain: udmrepo.GetRepoDomain(), + }, + ), udmrepo.WithStoreOptions(urp, param), - udmrepo.WithDescription(repoOpDescFullMaintain), + udmrepo.WithDescription(repoConnectDesc), ) if err != nil { @@ -176,17 +202,17 @@ func (urp *unifiedRepoProvider) PrepareRepo(ctx context.Context, param RepoParam func (urp *unifiedRepoProvider) PruneRepo(ctx context.Context, param RepoParam) error { log := urp.log.WithFields(logrus.Fields{ - "BSL name": param.BackupLocation.Name, - "BSL UID": param.BackupLocation.UID, + "BSL name": param.BackupLocation.Name, + "repo name": param.BackupRepo.Name, + "repo UID": param.BackupRepo.UID, }) log.Debug("Start to prune repo") repoOption, err := udmrepo.NewRepoOptions( udmrepo.WithPassword(urp, param), - udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)), - udmrepo.WithGenOptions(map[string]string{udmrepo.GenOptionMaintainMode: udmrepo.GenOptionMaintainFull}), - udmrepo.WithDescription(repoOpDescFullMaintain), + udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)), + udmrepo.WithDescription(repoOpDescMaintain), ) if err != nil { @@ -203,35 +229,6 @@ func (urp *unifiedRepoProvider) PruneRepo(ctx context.Context, param RepoParam) return nil } -func (urp *unifiedRepoProvider) PruneRepoQuick(ctx context.Context, param RepoParam) error { - log := urp.log.WithFields(logrus.Fields{ - "BSL name": param.BackupLocation.Name, - "BSL UID": 
param.BackupLocation.UID, - }) - - log.Debug("Start to prune repo quick") - - repoOption, err := udmrepo.NewRepoOptions( - udmrepo.WithPassword(urp, param), - udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)), - udmrepo.WithGenOptions(map[string]string{udmrepo.GenOptionMaintainMode: udmrepo.GenOptionMaintainQuick}), - udmrepo.WithDescription(repoOpDescQuickMaintain), - ) - - if err != nil { - return errors.Wrap(err, "error to get repo options") - } - - err = urp.repoService.Maintain(ctx, *repoOption) - if err != nil { - return errors.Wrap(err, "error to prune backup repo quick") - } - - log.Debug("Prune repo quick complete") - - return nil -} - func (urp *unifiedRepoProvider) EnsureUnlockRepo(ctx context.Context, param RepoParam) error { return nil } @@ -239,7 +236,8 @@ func (urp *unifiedRepoProvider) EnsureUnlockRepo(ctx context.Context, param Repo func (urp *unifiedRepoProvider) Forget(ctx context.Context, snapshotID string, param RepoParam) error { log := urp.log.WithFields(logrus.Fields{ "BSL name": param.BackupLocation.Name, - "BSL UID": param.BackupLocation.UID, + "repo name": param.BackupRepo.Name, + "repo UID": param.BackupRepo.UID, "snapshotID": snapshotID, }) @@ -247,7 +245,7 @@ func (urp *unifiedRepoProvider) Forget(ctx context.Context, snapshotID string, p repoOption, err := udmrepo.NewRepoOptions( udmrepo.WithPassword(urp, param), - udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)), + udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)), udmrepo.WithDescription(repoOpDescForget), ) @@ -277,10 +275,14 @@ func (urp *unifiedRepoProvider) Forget(ctx context.Context, snapshotID string, p return nil } +func (urp *unifiedRepoProvider) DefaultMaintenanceFrequency(ctx context.Context, param RepoParam) time.Duration { + return urp.repoService.DefaultMaintenanceFrequency() +} + func (urp *unifiedRepoProvider) GetPassword(param interface{}) (string, error) { repoParam, ok := param.(RepoParam) if !ok { - return "", errors.New("invalid parameter") + return "", errors.Errorf("invalid parameter, expect %T, actual %T", RepoParam{}, param) } repoPassword, err := getRepoPassword(urp.credentialGetter.FromSecret, repoParam) @@ -294,7 +296,7 @@ func (urp *unifiedRepoProvider) GetPassword(param interface{}) (string, error) { func (urp *unifiedRepoProvider) GetStoreType(param interface{}) (string, error) { repoParam, ok := param.(RepoParam) if !ok { - return "", errors.New("invalid parameter") + return "", errors.Errorf("invalid parameter, expect %T, actual %T", RepoParam{}, param) } return getStorageType(repoParam.BackupLocation), nil @@ -303,10 +305,10 @@ func (urp *unifiedRepoProvider) GetStoreType(param interface{}) (string, error) func (urp *unifiedRepoProvider) GetStoreOptions(param interface{}) (map[string]string, error) { repoParam, ok := param.(RepoParam) if !ok { - return map[string]string{}, errors.New("invalid parameter") + return map[string]string{}, errors.Errorf("invalid parameter, expect %T, actual %T", RepoParam{}, param) } - storeVar, err := funcTable.getStorageVariables(repoParam.BackupLocation, repoParam.BackupRepo.Spec.VolumeNamespace) + storeVar, err := funcTable.getStorageVariables(repoParam.BackupLocation, urp.repoBackend, repoParam.BackupRepo.Spec.VolumeNamespace) if err != nil { return map[string]string{}, errors.Wrap(err, "error to get storage variables") } @@ -409,7 +411,7 @@ func getStorageCredentials(backupLocation *velerov1api.BackupStorageLocation, cr return result, nil } -func getStorageVariables(backupLocation 
*velerov1api.BackupStorageLocation, repoName string) (map[string]string, error) { +func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repoBackend string, repoName string) (map[string]string, error) { result := make(map[string]string) backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) @@ -429,12 +431,13 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo prefix = strings.Trim(backupLocation.Spec.ObjectStorage.Prefix, "/") } - prefix = path.Join(prefix, udmrepo.StoreOptionPrefixName, repoName) + "/" + prefix = path.Join(prefix, repoBackend, repoName) + "/" region := config["region"] if backendType == repoconfig.AWSBackend { s3Url := config["s3Url"] + disableTls := false var err error if s3Url == "" { @@ -444,10 +447,24 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo } s3Url = fmt.Sprintf("s3-%s.amazonaws.com", region) + disableTls = false + } else { + url, err := url.Parse(s3Url) + if err != nil { + return map[string]string{}, errors.Wrapf(err, "error to parse s3Url %s", s3Url) + } + + if url.Path != "" && url.Path != "/" { + return map[string]string{}, errors.Errorf("path is not expected in s3Url %s", s3Url) + } + + s3Url = url.Host + disableTls = (url.Scheme == "http") } result[udmrepo.StoreOptionS3Endpoint] = strings.Trim(s3Url, "/") result[udmrepo.StoreOptionS3DisableTlsVerify] = config["insecureSkipTLSVerify"] + result[udmrepo.StoreOptionS3DisableTls] = strconv.FormatBool(disableTls) } else if backendType == repoconfig.AzureBackend { result[udmrepo.StoreOptionAzureDomain] = getAzureStorageDomain(config) } diff --git a/pkg/repository/provider/unified_repo_test.go b/pkg/repository/provider/unified_repo_test.go index 8e41b9b41b..e6f891cce7 100644 --- a/pkg/repository/provider/unified_repo_test.go +++ b/pkg/repository/provider/unified_repo_test.go @@ -233,6 +233,7 @@ func TestGetStorageVariables(t *testing.T) { name string backupLocation velerov1api.BackupStorageLocation repoName string + repoBackend string getS3BucketRegion func(string) (string, error) getAzureStorageDomain func(map[string]string) string expected map[string]string @@ -249,7 +250,7 @@ func TestGetStorageVariables(t *testing.T) { expectedErr: "invalid storage provider", }, { - name: "aws, ObjectStorage section not exists in BSL, s3Url exist", + name: "aws, ObjectStorage section not exists in BSL, s3Url exist, https", backupLocation: velerov1api.BackupStorageLocation{ Spec: velerov1api.BackupStorageLocationSpec{ Provider: "velero.io/aws", @@ -257,20 +258,40 @@ func TestGetStorageVariables(t *testing.T) { "bucket": "fake-bucket", "prefix": "fake-prefix", "region": "fake-region/", - "s3Url": "fake-url", + "s3Url": "https://fake-url/", "insecureSkipTLSVerify": "true", }, }, }, + repoBackend: "fake-repo-type", expected: map[string]string{ "bucket": "fake-bucket", - "prefix": "fake-prefix/unified-repo/", + "prefix": "fake-prefix/fake-repo-type/", "region": "fake-region", "fspath": "", "endpoint": "fake-url", + "doNotUseTLS": "false", "skipTLSVerify": "true", }, }, + { + name: "aws, ObjectStorage section not exists in BSL, s3Url exist, invalid", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix", + "region": "fake-region/", + "s3Url": "https://fake-url/fake-path", + "insecureSkipTLSVerify": "true", + }, + }, + }, + repoBackend: "fake-repo-type", + expected: 
map[string]string{}, + expectedErr: "path is not expected in s3Url https://fake-url/fake-path", + }, { name: "aws, ObjectStorage section not exists in BSL, s3Url not exist", backupLocation: velerov1api.BackupStorageLocation{ @@ -286,12 +307,14 @@ func TestGetStorageVariables(t *testing.T) { getS3BucketRegion: func(bucket string) (string, error) { return "region from bucket: " + bucket, nil }, + repoBackend: "fake-repo-type", expected: map[string]string{ "bucket": "fake-bucket", - "prefix": "fake-prefix/unified-repo/", + "prefix": "fake-prefix/fake-repo-type/", "region": "region from bucket: fake-bucket", "fspath": "", "endpoint": "s3-region from bucket: fake-bucket.amazonaws.com", + "doNotUseTLS": "false", "skipTLSVerify": "false", }, }, @@ -310,7 +333,7 @@ func TestGetStorageVariables(t *testing.T) { expectedErr: "error get s3 bucket region: fake error", }, { - name: "aws, ObjectStorage section exists in BSL, s3Url exist", + name: "aws, ObjectStorage section exists in BSL, s3Url exist, http", backupLocation: velerov1api.BackupStorageLocation{ Spec: velerov1api.BackupStorageLocationSpec{ Provider: "velero.io/aws", @@ -318,7 +341,7 @@ func TestGetStorageVariables(t *testing.T) { "bucket": "fake-bucket-config", "prefix": "fake-prefix-config", "region": "fake-region", - "s3Url": "fake-url", + "s3Url": "http://fake-url/", "insecureSkipTLSVerify": "false", }, StorageType: velerov1api.StorageType{ @@ -332,12 +355,14 @@ func TestGetStorageVariables(t *testing.T) { getS3BucketRegion: func(bucket string) (string, error) { return "region from bucket: " + bucket, nil }, + repoBackend: "fake-repo-type", expected: map[string]string{ "bucket": "fake-bucket-object-store", - "prefix": "fake-prefix-object-store/unified-repo/", + "prefix": "fake-prefix-object-store/fake-repo-type/", "region": "fake-region", "fspath": "", "endpoint": "fake-url", + "doNotUseTLS": "true", "skipTLSVerify": "false", }, }, @@ -364,9 +389,10 @@ func TestGetStorageVariables(t *testing.T) { getAzureStorageDomain: func(config map[string]string) string { return config["storageDomain"] }, + repoBackend: "fake-repo-type", expected: map[string]string{ "bucket": "fake-bucket-object-store", - "prefix": "fake-prefix-object-store/unified-repo/", + "prefix": "fake-prefix-object-store/fake-repo-type/", "region": "fake-region", "fspath": "", "storageDomain": "fake-domain", @@ -386,13 +412,14 @@ func TestGetStorageVariables(t *testing.T) { }, }, }, - repoName: "//fake-name//", + repoName: "//fake-name//", + repoBackend: "fake-repo-type", getAzureStorageDomain: func(config map[string]string) string { return config["storageDomain"] }, expected: map[string]string{ "bucket": "fake-bucket", - "prefix": "fake-prefix/unified-repo/fake-name/", + "prefix": "fake-prefix/fake-repo-type/fake-name/", "region": "fake-region", "fspath": "", "storageDomain": "fake-domain", @@ -409,10 +436,11 @@ func TestGetStorageVariables(t *testing.T) { }, }, }, + repoBackend: "fake-repo-type", expected: map[string]string{ "fspath": "fake-path", "bucket": "", - "prefix": "fake-prefix/unified-repo/", + "prefix": "fake-prefix/fake-repo-type/", "region": "", }, }, @@ -423,7 +451,7 @@ func TestGetStorageVariables(t *testing.T) { getS3BucketRegion = tc.getS3BucketRegion getAzureStorageDomain = tc.getAzureStorageDomain - actual, err := getStorageVariables(&tc.backupLocation, tc.repoName) + actual, err := getStorageVariables(&tc.backupLocation, tc.repoBackend, tc.repoName) require.Equal(t, tc.expected, actual) @@ -503,7 +531,7 @@ func TestGetStoreOptions(t *testing.T) { name: "wrong 
param type", repoParam: struct{}{}, expected: map[string]string{}, - expectedErr: "invalid parameter", + expectedErr: "invalid parameter, expect provider.RepoParam, actual struct {}", }, { name: "get storage variable fail", @@ -512,7 +540,7 @@ func TestGetStoreOptions(t *testing.T) { BackupRepo: &velerov1api.BackupRepository{}, }, funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { return map[string]string{}, errors.New("fake-error-2") }, }, @@ -526,7 +554,7 @@ func TestGetStoreOptions(t *testing.T) { BackupRepo: &velerov1api.BackupRepository{}, }, funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -586,7 +614,7 @@ func TestPrepareRepo(t *testing.T) { repoService: new(reposervicenmocks.BackupRepoService), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { return map[string]string{}, errors.New("fake-store-option-error") }, }, @@ -597,7 +625,7 @@ func TestPrepareRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -618,7 +646,7 @@ func TestPrepareRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -695,7 +723,7 @@ func TestForget(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -719,7 +747,7 @@ func TestForget(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { return map[string]string{}, nil }, 
getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -775,6 +803,7 @@ func TestForget(t *testing.T) { err := urp.Forget(context.Background(), "", RepoParam{ BackupLocation: &velerov1api.BackupStorageLocation{}, + BackupRepo: &velerov1api.BackupRepository{}, }) if tc.expectedErr == "" { diff --git a/pkg/repository/restic/repository.go b/pkg/repository/restic/repository.go index fa88a9cc44..392caf2842 100644 --- a/pkg/repository/restic/repository.go +++ b/pkg/repository/restic/repository.go @@ -18,6 +18,7 @@ package restic import ( "os" + "time" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -71,6 +72,10 @@ func (r *RepositoryService) Forget(bsl *velerov1api.BackupStorageLocation, repo return r.exec(restic.ForgetCommand(repo.Spec.ResticIdentifier, snapshotID), bsl) } +func (r *RepositoryService) DefaultMaintenanceFrequency() time.Duration { + return restic.DefaultMaintenanceFrequency +} + func (r *RepositoryService) exec(cmd *restic.Command, bsl *velerov1api.BackupStorageLocation) error { file, err := r.credentialsFileStore.Path(repokey.RepoKeySelector()) if err != nil { diff --git a/pkg/repository/udmrepo/kopialib/backend/azure.go b/pkg/repository/udmrepo/kopialib/backend/azure.go new file mode 100644 index 0000000000..7aedc33d8f --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/azure.go @@ -0,0 +1,60 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backend + +import ( + "context" + + "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/blob/azure" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" +) + +type AzureBackend struct { + options azure.Options +} + +func (c *AzureBackend) Setup(ctx context.Context, flags map[string]string) error { + var err error + c.options.Container, err = mustHaveString(udmrepo.StoreOptionOssBucket, flags) + if err != nil { + return err + } + + c.options.StorageAccount, err = mustHaveString(udmrepo.StoreOptionAzureStorageAccount, flags) + if err != nil { + return err + } + + c.options.StorageKey, err = mustHaveString(udmrepo.StoreOptionAzureKey, flags) + if err != nil { + return err + } + + c.options.Prefix = optionalHaveString(udmrepo.StoreOptionPrefix, flags) + c.options.SASToken = optionalHaveString(udmrepo.StoreOptionAzureToken, flags) + c.options.StorageDomain = optionalHaveString(udmrepo.StoreOptionAzureDomain, flags) + + c.options.Limits = setupLimits(ctx, flags) + + return nil +} + +func (c *AzureBackend) Connect(ctx context.Context, isCreate bool) (blob.Storage, error) { + return azure.New(ctx, &c.options) +} diff --git a/pkg/repository/udmrepo/kopialib/backend/azure_test.go b/pkg/repository/udmrepo/kopialib/backend/azure_test.go new file mode 100644 index 0000000000..bc4997fbe7 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/azure_test.go @@ -0,0 +1,102 @@ +/* +Copyright the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backend + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" + + "github.com/kopia/kopia/repo/blob/azure" + "github.com/kopia/kopia/repo/blob/throttling" +) + +func TestAzureSetup(t *testing.T) { + testCases := []struct { + name string + flags map[string]string + expected azure.Options + expectedErr string + }{ + { + name: "must have bucket name", + flags: map[string]string{}, + expectedErr: "key " + udmrepo.StoreOptionOssBucket + " not found", + }, + { + name: "must have storage account", + flags: map[string]string{ + udmrepo.StoreOptionOssBucket: "fake-bucket", + }, + expected: azure.Options{ + Container: "fake-bucket", + }, + expectedErr: "key " + udmrepo.StoreOptionAzureStorageAccount + " not found", + }, + { + name: "must have secret key", + flags: map[string]string{ + udmrepo.StoreOptionOssBucket: "fake-bucket", + udmrepo.StoreOptionAzureStorageAccount: "fake-account", + }, + expected: azure.Options{ + Container: "fake-bucket", + StorageAccount: "fake-account", + }, + expectedErr: "key " + udmrepo.StoreOptionAzureKey + " not found", + }, + { + name: "with limits", + flags: map[string]string{ + udmrepo.StoreOptionOssBucket: "fake-bucket", + udmrepo.StoreOptionAzureStorageAccount: "fake-account", + udmrepo.StoreOptionAzureKey: "fake-key", + udmrepo.ThrottleOptionReadOps: "100", + udmrepo.ThrottleOptionUploadBytes: "200", + }, + expected: azure.Options{ + Container: "fake-bucket", + StorageAccount: "fake-account", + StorageKey: "fake-key", + Limits: throttling.Limits{ + ReadsPerSecond: 100, + UploadBytesPerSecond: 200, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + azFlags := AzureBackend{} + + err := azFlags.Setup(context.Background(), tc.flags) + + require.Equal(t, tc.expected, azFlags.options) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} diff --git a/pkg/repository/udmrepo/kopialib/backend/backend.go b/pkg/repository/udmrepo/kopialib/backend/backend.go new file mode 100644 index 0000000000..9993138636 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/backend.go @@ -0,0 +1,33 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backend + +import ( + "context" + + "github.com/kopia/kopia/repo/blob" +) + +// Store defines the methods for Kopia to establish a connection to +// the backend storage +type Store interface { + // Setup sets up the variables for a specific backend storage + Setup(ctx context.Context, flags map[string]string) error + + // Connect connects to a specific backend storage with the storage variables + Connect(ctx context.Context, isCreate bool) (blob.Storage, error) +} diff --git a/pkg/repository/udmrepo/kopialib/backend/common.go b/pkg/repository/udmrepo/kopialib/backend/common.go new file mode 100644 index 0000000000..7aa888e3a9 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/common.go @@ -0,0 +1,83 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backend + +import ( + "context" + "time" + + "github.com/kopia/kopia/repo" + "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/blob/throttling" + "github.com/kopia/kopia/repo/content" + "github.com/kopia/kopia/repo/encryption" + "github.com/kopia/kopia/repo/hashing" + "github.com/kopia/kopia/repo/object" + "github.com/kopia/kopia/repo/splitter" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" +) + +const ( + maxDataCacheMB = 2000 + maxMetadataCacheMB = 2000 + maxCacheDurationSecond = 30 +) + +func setupLimits(ctx context.Context, flags map[string]string) throttling.Limits { + return throttling.Limits{ + DownloadBytesPerSecond: optionalHaveFloat64(ctx, udmrepo.ThrottleOptionDownloadBytes, flags), + ListsPerSecond: optionalHaveFloat64(ctx, udmrepo.ThrottleOptionListOps, flags), + ReadsPerSecond: optionalHaveFloat64(ctx, udmrepo.ThrottleOptionReadOps, flags), + UploadBytesPerSecond: optionalHaveFloat64(ctx, udmrepo.ThrottleOptionUploadBytes, flags), + WritesPerSecond: optionalHaveFloat64(ctx, udmrepo.ThrottleOptionWriteOps, flags), + } +} + +// SetupNewRepositoryOptions sets up the options used when creating a new Kopia repository +func SetupNewRepositoryOptions(ctx context.Context, flags map[string]string) repo.NewRepositoryOptions { + return repo.NewRepositoryOptions{ + BlockFormat: content.FormattingOptions{ + Hash: optionalHaveStringWithDefault(udmrepo.StoreOptionGenHashAlgo, flags, hashing.DefaultAlgorithm), + Encryption: optionalHaveStringWithDefault(udmrepo.StoreOptionGenEncryptAlgo, flags, encryption.DefaultAlgorithm), + }, + + ObjectFormat: object.Format{ + Splitter: optionalHaveStringWithDefault(udmrepo.StoreOptionGenSplitAlgo, flags, splitter.DefaultAlgorithm), + }, + + RetentionMode: blob.RetentionMode(optionalHaveString(udmrepo.StoreOptionGenRetentionMode, flags)), + RetentionPeriod: optionalHaveDuration(ctx, udmrepo.StoreOptionGenRetentionPeriod, flags), + } +} + +// SetupConnectOptions sets up the options used when connecting to an existing Kopia repository +func SetupConnectOptions(ctx context.Context, repoOptions udmrepo.RepoOptions) repo.ConnectOptions { + return repo.ConnectOptions{ + CachingOptions: content.CachingOptions{ + MaxCacheSizeBytes: maxDataCacheMB << 
20, + MaxMetadataCacheSizeBytes: maxMetadataCacheMB << 20, + MaxListCacheDuration: content.DurationSeconds(time.Duration(maxCacheDurationSecond) * time.Second), + }, + ClientOptions: repo.ClientOptions{ + Hostname: optionalHaveString(udmrepo.GenOptionOwnerDomain, repoOptions.GeneralOptions), + Username: optionalHaveString(udmrepo.GenOptionOwnerName, repoOptions.GeneralOptions), + ReadOnly: optionalHaveBool(ctx, udmrepo.StoreOptionGenReadOnly, repoOptions.GeneralOptions), + Description: repoOptions.Description, + }, + } +} diff --git a/pkg/repository/udmrepo/kopialib/backend/file_system.go b/pkg/repository/udmrepo/kopialib/backend/file_system.go new file mode 100644 index 0000000000..cf314ff598 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/file_system.go @@ -0,0 +1,62 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backend + +import ( + "context" + "path/filepath" + + "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/blob/filesystem" + "github.com/pkg/errors" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" +) + +type FsBackend struct { + options filesystem.Options +} + +const ( + defaultFileMode = 0o600 + defaultDirMode = 0o700 +) + +func (c *FsBackend) Setup(ctx context.Context, flags map[string]string) error { + path, err := mustHaveString(udmrepo.StoreOptionFsPath, flags) + if err != nil { + return err + } + + prefix := optionalHaveString(udmrepo.StoreOptionPrefix, flags) + + c.options.Path = filepath.Join(path, prefix) + c.options.FileMode = defaultFileMode + c.options.DirectoryMode = defaultDirMode + + c.options.Limits = setupLimits(ctx, flags) + + return nil +} + +func (c *FsBackend) Connect(ctx context.Context, isCreate bool) (blob.Storage, error) { + if !filepath.IsAbs(c.options.Path) { + return nil, errors.Errorf("filesystem repository path is not absolute, path: %s", c.options.Path) + } + + return filesystem.New(ctx, &c.options, isCreate) +} diff --git a/pkg/repository/udmrepo/kopialib/backend/gcs.go b/pkg/repository/udmrepo/kopialib/backend/gcs.go new file mode 100644 index 0000000000..b998c97028 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/gcs.go @@ -0,0 +1,54 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backend + +import ( + "context" + + "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/blob/gcs" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" +) + +type GCSBackend struct { + options gcs.Options +} + +func (c *GCSBackend) Setup(ctx context.Context, flags map[string]string) error { + var err error + c.options.BucketName, err = mustHaveString(udmrepo.StoreOptionOssBucket, flags) + if err != nil { + return err + } + + c.options.ServiceAccountCredentialsFile, err = mustHaveString(udmrepo.StoreOptionCredentialFile, flags) + if err != nil { + return err + } + + c.options.Prefix = optionalHaveString(udmrepo.StoreOptionPrefix, flags) + c.options.ReadOnly = optionalHaveBool(ctx, udmrepo.StoreOptionGcsReadonly, flags) + + c.options.Limits = setupLimits(ctx, flags) + + return nil +} + +func (c *GCSBackend) Connect(ctx context.Context, isCreate bool) (blob.Storage, error) { + return gcs.New(ctx, &c.options) +} diff --git a/pkg/repository/udmrepo/kopialib/backend/gcs_test.go b/pkg/repository/udmrepo/kopialib/backend/gcs_test.go new file mode 100644 index 0000000000..7abdcab3e1 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/gcs_test.go @@ -0,0 +1,61 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backend + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" +) + +func TestGcsSetup(t *testing.T) { + testCases := []struct { + name string + flags map[string]string + expectedErr string + }{ + { + name: "must have bucket name", + flags: map[string]string{}, + expectedErr: "key " + udmrepo.StoreOptionOssBucket + " not found", + }, + { + name: "must have credential file", + flags: map[string]string{ + udmrepo.StoreOptionOssBucket: "fake-bucket", + }, + expectedErr: "key " + udmrepo.StoreOptionCredentialFile + " not found", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + gcsFlags := GCSBackend{} + + err := gcsFlags.Setup(context.Background(), tc.flags) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} diff --git a/pkg/repository/udmrepo/kopialib/backend/mocks/DirectRepository.go b/pkg/repository/udmrepo/kopialib/backend/mocks/DirectRepository.go new file mode 100644 index 0000000000..c327878827 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/mocks/DirectRepository.go @@ -0,0 +1,542 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT. 
+ +package mocks + +import ( + blob "github.com/kopia/kopia/repo/blob" + content "github.com/kopia/kopia/repo/content" + + context "context" + + index "github.com/kopia/kopia/repo/content/index" + + manifest "github.com/kopia/kopia/repo/manifest" + + mock "github.com/stretchr/testify/mock" + + object "github.com/kopia/kopia/repo/object" + + repo "github.com/kopia/kopia/repo" + + throttling "github.com/kopia/kopia/repo/blob/throttling" + + time "time" +) + +// DirectRepository is an autogenerated mock type for the DirectRepository type +type DirectRepository struct { + mock.Mock +} + +// AlsoLogToContentLog provides a mock function with given fields: ctx +func (_m *DirectRepository) AlsoLogToContentLog(ctx context.Context) context.Context { + ret := _m.Called(ctx) + + var r0 context.Context + if rf, ok := ret.Get(0).(func(context.Context) context.Context); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(context.Context) + } + } + + return r0 +} + +// BlobCfg provides a mock function with given fields: +func (_m *DirectRepository) BlobCfg() content.BlobCfgBlob { + ret := _m.Called() + + var r0 content.BlobCfgBlob + if rf, ok := ret.Get(0).(func() content.BlobCfgBlob); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(content.BlobCfgBlob) + } + + return r0 +} + +// BlobReader provides a mock function with given fields: +func (_m *DirectRepository) BlobReader() blob.Reader { + ret := _m.Called() + + var r0 blob.Reader + if rf, ok := ret.Get(0).(func() blob.Reader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blob.Reader) + } + } + + return r0 +} + +// BlobVolume provides a mock function with given fields: +func (_m *DirectRepository) BlobVolume() blob.Volume { + ret := _m.Called() + + var r0 blob.Volume + if rf, ok := ret.Get(0).(func() blob.Volume); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blob.Volume) + } + } + + return r0 +} + +// ClientOptions provides a mock function with given fields: +func (_m *DirectRepository) ClientOptions() repo.ClientOptions { + ret := _m.Called() + + var r0 repo.ClientOptions + if rf, ok := ret.Get(0).(func() repo.ClientOptions); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(repo.ClientOptions) + } + + return r0 +} + +// Close provides a mock function with given fields: ctx +func (_m *DirectRepository) Close(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ConfigFilename provides a mock function with given fields: +func (_m *DirectRepository) ConfigFilename() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// ContentInfo provides a mock function with given fields: ctx, contentID +func (_m *DirectRepository) ContentInfo(ctx context.Context, contentID index.ID) (index.Info, error) { + ret := _m.Called(ctx, contentID) + + var r0 index.Info + if rf, ok := ret.Get(0).(func(context.Context, index.ID) index.Info); ok { + r0 = rf(ctx, contentID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(index.Info) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, index.ID) error); ok { + r1 = rf(ctx, contentID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ContentReader provides a mock function with given fields: +func (_m *DirectRepository) ContentReader() content.Reader { + ret 
:= _m.Called() + + var r0 content.Reader + if rf, ok := ret.Get(0).(func() content.Reader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(content.Reader) + } + } + + return r0 +} + +// Crypter provides a mock function with given fields: +func (_m *DirectRepository) Crypter() *content.Crypter { + ret := _m.Called() + + var r0 *content.Crypter + if rf, ok := ret.Get(0).(func() *content.Crypter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*content.Crypter) + } + } + + return r0 +} + +// DeriveKey provides a mock function with given fields: purpose, keyLength +func (_m *DirectRepository) DeriveKey(purpose []byte, keyLength int) []byte { + ret := _m.Called(purpose, keyLength) + + var r0 []byte + if rf, ok := ret.Get(0).(func([]byte, int) []byte); ok { + r0 = rf(purpose, keyLength) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + return r0 +} + +// DisableIndexRefresh provides a mock function with given fields: +func (_m *DirectRepository) DisableIndexRefresh() { + _m.Called() +} + +// FindManifests provides a mock function with given fields: ctx, labels +func (_m *DirectRepository) FindManifests(ctx context.Context, labels map[string]string) ([]*manifest.EntryMetadata, error) { + ret := _m.Called(ctx, labels) + + var r0 []*manifest.EntryMetadata + if rf, ok := ret.Get(0).(func(context.Context, map[string]string) []*manifest.EntryMetadata); ok { + r0 = rf(ctx, labels) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*manifest.EntryMetadata) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, map[string]string) error); ok { + r1 = rf(ctx, labels) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetManifest provides a mock function with given fields: ctx, id, data +func (_m *DirectRepository) GetManifest(ctx context.Context, id manifest.ID, data interface{}) (*manifest.EntryMetadata, error) { + ret := _m.Called(ctx, id, data) + + var r0 *manifest.EntryMetadata + if rf, ok := ret.Get(0).(func(context.Context, manifest.ID, interface{}) *manifest.EntryMetadata); ok { + r0 = rf(ctx, id, data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*manifest.EntryMetadata) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, manifest.ID, interface{}) error); ok { + r1 = rf(ctx, id, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IndexBlobs provides a mock function with given fields: ctx, includeInactive +func (_m *DirectRepository) IndexBlobs(ctx context.Context, includeInactive bool) ([]content.IndexBlobInfo, error) { + ret := _m.Called(ctx, includeInactive) + + var r0 []content.IndexBlobInfo + if rf, ok := ret.Get(0).(func(context.Context, bool) []content.IndexBlobInfo); ok { + r0 = rf(ctx, includeInactive) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]content.IndexBlobInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok { + r1 = rf(ctx, includeInactive) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewDirectWriter provides a mock function with given fields: ctx, opt +func (_m *DirectRepository) NewDirectWriter(ctx context.Context, opt repo.WriteSessionOptions) (context.Context, repo.DirectRepositoryWriter, error) { + ret := _m.Called(ctx, opt) + + var r0 context.Context + if rf, ok := ret.Get(0).(func(context.Context, repo.WriteSessionOptions) context.Context); ok { + r0 = rf(ctx, opt) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(context.Context) + } + } + + 
var r1 repo.DirectRepositoryWriter + if rf, ok := ret.Get(1).(func(context.Context, repo.WriteSessionOptions) repo.DirectRepositoryWriter); ok { + r1 = rf(ctx, opt) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(repo.DirectRepositoryWriter) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, repo.WriteSessionOptions) error); ok { + r2 = rf(ctx, opt) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewWriter provides a mock function with given fields: ctx, opt +func (_m *DirectRepository) NewWriter(ctx context.Context, opt repo.WriteSessionOptions) (context.Context, repo.RepositoryWriter, error) { + ret := _m.Called(ctx, opt) + + var r0 context.Context + if rf, ok := ret.Get(0).(func(context.Context, repo.WriteSessionOptions) context.Context); ok { + r0 = rf(ctx, opt) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(context.Context) + } + } + + var r1 repo.RepositoryWriter + if rf, ok := ret.Get(1).(func(context.Context, repo.WriteSessionOptions) repo.RepositoryWriter); ok { + r1 = rf(ctx, opt) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(repo.RepositoryWriter) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, repo.WriteSessionOptions) error); ok { + r2 = rf(ctx, opt) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ObjectFormat provides a mock function with given fields: +func (_m *DirectRepository) ObjectFormat() object.Format { + ret := _m.Called() + + var r0 object.Format + if rf, ok := ret.Get(0).(func() object.Format); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(object.Format) + } + + return r0 +} + +// OpenObject provides a mock function with given fields: ctx, id +func (_m *DirectRepository) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) { + ret := _m.Called(ctx, id) + + var r0 object.Reader + if rf, ok := ret.Get(0).(func(context.Context, object.ID) object.Reader); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(object.Reader) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PrefetchContents provides a mock function with given fields: ctx, contentIDs, hint +func (_m *DirectRepository) PrefetchContents(ctx context.Context, contentIDs []index.ID, hint string) []index.ID { + ret := _m.Called(ctx, contentIDs, hint) + + var r0 []index.ID + if rf, ok := ret.Get(0).(func(context.Context, []index.ID, string) []index.ID); ok { + r0 = rf(ctx, contentIDs, hint) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]index.ID) + } + } + + return r0 +} + +// PrefetchObjects provides a mock function with given fields: ctx, objectIDs, hint +func (_m *DirectRepository) PrefetchObjects(ctx context.Context, objectIDs []object.ID, hint string) ([]index.ID, error) { + ret := _m.Called(ctx, objectIDs, hint) + + var r0 []index.ID + if rf, ok := ret.Get(0).(func(context.Context, []object.ID, string) []index.ID); ok { + r0 = rf(ctx, objectIDs, hint) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]index.ID) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []object.ID, string) error); ok { + r1 = rf(ctx, objectIDs, hint) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Refresh provides a mock function with given fields: ctx +func (_m *DirectRepository) Refresh(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Throttler provides a mock function with given fields: +func (_m *DirectRepository) Throttler() throttling.SettableThrottler { + ret := _m.Called() + + var r0 throttling.SettableThrottler + if rf, ok := ret.Get(0).(func() throttling.SettableThrottler); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(throttling.SettableThrottler) + } + } + + return r0 +} + +// Time provides a mock function with given fields: +func (_m *DirectRepository) Time() time.Time { + ret := _m.Called() + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// Token provides a mock function with given fields: password +func (_m *DirectRepository) Token(password string) (string, error) { + ret := _m.Called(password) + + var r0 string + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(password) + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UniqueID provides a mock function with given fields: +func (_m *DirectRepository) UniqueID() []byte { + ret := _m.Called() + + var r0 []byte + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + return r0 +} + +// UpdateDescription provides a mock function with given fields: d +func (_m *DirectRepository) UpdateDescription(d string) { + _m.Called(d) +} + +// VerifyObject provides a mock function with given fields: ctx, id +func (_m *DirectRepository) VerifyObject(ctx context.Context, id object.ID) ([]index.ID, error) { + ret := _m.Called(ctx, id) + + var r0 []index.ID + if rf, ok := ret.Get(0).(func(context.Context, object.ID) []index.ID); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]index.ID) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewDirectRepository interface { + mock.TestingT + Cleanup(func()) +} + +// NewDirectRepository creates a new instance of DirectRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewDirectRepository(t mockConstructorTestingTNewDirectRepository) *DirectRepository { + mock := &DirectRepository{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/repository/udmrepo/kopialib/backend/mocks/DirectRepositoryWriter.go b/pkg/repository/udmrepo/kopialib/backend/mocks/DirectRepositoryWriter.go new file mode 100644 index 0000000000..2116a025ca --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/mocks/DirectRepositoryWriter.go @@ -0,0 +1,718 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT. 
+ +package mocks + +import ( + blob "github.com/kopia/kopia/repo/blob" + content "github.com/kopia/kopia/repo/content" + + context "context" + + index "github.com/kopia/kopia/repo/content/index" + + manifest "github.com/kopia/kopia/repo/manifest" + + mock "github.com/stretchr/testify/mock" + + object "github.com/kopia/kopia/repo/object" + + repo "github.com/kopia/kopia/repo" + + throttling "github.com/kopia/kopia/repo/blob/throttling" + + time "time" +) + +// DirectRepositoryWriter is an autogenerated mock type for the DirectRepositoryWriter type +type DirectRepositoryWriter struct { + mock.Mock +} + +// AlsoLogToContentLog provides a mock function with given fields: ctx +func (_m *DirectRepositoryWriter) AlsoLogToContentLog(ctx context.Context) context.Context { + ret := _m.Called(ctx) + + var r0 context.Context + if rf, ok := ret.Get(0).(func(context.Context) context.Context); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(context.Context) + } + } + + return r0 +} + +// BlobCfg provides a mock function with given fields: +func (_m *DirectRepositoryWriter) BlobCfg() content.BlobCfgBlob { + ret := _m.Called() + + var r0 content.BlobCfgBlob + if rf, ok := ret.Get(0).(func() content.BlobCfgBlob); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(content.BlobCfgBlob) + } + + return r0 +} + +// BlobReader provides a mock function with given fields: +func (_m *DirectRepositoryWriter) BlobReader() blob.Reader { + ret := _m.Called() + + var r0 blob.Reader + if rf, ok := ret.Get(0).(func() blob.Reader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blob.Reader) + } + } + + return r0 +} + +// BlobStorage provides a mock function with given fields: +func (_m *DirectRepositoryWriter) BlobStorage() blob.Storage { + ret := _m.Called() + + var r0 blob.Storage + if rf, ok := ret.Get(0).(func() blob.Storage); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blob.Storage) + } + } + + return r0 +} + +// BlobVolume provides a mock function with given fields: +func (_m *DirectRepositoryWriter) BlobVolume() blob.Volume { + ret := _m.Called() + + var r0 blob.Volume + if rf, ok := ret.Get(0).(func() blob.Volume); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blob.Volume) + } + } + + return r0 +} + +// ChangePassword provides a mock function with given fields: ctx, newPassword +func (_m *DirectRepositoryWriter) ChangePassword(ctx context.Context, newPassword string) error { + ret := _m.Called(ctx, newPassword) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, newPassword) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClientOptions provides a mock function with given fields: +func (_m *DirectRepositoryWriter) ClientOptions() repo.ClientOptions { + ret := _m.Called() + + var r0 repo.ClientOptions + if rf, ok := ret.Get(0).(func() repo.ClientOptions); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(repo.ClientOptions) + } + + return r0 +} + +// Close provides a mock function with given fields: ctx +func (_m *DirectRepositoryWriter) Close(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CommitUpgrade provides a mock function with given fields: ctx +func (_m *DirectRepositoryWriter) CommitUpgrade(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ConfigFilename provides a mock function with given fields: +func (_m *DirectRepositoryWriter) ConfigFilename() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// ContentInfo provides a mock function with given fields: ctx, contentID +func (_m *DirectRepositoryWriter) ContentInfo(ctx context.Context, contentID index.ID) (index.Info, error) { + ret := _m.Called(ctx, contentID) + + var r0 index.Info + if rf, ok := ret.Get(0).(func(context.Context, index.ID) index.Info); ok { + r0 = rf(ctx, contentID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(index.Info) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, index.ID) error); ok { + r1 = rf(ctx, contentID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ContentManager provides a mock function with given fields: +func (_m *DirectRepositoryWriter) ContentManager() *content.WriteManager { + ret := _m.Called() + + var r0 *content.WriteManager + if rf, ok := ret.Get(0).(func() *content.WriteManager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*content.WriteManager) + } + } + + return r0 +} + +// ContentReader provides a mock function with given fields: +func (_m *DirectRepositoryWriter) ContentReader() content.Reader { + ret := _m.Called() + + var r0 content.Reader + if rf, ok := ret.Get(0).(func() content.Reader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(content.Reader) + } + } + + return r0 +} + +// Crypter provides a mock function with given fields: +func (_m *DirectRepositoryWriter) Crypter() *content.Crypter { + ret := _m.Called() + + var r0 *content.Crypter + if rf, ok := ret.Get(0).(func() *content.Crypter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*content.Crypter) + } + } + + return r0 +} + +// DeleteManifest provides a mock function with given fields: ctx, id +func (_m *DirectRepositoryWriter) DeleteManifest(ctx context.Context, id manifest.ID) error { + ret := _m.Called(ctx, id) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, manifest.ID) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeriveKey provides a mock function with given fields: purpose, keyLength +func (_m *DirectRepositoryWriter) DeriveKey(purpose []byte, keyLength int) []byte { + ret := _m.Called(purpose, keyLength) + + var r0 []byte + if rf, ok := ret.Get(0).(func([]byte, int) []byte); ok { + r0 = rf(purpose, keyLength) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + return r0 +} + +// DisableIndexRefresh provides a mock function with given fields: +func (_m *DirectRepositoryWriter) DisableIndexRefresh() { + _m.Called() +} + +// FindManifests provides a mock function with given fields: ctx, labels +func (_m *DirectRepositoryWriter) FindManifests(ctx context.Context, labels map[string]string) ([]*manifest.EntryMetadata, error) { + ret := _m.Called(ctx, labels) + + var r0 []*manifest.EntryMetadata + if rf, ok := ret.Get(0).(func(context.Context, map[string]string) []*manifest.EntryMetadata); ok { + r0 = rf(ctx, labels) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*manifest.EntryMetadata) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, map[string]string) error); ok { + r1 = rf(ctx, 
labels) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Flush provides a mock function with given fields: ctx +func (_m *DirectRepositoryWriter) Flush(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetManifest provides a mock function with given fields: ctx, id, data +func (_m *DirectRepositoryWriter) GetManifest(ctx context.Context, id manifest.ID, data interface{}) (*manifest.EntryMetadata, error) { + ret := _m.Called(ctx, id, data) + + var r0 *manifest.EntryMetadata + if rf, ok := ret.Get(0).(func(context.Context, manifest.ID, interface{}) *manifest.EntryMetadata); ok { + r0 = rf(ctx, id, data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*manifest.EntryMetadata) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, manifest.ID, interface{}) error); ok { + r1 = rf(ctx, id, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IndexBlobs provides a mock function with given fields: ctx, includeInactive +func (_m *DirectRepositoryWriter) IndexBlobs(ctx context.Context, includeInactive bool) ([]content.IndexBlobInfo, error) { + ret := _m.Called(ctx, includeInactive) + + var r0 []content.IndexBlobInfo + if rf, ok := ret.Get(0).(func(context.Context, bool) []content.IndexBlobInfo); ok { + r0 = rf(ctx, includeInactive) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]content.IndexBlobInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok { + r1 = rf(ctx, includeInactive) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewDirectWriter provides a mock function with given fields: ctx, opt +func (_m *DirectRepositoryWriter) NewDirectWriter(ctx context.Context, opt repo.WriteSessionOptions) (context.Context, repo.DirectRepositoryWriter, error) { + ret := _m.Called(ctx, opt) + + var r0 context.Context + if rf, ok := ret.Get(0).(func(context.Context, repo.WriteSessionOptions) context.Context); ok { + r0 = rf(ctx, opt) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(context.Context) + } + } + + var r1 repo.DirectRepositoryWriter + if rf, ok := ret.Get(1).(func(context.Context, repo.WriteSessionOptions) repo.DirectRepositoryWriter); ok { + r1 = rf(ctx, opt) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(repo.DirectRepositoryWriter) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, repo.WriteSessionOptions) error); ok { + r2 = rf(ctx, opt) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewObjectWriter provides a mock function with given fields: ctx, opt +func (_m *DirectRepositoryWriter) NewObjectWriter(ctx context.Context, opt object.WriterOptions) object.Writer { + ret := _m.Called(ctx, opt) + + var r0 object.Writer + if rf, ok := ret.Get(0).(func(context.Context, object.WriterOptions) object.Writer); ok { + r0 = rf(ctx, opt) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(object.Writer) + } + } + + return r0 +} + +// NewWriter provides a mock function with given fields: ctx, opt +func (_m *DirectRepositoryWriter) NewWriter(ctx context.Context, opt repo.WriteSessionOptions) (context.Context, repo.RepositoryWriter, error) { + ret := _m.Called(ctx, opt) + + var r0 context.Context + if rf, ok := ret.Get(0).(func(context.Context, repo.WriteSessionOptions) context.Context); ok { + r0 = rf(ctx, opt) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(context.Context) + } + } + + var r1 repo.RepositoryWriter + if rf, ok := ret.Get(1).(func(context.Context, repo.WriteSessionOptions) repo.RepositoryWriter); ok { + r1 = rf(ctx, opt) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(repo.RepositoryWriter) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, repo.WriteSessionOptions) error); ok { + r2 = rf(ctx, opt) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ObjectFormat provides a mock function with given fields: +func (_m *DirectRepositoryWriter) ObjectFormat() object.Format { + ret := _m.Called() + + var r0 object.Format + if rf, ok := ret.Get(0).(func() object.Format); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(object.Format) + } + + return r0 +} + +// OpenObject provides a mock function with given fields: ctx, id +func (_m *DirectRepositoryWriter) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) { + ret := _m.Called(ctx, id) + + var r0 object.Reader + if rf, ok := ret.Get(0).(func(context.Context, object.ID) object.Reader); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(object.Reader) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PrefetchContents provides a mock function with given fields: ctx, contentIDs, hint +func (_m *DirectRepositoryWriter) PrefetchContents(ctx context.Context, contentIDs []index.ID, hint string) []index.ID { + ret := _m.Called(ctx, contentIDs, hint) + + var r0 []index.ID + if rf, ok := ret.Get(0).(func(context.Context, []index.ID, string) []index.ID); ok { + r0 = rf(ctx, contentIDs, hint) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]index.ID) + } + } + + return r0 +} + +// PrefetchObjects provides a mock function with given fields: ctx, objectIDs, hint +func (_m *DirectRepositoryWriter) PrefetchObjects(ctx context.Context, objectIDs []object.ID, hint string) ([]index.ID, error) { + ret := _m.Called(ctx, objectIDs, hint) + + var r0 []index.ID + if rf, ok := ret.Get(0).(func(context.Context, []object.ID, string) []index.ID); ok { + r0 = rf(ctx, objectIDs, hint) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]index.ID) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []object.ID, string) error); ok { + r1 = rf(ctx, objectIDs, hint) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PutManifest provides a mock function with given fields: ctx, labels, payload +func (_m *DirectRepositoryWriter) PutManifest(ctx context.Context, labels map[string]string, payload interface{}) (manifest.ID, error) { + ret := _m.Called(ctx, labels, payload) + + var r0 manifest.ID + if rf, ok := ret.Get(0).(func(context.Context, map[string]string, interface{}) manifest.ID); ok { + r0 = rf(ctx, labels, payload) + } else { + r0 = ret.Get(0).(manifest.ID) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, map[string]string, interface{}) error); ok { + r1 = rf(ctx, labels, payload) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Refresh provides a mock function with given fields: ctx +func (_m *DirectRepositoryWriter) Refresh(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RollbackUpgrade provides a mock function with given fields: ctx +func (_m 
*DirectRepositoryWriter) RollbackUpgrade(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetParameters provides a mock function with given fields: ctx, m, blobcfg +func (_m *DirectRepositoryWriter) SetParameters(ctx context.Context, m content.MutableParameters, blobcfg content.BlobCfgBlob) error { + ret := _m.Called(ctx, m, blobcfg) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, content.MutableParameters, content.BlobCfgBlob) error); ok { + r0 = rf(ctx, m, blobcfg) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetUpgradeLockIntent provides a mock function with given fields: ctx, l +func (_m *DirectRepositoryWriter) SetUpgradeLockIntent(ctx context.Context, l content.UpgradeLock) (*content.UpgradeLock, error) { + ret := _m.Called(ctx, l) + + var r0 *content.UpgradeLock + if rf, ok := ret.Get(0).(func(context.Context, content.UpgradeLock) *content.UpgradeLock); ok { + r0 = rf(ctx, l) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*content.UpgradeLock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, content.UpgradeLock) error); ok { + r1 = rf(ctx, l) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Throttler provides a mock function with given fields: +func (_m *DirectRepositoryWriter) Throttler() throttling.SettableThrottler { + ret := _m.Called() + + var r0 throttling.SettableThrottler + if rf, ok := ret.Get(0).(func() throttling.SettableThrottler); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(throttling.SettableThrottler) + } + } + + return r0 +} + +// Time provides a mock function with given fields: +func (_m *DirectRepositoryWriter) Time() time.Time { + ret := _m.Called() + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// Token provides a mock function with given fields: password +func (_m *DirectRepositoryWriter) Token(password string) (string, error) { + ret := _m.Called(password) + + var r0 string + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(password) + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UniqueID provides a mock function with given fields: +func (_m *DirectRepositoryWriter) UniqueID() []byte { + ret := _m.Called() + + var r0 []byte + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + return r0 +} + +// UpdateDescription provides a mock function with given fields: d +func (_m *DirectRepositoryWriter) UpdateDescription(d string) { + _m.Called(d) +} + +// VerifyObject provides a mock function with given fields: ctx, id +func (_m *DirectRepositoryWriter) VerifyObject(ctx context.Context, id object.ID) ([]index.ID, error) { + ret := _m.Called(ctx, id) + + var r0 []index.ID + if rf, ok := ret.Get(0).(func(context.Context, object.ID) []index.ID); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]index.ID) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewDirectRepositoryWriter interface { + 
mock.TestingT + Cleanup(func()) +} + +// NewDirectRepositoryWriter creates a new instance of DirectRepositoryWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewDirectRepositoryWriter(t mockConstructorTestingTNewDirectRepositoryWriter) *DirectRepositoryWriter { + mock := &DirectRepositoryWriter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/repository/udmrepo/kopialib/backend/mocks/Logger.go b/pkg/repository/udmrepo/kopialib/backend/mocks/Logger.go new file mode 100644 index 0000000000..90a2199104 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/mocks/Logger.go @@ -0,0 +1,65 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Logger is an autogenerated mock type for the Logger type +type Logger struct { + mock.Mock +} + +// Debugf provides a mock function with given fields: msg, args +func (_m *Logger) Debugf(msg string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Debugw provides a mock function with given fields: msg, keyValuePairs +func (_m *Logger) Debugw(msg string, keyValuePairs ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keyValuePairs...) + _m.Called(_ca...) +} + +// Errorf provides a mock function with given fields: msg, args +func (_m *Logger) Errorf(msg string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Infof provides a mock function with given fields: msg, args +func (_m *Logger) Infof(msg string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Warnf provides a mock function with given fields: msg, args +func (_m *Logger) Warnf(msg string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +type mockConstructorTestingTNewLogger interface { + mock.TestingT + Cleanup(func()) +} + +// NewLogger creates a new instance of Logger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewLogger(t mockConstructorTestingTNewLogger) *Logger { + mock := &Logger{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/repository/udmrepo/kopialib/backend/mocks/Storage.go b/pkg/repository/udmrepo/kopialib/backend/mocks/Storage.go new file mode 100644 index 0000000000..de49e75ff1 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/mocks/Storage.go @@ -0,0 +1,185 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT. 
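The Storage and Store mocks below stand in for Kopia's blob.Storage and for the kopialib backend.Store abstraction, where Setup validates an option map and Connect returns a blob.Storage handle. For orientation, here is a hedged sketch of driving the S3Backend added later in this change; the bucket name and credentials are placeholders, and only the option keys that S3Backend.Setup requires are shown:

package main

import (
	"context"
	"log"

	"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
	"github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib/backend"
)

func main() {
	ctx := context.Background()
	s3 := &backend.S3Backend{}

	// Required keys per S3Backend.Setup: bucket, key ID and secret key; the
	// endpoint, region, prefix and TLS flags are optional.
	flags := map[string]string{
		udmrepo.StoreOptionOssBucket:   "my-bucket", // placeholder values
		udmrepo.StoreOptionS3KeyId:     "key-id",
		udmrepo.StoreOptionS3SecretKey: "secret",
	}

	if err := s3.Setup(ctx, flags); err != nil {
		log.Fatal(err)
	}

	// Connect hands back a kopia blob.Storage; isCreate=false means "connect
	// to an existing bucket layout" rather than creating one.
	storage, err := s3.Connect(ctx, false)
	if err != nil {
		log.Fatal(err)
	}
	defer storage.Close(ctx)

	log.Println("connected to", storage.DisplayName())
}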
+ +package mocks + +import ( + context "context" + + blob "github.com/kopia/kopia/repo/blob" + + mock "github.com/stretchr/testify/mock" +) + +// Storage is an autogenerated mock type for the Storage type +type Storage struct { + mock.Mock +} + +// Close provides a mock function with given fields: ctx +func (_m *Storage) Close(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ConnectionInfo provides a mock function with given fields: +func (_m *Storage) ConnectionInfo() blob.ConnectionInfo { + ret := _m.Called() + + var r0 blob.ConnectionInfo + if rf, ok := ret.Get(0).(func() blob.ConnectionInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(blob.ConnectionInfo) + } + + return r0 +} + +// DeleteBlob provides a mock function with given fields: ctx, blobID +func (_m *Storage) DeleteBlob(ctx context.Context, blobID blob.ID) error { + ret := _m.Called(ctx, blobID) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, blob.ID) error); ok { + r0 = rf(ctx, blobID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DisplayName provides a mock function with given fields: +func (_m *Storage) DisplayName() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// FlushCaches provides a mock function with given fields: ctx +func (_m *Storage) FlushCaches(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetBlob provides a mock function with given fields: ctx, blobID, offset, length, output +func (_m *Storage) GetBlob(ctx context.Context, blobID blob.ID, offset int64, length int64, output blob.OutputBuffer) error { + ret := _m.Called(ctx, blobID, offset, length, output) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, blob.ID, int64, int64, blob.OutputBuffer) error); ok { + r0 = rf(ctx, blobID, offset, length, output) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetCapacity provides a mock function with given fields: ctx +func (_m *Storage) GetCapacity(ctx context.Context) (blob.Capacity, error) { + ret := _m.Called(ctx) + + var r0 blob.Capacity + if rf, ok := ret.Get(0).(func(context.Context) blob.Capacity); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(blob.Capacity) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetMetadata provides a mock function with given fields: ctx, blobID +func (_m *Storage) GetMetadata(ctx context.Context, blobID blob.ID) (blob.Metadata, error) { + ret := _m.Called(ctx, blobID) + + var r0 blob.Metadata + if rf, ok := ret.Get(0).(func(context.Context, blob.ID) blob.Metadata); ok { + r0 = rf(ctx, blobID) + } else { + r0 = ret.Get(0).(blob.Metadata) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, blob.ID) error); ok { + r1 = rf(ctx, blobID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListBlobs provides a mock function with given fields: ctx, blobIDPrefix, cb +func (_m *Storage) ListBlobs(ctx context.Context, blobIDPrefix blob.ID, cb func(blob.Metadata) error) error { + ret := _m.Called(ctx, blobIDPrefix, cb) + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context, blob.ID, func(blob.Metadata) error) error); ok { + r0 = rf(ctx, blobIDPrefix, cb) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PutBlob provides a mock function with given fields: ctx, blobID, data, opts +func (_m *Storage) PutBlob(ctx context.Context, blobID blob.ID, data blob.Bytes, opts blob.PutOptions) error { + ret := _m.Called(ctx, blobID, data, opts) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, blob.ID, blob.Bytes, blob.PutOptions) error); ok { + r0 = rf(ctx, blobID, data, opts) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type mockConstructorTestingTNewStorage interface { + mock.TestingT + Cleanup(func()) +} + +// NewStorage creates a new instance of Storage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewStorage(t mockConstructorTestingTNewStorage) *Storage { + mock := &Storage{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/repository/udmrepo/kopialib/backend/mocks/Store.go b/pkg/repository/udmrepo/kopialib/backend/mocks/Store.go new file mode 100644 index 0000000000..7c09aa9efb --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/mocks/Store.go @@ -0,0 +1,68 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + blob "github.com/kopia/kopia/repo/blob" + + mock "github.com/stretchr/testify/mock" +) + +// Store is an autogenerated mock type for the Store type +type Store struct { + mock.Mock +} + +// Connect provides a mock function with given fields: ctx, isCreate +func (_m *Store) Connect(ctx context.Context, isCreate bool) (blob.Storage, error) { + ret := _m.Called(ctx, isCreate) + + var r0 blob.Storage + if rf, ok := ret.Get(0).(func(context.Context, bool) blob.Storage); ok { + r0 = rf(ctx, isCreate) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blob.Storage) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok { + r1 = rf(ctx, isCreate) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Setup provides a mock function with given fields: ctx, flags +func (_m *Store) Setup(ctx context.Context, flags map[string]string) error { + ret := _m.Called(ctx, flags) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, map[string]string) error); ok { + r0 = rf(ctx, flags) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type mockConstructorTestingTNewStore interface { + mock.TestingT + Cleanup(func()) +} + +// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewStore(t mockConstructorTestingTNewStore) *Store { + mock := &Store{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/repository/udmrepo/kopialib/backend/s3.go b/pkg/repository/udmrepo/kopialib/backend/s3.go new file mode 100644 index 0000000000..38eeab1066 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/s3.go @@ -0,0 +1,63 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backend + +import ( + "context" + + "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/blob/s3" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" +) + +type S3Backend struct { + options s3.Options +} + +func (c *S3Backend) Setup(ctx context.Context, flags map[string]string) error { + var err error + c.options.BucketName, err = mustHaveString(udmrepo.StoreOptionOssBucket, flags) + if err != nil { + return err + } + + c.options.AccessKeyID, err = mustHaveString(udmrepo.StoreOptionS3KeyId, flags) + if err != nil { + return err + } + + c.options.SecretAccessKey, err = mustHaveString(udmrepo.StoreOptionS3SecretKey, flags) + if err != nil { + return err + } + + c.options.Endpoint = optionalHaveString(udmrepo.StoreOptionS3Endpoint, flags) + c.options.Region = optionalHaveString(udmrepo.StoreOptionOssRegion, flags) + c.options.Prefix = optionalHaveString(udmrepo.StoreOptionPrefix, flags) + c.options.DoNotUseTLS = optionalHaveBool(ctx, udmrepo.StoreOptionS3DisableTls, flags) + c.options.DoNotVerifyTLS = optionalHaveBool(ctx, udmrepo.StoreOptionS3DisableTlsVerify, flags) + c.options.SessionToken = optionalHaveString(udmrepo.StoreOptionS3Token, flags) + + c.options.Limits = setupLimits(ctx, flags) + + return nil +} + +func (c *S3Backend) Connect(ctx context.Context, isCreate bool) (blob.Storage, error) { + return s3.New(ctx, &c.options) +} diff --git a/pkg/repository/udmrepo/kopialib/backend/s3_test.go b/pkg/repository/udmrepo/kopialib/backend/s3_test.go new file mode 100644 index 0000000000..493c1e904a --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/s3_test.go @@ -0,0 +1,69 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backend + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" +) + +func TestS3Setup(t *testing.T) { + testCases := []struct { + name string + flags map[string]string + expectedErr string + }{ + { + name: "must have bucket name", + flags: map[string]string{}, + expectedErr: "key " + udmrepo.StoreOptionOssBucket + " not found", + }, + { + name: "must have access key Id", + flags: map[string]string{ + udmrepo.StoreOptionOssBucket: "fake-bucket", + }, + expectedErr: "key " + udmrepo.StoreOptionS3KeyId + " not found", + }, + { + name: "must have access key", + flags: map[string]string{ + udmrepo.StoreOptionOssBucket: "fake-bucket", + udmrepo.StoreOptionS3KeyId: "fake-key-id", + }, + expectedErr: "key " + udmrepo.StoreOptionS3SecretKey + " not found", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + s3Flags := S3Backend{} + + err := s3Flags.Setup(context.Background(), tc.flags) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} diff --git a/pkg/repository/udmrepo/kopialib/backend/utils.go b/pkg/repository/udmrepo/kopialib/backend/utils.go new file mode 100644 index 0000000000..eb673539fd --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/utils.go @@ -0,0 +1,91 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backend + +import ( + "context" + "strconv" + "time" + + "github.com/kopia/kopia/repo/logging" + "github.com/pkg/errors" +) + +func mustHaveString(key string, flags map[string]string) (string, error) { + if value, exist := flags[key]; exist { + return value, nil + } else { + return "", errors.New("key " + key + " not found") + } +} + +func optionalHaveString(key string, flags map[string]string) string { + return optionalHaveStringWithDefault(key, flags, "") +} + +func optionalHaveBool(ctx context.Context, key string, flags map[string]string) bool { + if value, exist := flags[key]; exist { + if value != "" { + ret, err := strconv.ParseBool(value) + if err == nil { + return ret + } + + backendLog()(ctx).Errorf("Ignore %s, value [%s] is invalid, err %v", key, value, err) + } + } + + return false +} + +func optionalHaveFloat64(ctx context.Context, key string, flags map[string]string) float64 { + if value, exist := flags[key]; exist { + ret, err := strconv.ParseFloat(value, 64) + if err == nil { + return ret + } + + backendLog()(ctx).Errorf("Ignore %s, value [%s] is invalid, err %v", key, value, err) + } + + return 0 +} + +func optionalHaveStringWithDefault(key string, flags map[string]string, defValue string) string { + if value, exist := flags[key]; exist { + return value + } else { + return defValue + } +} + +func optionalHaveDuration(ctx context.Context, key string, flags map[string]string) time.Duration { + if value, exist := flags[key]; exist { + ret, err := time.ParseDuration(value) + if err == nil { + return ret + } + + backendLog()(ctx).Errorf("Ignore %s, value [%s] is invalid, err %v", key, value, err) + } + + return 0 +} + +func backendLog() func(ctx context.Context) logging.Logger { + return logging.Module("kopialib-bd") +} diff --git a/pkg/repository/udmrepo/kopialib/backend/utils_test.go b/pkg/repository/udmrepo/kopialib/backend/utils_test.go new file mode 100644 index 0000000000..cb33236258 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/backend/utils_test.go @@ -0,0 +1,87 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backend + +import ( + "context" + "fmt" + "testing" + + "github.com/kopia/kopia/repo/logging" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + storagemocks "github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib/backend/mocks" +) + +func TestOptionalHaveBool(t *testing.T) { + var expectMsg string + testCases := []struct { + name string + key string + flags map[string]string + logger *storagemocks.Logger + retFuncErrorf func(mock.Arguments) + expectMsg string + retValue bool + }{ + { + name: "key not exist", + key: "fake-key", + flags: map[string]string{}, + retValue: false, + }, + { + name: "value valid", + key: "fake-key", + flags: map[string]string{ + "fake-key": "true", + }, + retValue: true, + }, + { + name: "value invalid", + key: "fake-key", + flags: map[string]string{ + "fake-key": "fake-value", + }, + logger: new(storagemocks.Logger), + retFuncErrorf: func(args mock.Arguments) { + expectMsg = fmt.Sprintf(args[0].(string), args[1].(string), args[2].(string), args[3].(error)) + }, + expectMsg: "Ignore fake-key, value [fake-value] is invalid, err strconv.ParseBool: parsing \"fake-value\": invalid syntax", + retValue: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if tc.logger != nil { + tc.logger.On("Errorf", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Run(tc.retFuncErrorf) + } + + ctx := logging.WithLogger(context.Background(), func(module string) logging.Logger { + return tc.logger + }) + + retValue := optionalHaveBool(ctx, tc.key, tc.flags) + + require.Equal(t, retValue, tc.retValue) + require.Equal(t, tc.expectMsg, expectMsg) + }) + } +} diff --git a/pkg/repository/udmrepo/kopialib/lib_repo.go b/pkg/repository/udmrepo/kopialib/lib_repo.go new file mode 100644 index 0000000000..1a32560f37 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/lib_repo.go @@ -0,0 +1,573 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kopialib + +import ( + "context" + "os" + "runtime" + "strings" + "sync/atomic" + "time" + + "github.com/kopia/kopia/repo" + "github.com/kopia/kopia/repo/compression" + "github.com/kopia/kopia/repo/content/index" + "github.com/kopia/kopia/repo/maintenance" + "github.com/kopia/kopia/repo/manifest" + "github.com/kopia/kopia/repo/object" + "github.com/kopia/kopia/snapshot/snapshotmaintenance" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" + "github.com/vmware-tanzu/velero/pkg/util/logging" +) + +type kopiaRepoService struct { + logger logrus.FieldLogger +} + +type kopiaRepository struct { + rawRepo repo.Repository + rawWriter repo.RepositoryWriter + description string + uploaded int64 + openTime time.Time + throttle logThrottle + logger logrus.FieldLogger +} + +type kopiaMaintenance struct { + mode maintenance.Mode + startTime time.Time + uploaded int64 + throttle logThrottle + logger logrus.FieldLogger +} + +type logThrottle struct { + lastTime int64 + interval time.Duration +} + +type kopiaObjectReader struct { + rawReader object.Reader +} + +type kopiaObjectWriter struct { + rawWriter object.Writer +} + +const ( + defaultLogInterval = time.Duration(time.Second * 10) + defaultMaintainCheckPeriod = time.Hour + overwriteFullMaintainInterval = time.Duration(0) + overwriteQuickMaintainInterval = time.Duration(0) +) + +var kopiaRepoOpen = repo.Open + +// NewKopiaRepoService creates an instance of BackupRepoService implemented by Kopia +func NewKopiaRepoService(logger logrus.FieldLogger) udmrepo.BackupRepoService { + ks := &kopiaRepoService{ + logger: logger, + } + + return ks +} + +func (ks *kopiaRepoService) Init(ctx context.Context, repoOption udmrepo.RepoOptions, createNew bool) error { + repoCtx := logging.SetupKopiaLog(ctx, ks.logger) + + if createNew { + if err := CreateBackupRepo(repoCtx, repoOption); err != nil { + return err + } + + return writeInitParameters(repoCtx, repoOption, ks.logger) + } else { + return ConnectBackupRepo(repoCtx, repoOption) + } +} + +func (ks *kopiaRepoService) Open(ctx context.Context, repoOption udmrepo.RepoOptions) (udmrepo.BackupRepo, error) { + repoConfig := repoOption.ConfigFilePath + if repoConfig == "" { + return nil, errors.New("invalid config file path") + } + + if _, err := os.Stat(repoConfig); os.IsNotExist(err) { + return nil, errors.Wrapf(err, "repo config %s doesn't exist", repoConfig) + } + + repoCtx := logging.SetupKopiaLog(ctx, ks.logger) + + r, err := openKopiaRepo(repoCtx, repoConfig, repoOption.RepoPassword) + if err != nil { + return nil, err + } + + kr := kopiaRepository{ + rawRepo: r, + openTime: time.Now(), + description: repoOption.Description, + throttle: logThrottle{ + interval: defaultLogInterval, + }, + logger: ks.logger, + } + + _, kr.rawWriter, err = r.NewWriter(repoCtx, repo.WriteSessionOptions{ + Purpose: repoOption.Description, + OnUpload: kr.updateProgress, + }) + + if err != nil { + if e := r.Close(repoCtx); e != nil { + ks.logger.WithError(e).Error("Failed to close raw repository on error") + } + + return nil, errors.Wrap(err, "error to create repo writer") + } + + return &kr, nil +} + +func (ks *kopiaRepoService) Maintain(ctx context.Context, repoOption udmrepo.RepoOptions) error { + repoConfig := repoOption.ConfigFilePath + if repoConfig == "" { + return errors.New("invalid config file path") + } + + if _, err := os.Stat(repoConfig); os.IsNotExist(err) { + return errors.Wrapf(err, "repo config %s doesn't exist", repoConfig) + } + + repoCtx 
:= logging.SetupKopiaLog(ctx, ks.logger) + + r, err := openKopiaRepo(repoCtx, repoConfig, repoOption.RepoPassword) + if err != nil { + return err + } + + defer func() { + c := r.Close(repoCtx) + if c != nil { + ks.logger.WithError(c).Error("Failed to close repo") + } + }() + + km := kopiaMaintenance{ + mode: maintenance.ModeAuto, + startTime: time.Now(), + throttle: logThrottle{ + interval: defaultLogInterval, + }, + logger: ks.logger, + } + + if mode, exist := repoOption.GeneralOptions[udmrepo.GenOptionMaintainMode]; exist { + if strings.EqualFold(mode, udmrepo.GenOptionMaintainFull) { + km.mode = maintenance.ModeFull + } else if strings.EqualFold(mode, udmrepo.GenOptionMaintainQuick) { + km.mode = maintenance.ModeQuick + } + } + + err = repo.DirectWriteSession(repoCtx, r.(repo.DirectRepository), repo.WriteSessionOptions{ + Purpose: "UdmRepoMaintenance", + OnUpload: km.maintainProgress, + }, func(ctx context.Context, dw repo.DirectRepositoryWriter) error { + return km.runMaintenance(ctx, dw) + }) + + if err != nil { + return errors.Wrap(err, "error to maintain repo") + } + + return nil +} + +func (ks *kopiaRepoService) DefaultMaintenanceFrequency() time.Duration { + return defaultMaintainCheckPeriod +} + +func (km *kopiaMaintenance) runMaintenance(ctx context.Context, rep repo.DirectRepositoryWriter) error { + err := snapshotmaintenance.Run(logging.SetupKopiaLog(ctx, km.logger), rep, km.mode, false, maintenance.SafetyFull) + if err != nil { + return errors.Wrapf(err, "error to run maintenance under mode %s", km.mode) + } + + return nil +} + +// maintainProgress is called when the repository writes a piece of blob data to the storage during the maintenance +func (km *kopiaMaintenance) maintainProgress(uploaded int64) { + total := atomic.AddInt64(&km.uploaded, uploaded) + + if km.throttle.shouldLog() { + km.logger.WithFields( + logrus.Fields{ + "Start Time": km.startTime.Format(time.RFC3339Nano), + "Current": time.Now().Format(time.RFC3339Nano), + }, + ).Debugf("Repo maintenance uploaded %d bytes.", total) + } +} + +func (kr *kopiaRepository) OpenObject(ctx context.Context, id udmrepo.ID) (udmrepo.ObjectReader, error) { + if kr.rawRepo == nil { + return nil, errors.New("repo is closed or not open") + } + + reader, err := kr.rawRepo.OpenObject(logging.SetupKopiaLog(ctx, kr.logger), object.ID(id)) + if err != nil { + return nil, errors.Wrap(err, "error to open object") + } + + return &kopiaObjectReader{ + rawReader: reader, + }, nil +} + +func (kr *kopiaRepository) GetManifest(ctx context.Context, id udmrepo.ID, mani *udmrepo.RepoManifest) error { + if kr.rawRepo == nil { + return errors.New("repo is closed or not open") + } + + metadata, err := kr.rawRepo.GetManifest(logging.SetupKopiaLog(ctx, kr.logger), manifest.ID(id), mani.Payload) + if err != nil { + return errors.Wrap(err, "error to get manifest") + } + + mani.Metadata = getManifestEntryFromKopia(metadata) + + return nil +} + +func (kr *kopiaRepository) FindManifests(ctx context.Context, filter udmrepo.ManifestFilter) ([]*udmrepo.ManifestEntryMetadata, error) { + if kr.rawRepo == nil { + return nil, errors.New("repo is closed or not open") + } + + metadata, err := kr.rawRepo.FindManifests(logging.SetupKopiaLog(ctx, kr.logger), filter.Labels) + if err != nil { + return nil, errors.Wrap(err, "error to find manifests") + } + + return getManifestEntriesFromKopia(metadata), nil +} + +func (kr *kopiaRepository) Time() time.Time { + if kr.rawRepo == nil { + return time.Time{} + } + + return kr.rawRepo.Time() +} + +func (kr *kopiaRepository) 
Close(ctx context.Context) error { + if kr.rawWriter != nil { + err := kr.rawWriter.Close(logging.SetupKopiaLog(ctx, kr.logger)) + if err != nil { + return errors.Wrap(err, "error to close repo writer") + } + + kr.rawWriter = nil + } + + if kr.rawRepo != nil { + err := kr.rawRepo.Close(logging.SetupKopiaLog(ctx, kr.logger)) + if err != nil { + return errors.Wrap(err, "error to close repo") + } + + kr.rawRepo = nil + } + + return nil +} + +func (kr *kopiaRepository) NewObjectWriter(ctx context.Context, opt udmrepo.ObjectWriteOptions) udmrepo.ObjectWriter { + if kr.rawWriter == nil { + return nil + } + + writer := kr.rawWriter.NewObjectWriter(logging.SetupKopiaLog(ctx, kr.logger), object.WriterOptions{ + Description: opt.Description, + Prefix: index.ID(opt.Prefix), + AsyncWrites: getAsyncWrites(), + Compressor: getCompressorForObject(opt), + }) + + if writer == nil { + return nil + } + + return &kopiaObjectWriter{ + rawWriter: writer, + } +} + +func (kr *kopiaRepository) PutManifest(ctx context.Context, manifest udmrepo.RepoManifest) (udmrepo.ID, error) { + if kr.rawWriter == nil { + return "", errors.New("repo writer is closed or not open") + } + + id, err := kr.rawWriter.PutManifest(logging.SetupKopiaLog(ctx, kr.logger), manifest.Metadata.Labels, manifest.Payload) + if err != nil { + return "", errors.Wrap(err, "error to put manifest") + } + + return udmrepo.ID(id), nil +} + +func (kr *kopiaRepository) DeleteManifest(ctx context.Context, id udmrepo.ID) error { + if kr.rawWriter == nil { + return errors.New("repo writer is closed or not open") + } + + err := kr.rawWriter.DeleteManifest(logging.SetupKopiaLog(ctx, kr.logger), manifest.ID(id)) + if err != nil { + return errors.Wrap(err, "error to delete manifest") + } + + return nil +} + +func (kr *kopiaRepository) Flush(ctx context.Context) error { + if kr.rawWriter == nil { + return errors.New("repo writer is closed or not open") + } + + err := kr.rawWriter.Flush(logging.SetupKopiaLog(ctx, kr.logger)) + if err != nil { + return errors.Wrap(err, "error to flush repo") + } + + return nil +} + +// updateProgress is called when the repository writes a piece of blob data to the storage during data write +func (kr *kopiaRepository) updateProgress(uploaded int64) { + total := atomic.AddInt64(&kr.uploaded, uploaded) + + if kr.throttle.shouldLog() { + kr.logger.WithFields( + logrus.Fields{ + "Description": kr.description, + "Open Time": kr.openTime.Format(time.RFC3339Nano), + "Current": time.Now().Format(time.RFC3339Nano), + }, + ).Debugf("Repo uploaded %d bytes.", total) + } +} + +func (kor *kopiaObjectReader) Read(p []byte) (int, error) { + if kor.rawReader == nil { + return 0, errors.New("object reader is closed or not open") + } + + return kor.rawReader.Read(p) +} + +func (kor *kopiaObjectReader) Seek(offset int64, whence int) (int64, error) { + if kor.rawReader == nil { + return -1, errors.New("object reader is closed or not open") + } + + return kor.rawReader.Seek(offset, whence) +} + +func (kor *kopiaObjectReader) Close() error { + if kor.rawReader == nil { + return nil + } + + err := kor.rawReader.Close() + if err != nil { + return err + } + + kor.rawReader = nil + + return nil +} + +func (kor *kopiaObjectReader) Length() int64 { + if kor.rawReader == nil { + return -1 + } + + return kor.rawReader.Length() +} + +func (kow *kopiaObjectWriter) Write(p []byte) (int, error) { + if kow.rawWriter == nil { + return 0, errors.New("object writer is closed or not open") + } + + return kow.rawWriter.Write(p) +} + +func (kow *kopiaObjectWriter) 
Seek(offset int64, whence int) (int64, error) { + return -1, errors.New("not supported") +} + +func (kow *kopiaObjectWriter) Checkpoint() (udmrepo.ID, error) { + if kow.rawWriter == nil { + return udmrepo.ID(""), errors.New("object writer is closed or not open") + } + + id, err := kow.rawWriter.Checkpoint() + if err != nil { + return udmrepo.ID(""), errors.Wrap(err, "error to checkpoint object") + } + + return udmrepo.ID(id), nil +} + +func (kow *kopiaObjectWriter) Result() (udmrepo.ID, error) { + if kow.rawWriter == nil { + return udmrepo.ID(""), errors.New("object writer is closed or not open") + } + + id, err := kow.rawWriter.Result() + if err != nil { + return udmrepo.ID(""), errors.Wrap(err, "error to wait object") + } + + return udmrepo.ID(id), nil +} + +func (kow *kopiaObjectWriter) Close() error { + if kow.rawWriter == nil { + return nil + } + + err := kow.rawWriter.Close() + if err != nil { + return err + } + + kow.rawWriter = nil + + return nil +} + +// getAsyncWrites returns the number of concurrent async writes +func getAsyncWrites() int { + return runtime.NumCPU() +} + +// getCompressorForObject returns the compressor for an object, at present, we don't support compression +func getCompressorForObject(opt udmrepo.ObjectWriteOptions) compression.Name { + return "" +} + +func getManifestEntryFromKopia(kMani *manifest.EntryMetadata) *udmrepo.ManifestEntryMetadata { + return &udmrepo.ManifestEntryMetadata{ + ID: udmrepo.ID(kMani.ID), + Labels: kMani.Labels, + Length: int32(kMani.Length), + ModTime: kMani.ModTime, + } +} + +func getManifestEntriesFromKopia(kMani []*manifest.EntryMetadata) []*udmrepo.ManifestEntryMetadata { + var ret []*udmrepo.ManifestEntryMetadata + + for _, entry := range kMani { + ret = append(ret, &udmrepo.ManifestEntryMetadata{ + ID: udmrepo.ID(entry.ID), + Labels: entry.Labels, + Length: int32(entry.Length), + ModTime: entry.ModTime, + }) + } + + return ret +} + +func (lt *logThrottle) shouldLog() bool { + nextOutputTime := atomic.LoadInt64((*int64)(<.lastTime)) + if nowNano := time.Now().UnixNano(); nowNano > nextOutputTime { + if atomic.CompareAndSwapInt64((*int64)(<.lastTime), nextOutputTime, nowNano+lt.interval.Nanoseconds()) { + return true + } + } + + return false +} + +func openKopiaRepo(ctx context.Context, configFile string, password string) (repo.Repository, error) { + r, err := kopiaRepoOpen(ctx, configFile, password, &repo.Options{}) + if os.IsNotExist(err) { + return nil, errors.Wrap(err, "error to open repo, repo doesn't exist") + } + + if err != nil { + return nil, errors.Wrap(err, "error to open repo") + } + + return r, nil +} + +func writeInitParameters(ctx context.Context, repoOption udmrepo.RepoOptions, logger logrus.FieldLogger) error { + r, err := openKopiaRepo(ctx, repoOption.ConfigFilePath, repoOption.RepoPassword) + if err != nil { + return err + } + + defer func() { + c := r.Close(ctx) + if c != nil { + logger.WithError(c).Error("Failed to close repo") + } + }() + + err = repo.WriteSession(ctx, r, repo.WriteSessionOptions{ + Purpose: "set init parameters", + }, func(ctx context.Context, w repo.RepositoryWriter) error { + p := maintenance.DefaultParams() + + if overwriteFullMaintainInterval != time.Duration(0) { + logger.Infof("Full maintenance interval change from %v to %v", p.FullCycle.Interval, overwriteFullMaintainInterval) + p.FullCycle.Interval = overwriteFullMaintainInterval + } + + if overwriteQuickMaintainInterval != time.Duration(0) { + logger.Infof("Quick maintenance interval change from %v to %v", p.QuickCycle.Interval, 
overwriteQuickMaintainInterval) + p.QuickCycle.Interval = overwriteQuickMaintainInterval + } + + p.Owner = r.ClientOptions().UsernameAtHost() + + if err := maintenance.SetParams(ctx, w, &p); err != nil { + return errors.Wrap(err, "error to set maintenance params") + } + + return nil + }) + + if err != nil { + return errors.Wrap(err, "error to init write repo parameters") + } + + return nil +} diff --git a/pkg/repository/udmrepo/kopialib/lib_repo_test.go b/pkg/repository/udmrepo/kopialib/lib_repo_test.go new file mode 100644 index 0000000000..e92bd5ebd8 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/lib_repo_test.go @@ -0,0 +1,406 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kopialib + +import ( + "context" + "os" + "testing" + "time" + + "github.com/kopia/kopia/repo" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" + repomocks "github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib/backend/mocks" + + velerotest "github.com/vmware-tanzu/velero/pkg/test" +) + +func TestOpen(t *testing.T) { + var directRpo *repomocks.DirectRepository + testCases := []struct { + name string + repoOptions udmrepo.RepoOptions + returnRepo *repomocks.DirectRepository + repoOpen func(context.Context, string, string, *repo.Options) (repo.Repository, error) + newWriterError error + expectedErr string + expected *kopiaRepository + }{ + { + name: "invalid config file", + expectedErr: "invalid config file path", + }, + { + name: "config file doesn't exist", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + }, + expectedErr: "repo config fake-file doesn't exist: stat fake-file: no such file or directory", + }, + { + name: "repo open fail, repo not exist", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + }, + repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return nil, os.ErrNotExist + }, + expectedErr: "error to open repo, repo doesn't exist: file does not exist", + }, + { + name: "repo open fail, other error", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + }, + repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return nil, errors.New("fake-repo-open-error") + }, + expectedErr: "error to open repo: fake-repo-open-error", + }, + { + name: "create repository writer fail", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + }, + repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return directRpo, nil + }, + returnRepo: new(repomocks.DirectRepository), + newWriterError: errors.New("fake-new-writer-error"), + expectedErr: "error to create repo writer: fake-new-writer-error", + }, + { + name: "create repository success", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + Description: "fake-description", + }, + repoOpen: 
func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return directRpo, nil + }, + returnRepo: new(repomocks.DirectRepository), + expected: &kopiaRepository{ + description: "fake-description", + throttle: logThrottle{ + interval: defaultLogInterval, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + logger := velerotest.NewLogger() + + service := kopiaRepoService{ + logger: logger, + } + + if tc.repoOpen != nil { + kopiaRepoOpen = tc.repoOpen + } + + if tc.returnRepo != nil { + directRpo = tc.returnRepo + } + + if tc.returnRepo != nil { + tc.returnRepo.On("NewWriter", mock.Anything, mock.Anything).Return(nil, nil, tc.newWriterError) + tc.returnRepo.On("Close", mock.Anything).Return(nil) + } + + repo, err := service.Open(context.Background(), tc.repoOptions) + + if repo != nil { + require.Equal(t, tc.expected.description, repo.(*kopiaRepository).description) + require.Equal(t, tc.expected.throttle.interval, repo.(*kopiaRepository).throttle.interval) + require.Equal(t, repo.(*kopiaRepository).logger, logger) + } + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} + +func TestMaintain(t *testing.T) { + var directRpo *repomocks.DirectRepository + testCases := []struct { + name string + repoOptions udmrepo.RepoOptions + returnRepo *repomocks.DirectRepository + returnRepoWriter *repomocks.DirectRepositoryWriter + repoOpen func(context.Context, string, string, *repo.Options) (repo.Repository, error) + newRepoWriterError error + findManifestError error + expectedErr string + }{ + { + name: "invalid config file", + expectedErr: "invalid config file path", + }, + { + name: "config file doesn't exist", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + }, + expectedErr: "repo config fake-file doesn't exist: stat fake-file: no such file or directory", + }, + { + name: "repo open fail, repo not exist", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + GeneralOptions: map[string]string{}, + }, + repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return nil, os.ErrNotExist + }, + expectedErr: "error to open repo, repo doesn't exist: file does not exist", + }, + { + name: "repo open fail, other error", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + GeneralOptions: map[string]string{}, + }, + repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return nil, errors.New("fake-repo-open-error") + }, + expectedErr: "error to open repo: fake-repo-open-error", + }, + { + name: "write session fail", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + GeneralOptions: map[string]string{}, + }, + repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return directRpo, nil + }, + returnRepo: new(repomocks.DirectRepository), + newRepoWriterError: errors.New("fake-new-direct-writer-error"), + expectedErr: "error to maintain repo: unable to create direct writer: fake-new-direct-writer-error", + }, + { + name: "maintain fail", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + GeneralOptions: map[string]string{}, + }, + repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return directRpo, nil + }, + returnRepo: new(repomocks.DirectRepository), + returnRepoWriter: new(repomocks.DirectRepositoryWriter), + findManifestError: 
errors.New("fake-find-manifest-error"), + expectedErr: "error to maintain repo: error to run maintenance under mode auto: unable to get maintenance params: error looking for maintenance manifest: fake-find-manifest-error", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + logger := velerotest.NewLogger() + ctx := context.Background() + + service := kopiaRepoService{ + logger: logger, + } + + if tc.repoOpen != nil { + kopiaRepoOpen = tc.repoOpen + } + + if tc.returnRepo != nil { + directRpo = tc.returnRepo + } + + if tc.returnRepo != nil { + tc.returnRepo.On("NewDirectWriter", mock.Anything, mock.Anything).Return(ctx, tc.returnRepoWriter, tc.newRepoWriterError) + tc.returnRepo.On("Close", mock.Anything).Return(nil) + } + + if tc.returnRepoWriter != nil { + tc.returnRepoWriter.On("DisableIndexRefresh").Return() + tc.returnRepoWriter.On("AlsoLogToContentLog", mock.Anything).Return(nil) + tc.returnRepoWriter.On("Close", mock.Anything).Return(nil) + tc.returnRepoWriter.On("FindManifests", mock.Anything, mock.Anything).Return(nil, tc.findManifestError) + } + + err := service.Maintain(ctx, tc.repoOptions) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} + +func TestWriteInitParameters(t *testing.T) { + var directRpo *repomocks.DirectRepository + testCases := []struct { + name string + repoOptions udmrepo.RepoOptions + returnRepo *repomocks.DirectRepository + returnRepoWriter *repomocks.DirectRepositoryWriter + repoOpen func(context.Context, string, string, *repo.Options) (repo.Repository, error) + newRepoWriterError error + findManifestError error + expectedErr string + }{ + { + name: "repo open fail, repo not exist", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + GeneralOptions: map[string]string{}, + }, + repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return nil, os.ErrNotExist + }, + expectedErr: "error to open repo, repo doesn't exist: file does not exist", + }, + { + name: "repo open fail, other error", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + GeneralOptions: map[string]string{}, + }, + repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return nil, errors.New("fake-repo-open-error") + }, + expectedErr: "error to open repo: fake-repo-open-error", + }, + { + name: "write session fail", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + GeneralOptions: map[string]string{}, + }, + repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return directRpo, nil + }, + returnRepo: new(repomocks.DirectRepository), + newRepoWriterError: errors.New("fake-new-writer-error"), + expectedErr: "error to init write repo parameters: unable to create writer: fake-new-writer-error", + }, + { + name: "set repo param fail", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "/tmp", + GeneralOptions: map[string]string{}, + }, + repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) { + return directRpo, nil + }, + returnRepo: new(repomocks.DirectRepository), + returnRepoWriter: new(repomocks.DirectRepositoryWriter), + findManifestError: errors.New("fake-find-manifest-error"), + expectedErr: "error to init write repo parameters: error to set maintenance params: error looking for maintenance manifest: fake-find-manifest-error", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t 
*testing.T) { + logger := velerotest.NewLogger() + ctx := context.Background() + + if tc.repoOpen != nil { + kopiaRepoOpen = tc.repoOpen + } + + if tc.returnRepo != nil { + directRpo = tc.returnRepo + } + + if tc.returnRepo != nil { + tc.returnRepo.On("NewWriter", mock.Anything, mock.Anything).Return(ctx, tc.returnRepoWriter, tc.newRepoWriterError) + tc.returnRepo.On("ClientOptions").Return(repo.ClientOptions{}) + tc.returnRepo.On("Close", mock.Anything).Return(nil) + } + + if tc.returnRepoWriter != nil { + tc.returnRepoWriter.On("Close", mock.Anything).Return(nil) + tc.returnRepoWriter.On("FindManifests", mock.Anything, mock.Anything).Return(nil, tc.findManifestError) + } + + err := writeInitParameters(ctx, tc.repoOptions, logger) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} + +func TestShouldLog(t *testing.T) { + testCases := []struct { + name string + lastTime int64 + interval time.Duration + retValue bool + }{ + { + name: "first time", + retValue: true, + }, + { + name: "not run", + lastTime: time.Now().Add(time.Hour).UnixNano(), + interval: time.Second * 10, + }, + { + name: "not first time, run", + lastTime: time.Now().Add(-time.Hour).UnixNano(), + interval: time.Second * 10, + retValue: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + lt := logThrottle{ + lastTime: tc.lastTime, + interval: tc.interval, + } + + before := lt.lastTime + + nw := time.Now() + + s := lt.shouldLog() + + require.Equal(t, s, tc.retValue) + + if s { + require.GreaterOrEqual(t, lt.lastTime-nw.UnixNano(), lt.interval) + } else { + require.Equal(t, lt.lastTime, before) + } + }) + } +} diff --git a/pkg/repository/udmrepo/kopialib/repo_init.go b/pkg/repository/udmrepo/kopialib/repo_init.go new file mode 100644 index 0000000000..c6407bde47 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/repo_init.go @@ -0,0 +1,160 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kopialib + +import ( + "context" + "strings" + + "github.com/kopia/kopia/repo" + "github.com/kopia/kopia/repo/blob" + "github.com/pkg/errors" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib/backend" +) + +type kopiaBackendStore struct { + name string + description string + store backend.Store +} + +// backendStores lists the supported backend storages at present +var backendStores []kopiaBackendStore = []kopiaBackendStore{ + {udmrepo.StorageTypeAzure, "an Azure blob storage", &backend.AzureBackend{}}, + {udmrepo.StorageTypeFs, "a filesystem", &backend.FsBackend{}}, + {udmrepo.StorageTypeGcs, "a Google Cloud Storage bucket", &backend.GCSBackend{}}, + {udmrepo.StorageTypeS3, "an S3 bucket", &backend.S3Backend{}}, +} + +// CreateBackupRepo creates a Kopia repository and then connect to it. 
+// The storage must be empty, otherwise, it will fail +func CreateBackupRepo(ctx context.Context, repoOption udmrepo.RepoOptions) error { + if repoOption.ConfigFilePath == "" { + return errors.New("invalid config file path") + } + + backendStore, err := setupBackendStore(ctx, repoOption.StorageType, repoOption.StorageOptions) + if err != nil { + return errors.Wrap(err, "error to setup backend storage") + } + + st, err := backendStore.store.Connect(ctx, true) + if err != nil { + return errors.Wrap(err, "error to connect to storage") + } + + err = createWithStorage(ctx, st, repoOption) + if err != nil { + return errors.Wrap(err, "error to create repo with storage") + } + + err = connectWithStorage(ctx, st, repoOption) + if err != nil { + return errors.Wrap(err, "error to connect repo with storage") + } + + return nil +} + +// ConnectBackupRepo connects to an existing Kopia repository. +// If the repository doesn't exist, it will fail +func ConnectBackupRepo(ctx context.Context, repoOption udmrepo.RepoOptions) error { + if repoOption.ConfigFilePath == "" { + return errors.New("invalid config file path") + } + + backendStore, err := setupBackendStore(ctx, repoOption.StorageType, repoOption.StorageOptions) + if err != nil { + return errors.Wrap(err, "error to setup backend storage") + } + + st, err := backendStore.store.Connect(ctx, false) + if err != nil { + return errors.Wrap(err, "error to connect to storage") + } + + err = connectWithStorage(ctx, st, repoOption) + if err != nil { + return errors.Wrap(err, "error to connect repo with storage") + } + + return nil +} + +func findBackendStore(storage string) *kopiaBackendStore { + for _, options := range backendStores { + if strings.EqualFold(options.name, storage) { + return &options + } + } + + return nil +} + +func setupBackendStore(ctx context.Context, storageType string, storageOptions map[string]string) (*kopiaBackendStore, error) { + backendStore := findBackendStore(storageType) + if backendStore == nil { + return nil, errors.New("error to find storage type") + } + + err := backendStore.store.Setup(ctx, storageOptions) + if err != nil { + return nil, errors.Wrap(err, "error to setup storage") + } + + return backendStore, nil +} + +func createWithStorage(ctx context.Context, st blob.Storage, repoOption udmrepo.RepoOptions) error { + err := ensureEmpty(ctx, st) + if err != nil { + return errors.Wrap(err, "error to ensure repository storage empty") + } + + options := backend.SetupNewRepositoryOptions(ctx, repoOption.GeneralOptions) + + if err := repo.Initialize(ctx, st, &options, repoOption.RepoPassword); err != nil { + return errors.Wrap(err, "error to initialize repository") + } + + return nil +} + +func connectWithStorage(ctx context.Context, st blob.Storage, repoOption udmrepo.RepoOptions) error { + options := backend.SetupConnectOptions(ctx, repoOption) + if err := repo.Connect(ctx, repoOption.ConfigFilePath, st, repoOption.RepoPassword, &options); err != nil { + return errors.Wrap(err, "error to connect to repository") + } + + return nil +} + +func ensureEmpty(ctx context.Context, s blob.Storage) error { + hasDataError := errors.Errorf("has data") + + err := s.ListBlobs(ctx, "", func(cb blob.Metadata) error { + return hasDataError + }) + + if errors.Is(err, hasDataError) { + return errors.New("found existing data in storage location") + } + + return errors.Wrap(err, "error to list blobs") +} diff --git a/pkg/repository/udmrepo/kopialib/repo_init_test.go b/pkg/repository/udmrepo/kopialib/repo_init_test.go new file mode 100644 index 
0000000000..f91296d1f3 --- /dev/null +++ b/pkg/repository/udmrepo/kopialib/repo_init_test.go @@ -0,0 +1,237 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kopialib + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" + storagemocks "github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib/backend/mocks" + + "github.com/pkg/errors" +) + +type comparableError struct { + message string +} + +func (ce *comparableError) Error() string { + return ce.message +} + +func (ce *comparableError) Is(err error) bool { + return err.Error() == ce.message +} + +func TestCreateBackupRepo(t *testing.T) { + testCases := []struct { + name string + backendStore *storagemocks.Store + repoOptions udmrepo.RepoOptions + connectErr error + setupError error + returnStore *storagemocks.Storage + storeListErr error + getBlobErr error + listBlobErr error + expectedErr string + }{ + { + name: "invalid config file", + expectedErr: "invalid config file path", + }, + { + name: "storage setup fail, invalid type", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + }, + expectedErr: "error to setup backend storage: error to find storage type", + }, + { + name: "storage setup fail, backend store steup fail", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + StorageType: udmrepo.StorageTypeAzure, + }, + backendStore: new(storagemocks.Store), + setupError: errors.New("fake-setup-error"), + expectedErr: "error to setup backend storage: error to setup storage: fake-setup-error", + }, + { + name: "storage connect fail", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + StorageType: udmrepo.StorageTypeAzure, + }, + backendStore: new(storagemocks.Store), + connectErr: errors.New("fake-connect-error"), + expectedErr: "error to connect to storage: fake-connect-error", + }, + { + name: "create repository error, exist blobs", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + StorageType: udmrepo.StorageTypeAzure, + }, + backendStore: new(storagemocks.Store), + returnStore: new(storagemocks.Storage), + listBlobErr: &comparableError{ + message: "has data", + }, + expectedErr: "error to create repo with storage: error to ensure repository storage empty: found existing data in storage location", + }, + { + name: "create repository error, error list blobs", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + StorageType: udmrepo.StorageTypeAzure, + }, + backendStore: new(storagemocks.Store), + returnStore: new(storagemocks.Storage), + listBlobErr: errors.New("fake-list-blob-error"), + expectedErr: "error to create repo with storage: error to ensure repository storage empty: error to list blobs: fake-list-blob-error", + }, + { + name: "create repository error, initialize error", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + StorageType: udmrepo.StorageTypeAzure, + }, + 
backendStore: new(storagemocks.Store), + returnStore: new(storagemocks.Storage), + getBlobErr: errors.New("fake-list-blob-error-01"), + expectedErr: "error to create repo with storage: error to initialize repository: unexpected error when checking for format blob: fake-list-blob-error-01", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + backendStores = []kopiaBackendStore{ + {udmrepo.StorageTypeAzure, "fake store", tc.backendStore}, + {udmrepo.StorageTypeFs, "fake store", tc.backendStore}, + {udmrepo.StorageTypeGcs, "fake store", tc.backendStore}, + {udmrepo.StorageTypeS3, "fake store", tc.backendStore}, + } + + if tc.backendStore != nil { + tc.backendStore.On("Connect", mock.Anything, mock.Anything, mock.Anything).Return(tc.returnStore, tc.connectErr) + tc.backendStore.On("Setup", mock.Anything, mock.Anything, mock.Anything).Return(tc.setupError) + } + + if tc.returnStore != nil { + tc.returnStore.On("ListBlobs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.listBlobErr) + tc.returnStore.On("GetBlob", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.getBlobErr) + } + + err := CreateBackupRepo(context.Background(), tc.repoOptions) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} + +func TestConnectBackupRepo(t *testing.T) { + testCases := []struct { + name string + backendStore *storagemocks.Store + repoOptions udmrepo.RepoOptions + connectErr error + setupError error + returnStore *storagemocks.Storage + getBlobErr error + expectedErr string + }{ + { + name: "invalid config file", + expectedErr: "invalid config file path", + }, + { + name: "storage setup fail, invalid type", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + }, + expectedErr: "error to setup backend storage: error to find storage type", + }, + { + name: "storage setup fail, backend store steup fail", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + StorageType: udmrepo.StorageTypeAzure, + }, + backendStore: new(storagemocks.Store), + setupError: errors.New("fake-setup-error"), + expectedErr: "error to setup backend storage: error to setup storage: fake-setup-error", + }, + { + name: "storage connect fail", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + StorageType: udmrepo.StorageTypeAzure, + }, + backendStore: new(storagemocks.Store), + connectErr: errors.New("fake-connect-error"), + expectedErr: "error to connect to storage: fake-connect-error", + }, + { + name: "connect repository error", + repoOptions: udmrepo.RepoOptions{ + ConfigFilePath: "fake-file", + StorageType: udmrepo.StorageTypeAzure, + }, + backendStore: new(storagemocks.Store), + returnStore: new(storagemocks.Storage), + getBlobErr: errors.New("fake-get-blob-error"), + expectedErr: "error to connect repo with storage: error to connect to repository: unable to read format blob: fake-get-blob-error", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + backendStores = []kopiaBackendStore{ + {udmrepo.StorageTypeAzure, "fake store", tc.backendStore}, + {udmrepo.StorageTypeFs, "fake store", tc.backendStore}, + {udmrepo.StorageTypeGcs, "fake store", tc.backendStore}, + {udmrepo.StorageTypeS3, "fake store", tc.backendStore}, + } + + if tc.backendStore != nil { + tc.backendStore.On("Connect", mock.Anything, mock.Anything, mock.Anything).Return(tc.returnStore, tc.connectErr) + 
tc.backendStore.On("Setup", mock.Anything, mock.Anything, mock.Anything).Return(tc.setupError) + } + + if tc.returnStore != nil { + tc.returnStore.On("GetBlob", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.getBlobErr) + } + + err := ConnectBackupRepo(context.Background(), tc.repoOptions) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} diff --git a/pkg/repository/udmrepo/mocks/BackupRepoService.go b/pkg/repository/udmrepo/mocks/BackupRepoService.go index 135c0058c4..b61bf5ccc3 100644 --- a/pkg/repository/udmrepo/mocks/BackupRepoService.go +++ b/pkg/repository/udmrepo/mocks/BackupRepoService.go @@ -4,8 +4,10 @@ package mocks import ( context "context" + time "time" mock "github.com/stretchr/testify/mock" + udmrepo "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" ) @@ -14,6 +16,20 @@ type BackupRepoService struct { mock.Mock } +// DefaultMaintenanceFrequency provides a mock function with given fields: +func (_m *BackupRepoService) DefaultMaintenanceFrequency() time.Duration { + ret := _m.Called() + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + // Init provides a mock function with given fields: ctx, repoOption, createNew func (_m *BackupRepoService) Init(ctx context.Context, repoOption udmrepo.RepoOptions, createNew bool) error { ret := _m.Called(ctx, repoOption, createNew) diff --git a/pkg/repository/udmrepo/repo.go b/pkg/repository/udmrepo/repo.go index 01d434fdad..6bec441898 100644 --- a/pkg/repository/udmrepo/repo.go +++ b/pkg/repository/udmrepo/repo.go @@ -84,6 +84,10 @@ type BackupRepoService interface { // Maintain is periodically called to maintain the backup repository to eliminate redundant data. // repoOption: options to maintain the backup repository. 
Maintain(ctx context.Context, repoOption RepoOptions) error + + // DefaultMaintenanceFrequency returns the default frequency of maintenance; callers should refer to this + // frequency to maintain the backup repository to get the best maintenance performance + DefaultMaintenanceFrequency() time.Duration } // BackupRepo provides the access to the backup repository diff --git a/pkg/repository/udmrepo/repo-options.go b/pkg/repository/udmrepo/repo_options.go similarity index 76% rename from pkg/repository/udmrepo/repo-options.go rename to pkg/repository/udmrepo/repo_options.go index f11c0d9424..f4a043ff2a 100644 --- a/pkg/repository/udmrepo/repo-options.go +++ b/pkg/repository/udmrepo/repo_options.go @@ -32,6 +32,9 @@ const ( GenOptionMaintainFull = "full" GenOptionMaintainQuick = "quick" + GenOptionOwnerName = "username" + GenOptionOwnerDomain = "domainname" + StoreOptionS3KeyId = "accessKeyID" StoreOptionS3Provider = "providerName" StoreOptionS3SecretKey = "secretAccessKey" @@ -56,6 +59,14 @@ const ( StoreOptionPrefix = "prefix" StoreOptionPrefixName = "unified-repo" + StoreOptionGenHashAlgo = "hashAlgo" + StoreOptionGenEncryptAlgo = "encryptAlgo" + StoreOptionGenSplitAlgo = "splitAlgo" + + StoreOptionGenRetentionMode = "retentionMode" + StoreOptionGenRetentionPeriod = "retentionPeriod" + StoreOptionGenReadOnly = "readOnly" + ThrottleOptionReadOps = "readOPS" ThrottleOptionWriteOps = "writeOPS" ThrottleOptionListOps = "listOPS" @@ -63,6 +74,11 @@ const ( ThrottleOptionDownloadBytes = "downloadBytes" ) +const ( + defaultUsername = "default" + defaultDomain = "default" +) + type RepoOptions struct { // StorageType is a repository specific string to identify a backup storage, i.e., "s3", "filesystem" StorageType string @@ -80,17 +96,24 @@ type RepoOptions struct { Description string } +// PasswordGetter defines the method to get a repository password. type PasswordGetter interface { GetPassword(param interface{}) (string, error) }
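(Illustrative aside, not part of the patch: a minimal sketch of how a caller outside this package might compose RepoOptions with the option funcs defined below. The static password getter, the work path, and the repo ID are hypothetical stand-ins for Velero's credential-backed getter and real backup location data.)

// Assumes: import "github.com/vmware-tanzu/velero/pkg/repository/udmrepo"

// staticPassword is a hypothetical PasswordGetter used only for this sketch.
type staticPassword struct{ pw string }

func (s staticPassword) GetPassword(param interface{}) (string, error) { return s.pw, nil }

func buildRepoOptions() (*udmrepo.RepoOptions, error) {
	return udmrepo.NewRepoOptions(
		// The config file path is derived from the work path and repo ID by getRepoConfigFile.
		udmrepo.WithConfigFile("/tmp/velero-repos", "ns-1"),
		udmrepo.WithPassword(staticPassword{pw: "not-a-real-password"}, nil),
		// Default owner identity exposed by this package.
		udmrepo.WithGenOptions(map[string]string{
			udmrepo.GenOptionOwnerName:   udmrepo.GetRepoUser(),
			udmrepo.GenOptionOwnerDomain: udmrepo.GetRepoDomain(),
		}),
		udmrepo.WithDescription("repo for namespace ns-1"),
	)
}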
+// StoreOptionsGetter defines the methods to get the storage-related options. type StoreOptionsGetter interface { GetStoreType(param interface{}) (string, error) + GetStoreOptions(param interface{}) (map[string]string, error) } +// NewRepoOptions creates a new RepoOptions for different purposes func NewRepoOptions(optionFuncs ...func(*RepoOptions) error) (*RepoOptions, error) { - options := &RepoOptions{} + options := &RepoOptions{ + GeneralOptions: make(map[string]string), + StorageOptions: make(map[string]string), + } + for _, optionFunc := range optionFuncs { err := optionFunc(options) if err != nil { @@ -101,6 +124,8 @@ func NewRepoOptions(optionFuncs ...func(*RepoOptions) error) (*RepoOptions, erro return options, nil } +// WithPassword sets the RepoPassword to RepoOptions; the password is acquired through +// the provided interface func WithPassword(getter PasswordGetter, param interface{}) func(*RepoOptions) error { return func(options *RepoOptions) error { password, err := getter.GetPassword(param) @@ -114,6 +139,7 @@ func WithPassword(getter PasswordGetter, param interface{}) func(*RepoOptions) e } } +// WithConfigFile sets the ConfigFilePath to RepoOptions func WithConfigFile(workPath string, repoID string) func(*RepoOptions) error { return func(options *RepoOptions) error { options.ConfigFilePath = getRepoConfigFile(workPath, repoID) @@ -121,6 +147,7 @@ func WithConfigFile(workPath string, repoID string) func(*RepoOptions) error { } } +// WithGenOptions sets the GeneralOptions to RepoOptions func WithGenOptions(genOptions map[string]string) func(*RepoOptions) error { return func(options *RepoOptions) error { for k, v := range genOptions { @@ -131,6 +158,8 @@ func WithGenOptions(genOptions map[string]string) func(*RepoOptions) error { } } +// WithStoreOptions sets the StorageOptions to RepoOptions; the store options are acquired through +// the provided interface func WithStoreOptions(getter StoreOptionsGetter, param interface{}) func(*RepoOptions) error { return func(options *RepoOptions) error { storeType, err := getter.GetStoreType(param) @@ -153,6 +182,7 @@ func WithStoreOptions(getter StoreOptionsGetter, param interface{}) func(*RepoOp } } +// WithDescription sets the Description to RepoOptions func WithDescription(desc string) func(*RepoOptions) error { return func(options *RepoOptions) error { options.Description = desc @@ -160,6 +190,16 @@ func WithDescription(desc string) func(*RepoOptions) error { } } +// GetRepoUser returns the default username that is used to manipulate the Unified Repo +func GetRepoUser() string { + return defaultUsername +} + +// GetRepoDomain returns the default user domain that is used to manipulate the Unified Repo +func GetRepoDomain() string { + return defaultDomain +} + func getRepoConfigFile(workPath string, repoID string) string { if workPath == "" { workPath = filepath.Join(os.Getenv("HOME"), "udmrepo") diff --git a/pkg/repository/udmrepo/service/service.go b/pkg/repository/udmrepo/service/service.go index 55fbb03c8f..c2f0a9b0e6 100644 --- a/pkg/repository/udmrepo/service/service.go +++ b/pkg/repository/udmrepo/service/service.go @@ -20,18 +20,10 @@ import ( "github.com/sirupsen/logrus" "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib" ) -const ( - defaultUsername = "default" - defaultDomain = "default" -) - +// Create creates an instance of BackupRepoService func Create(logger logrus.FieldLogger) udmrepo.BackupRepoService { - ///TODO: create from kopiaLib - return nil -} - -func GetRepoUser() (username, domain string) { - return defaultUsername, 
defaultDomain + return kopialib.NewKopiaRepoService(logger) } diff --git a/pkg/restic/common.go b/pkg/restic/common.go index 860f983f72..f1ecb9a718 100644 --- a/pkg/restic/common.go +++ b/pkg/restic/common.go @@ -17,7 +17,6 @@ limitations under the License. package restic import ( - "context" "fmt" "os" "strconv" @@ -25,33 +24,19 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "sigs.k8s.io/controller-runtime/pkg/client" "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/label" repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config" "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) const ( - // DaemonSet is the name of the Velero restic daemonset. - DaemonSet = "restic" - - // InitContainer is the name of the init container added - // to workload pods to help with restores. - InitContainer = "restic-wait" // DefaultMaintenanceFrequency is the default time interval // at which restic prune is run. DefaultMaintenanceFrequency = 7 * 24 * time.Hour - // DefaultVolumesToRestic specifies whether restic should be used, by default, to - // take backup of all pod volumes. - DefaultVolumesToRestic = false - // insecureSkipTLSVerifyKey is the flag in BackupStorageLocation's config // to indicate whether to skip TLS verify to setup insecure HTTPS connection. insecureSkipTLSVerifyKey = "insecureSkipTLSVerify" @@ -61,51 +46,6 @@ const ( resticInsecureTLSFlag = "--insecure-tls" ) -// SnapshotIdentifier uniquely identifies a restic snapshot -// taken by Velero. -type SnapshotIdentifier struct { - // VolumeNamespace is the namespace of the pod/volume that - // the restic snapshot is for. - VolumeNamespace string - - // BackupStorageLocation is the backup's storage location - // name. - BackupStorageLocation string - - // SnapshotID is the short ID of the restic snapshot. - SnapshotID string -} - -// GetSnapshotsInBackup returns a list of all restic snapshot ids associated with -// a given Velero backup. -func GetSnapshotsInBackup(ctx context.Context, backup *velerov1api.Backup, kbClient client.Client) ([]SnapshotIdentifier, error) { - podVolumeBackups := &velerov1api.PodVolumeBackupList{} - options := &client.ListOptions{ - LabelSelector: labels.Set(map[string]string{ - velerov1api.BackupNameLabel: label.GetValidName(backup.Name), - }).AsSelector(), - } - - err := kbClient.List(ctx, podVolumeBackups, options) - if err != nil { - return nil, errors.WithStack(err) - } - - var res []SnapshotIdentifier - for _, item := range podVolumeBackups.Items { - if item.Status.SnapshotID == "" { - continue - } - res = append(res, SnapshotIdentifier{ - VolumeNamespace: item.Spec.Pod.Namespace, - BackupStorageLocation: backup.Spec.StorageLocation, - SnapshotID: item.Status.SnapshotID, - }) - } - - return res, nil -} - // TempCACertFile creates a temp file containing a CA bundle // and returns its path. The caller should generally call os.Remove() // to remove the file when done with it. @@ -131,14 +71,6 @@ func TempCACertFile(caCert []byte, bsl string, fs filesystem.Interface) (string, return name, nil } -// NewPodVolumeRestoreListOptions creates a ListOptions with a label selector configured to -// find PodVolumeRestores for the restore identified by name. 
-func NewPodVolumeRestoreListOptions(name string) metav1.ListOptions { - return metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s", velerov1api.RestoreNameLabel, label.GetValidName(name)), - } -} - // CmdEnv returns a list of environment variables (in the format var=val) that // should be used when running a restic command for a particular backend provider. // This list is the current environment, plus any provider-specific variables restic needs. diff --git a/pkg/restic/common_test.go b/pkg/restic/common_test.go index b2acee773f..97363340c0 100644 --- a/pkg/restic/common_test.go +++ b/pkg/restic/common_test.go @@ -17,190 +17,17 @@ limitations under the License. package restic import ( - "context" "os" - "sort" "testing" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - corev1api "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) -func TestGetSnapshotsInBackup(t *testing.T) { - tests := []struct { - name string - podVolumeBackups []velerov1api.PodVolumeBackup - expected []SnapshotIdentifier - longBackupNameEnabled bool - }{ - { - name: "no pod volume backups", - podVolumeBackups: nil, - expected: nil, - }, - { - name: "no pod volume backups with matching label", - podVolumeBackups: []velerov1api.PodVolumeBackup{ - { - ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"}, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"}, - }, - }, - expected: nil, - }, - { - name: "some pod volume backups with matching label", - podVolumeBackups: []velerov1api.PodVolumeBackup{ - { - ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"}, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"}, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-3"}, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb-2", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-4"}, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "incomplete-or-failed-pvb", Labels: 
map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-2"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: ""}, - }, - }, - expected: []SnapshotIdentifier{ - { - VolumeNamespace: "ns-1", - SnapshotID: "snap-3", - }, - { - VolumeNamespace: "ns-1", - SnapshotID: "snap-4", - }, - }, - }, - { - name: "some pod volume backups with matching label and backup name greater than 63 chars", - longBackupNameEnabled: true, - podVolumeBackups: []velerov1api.PodVolumeBackup{ - { - ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"}, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"}, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "the-really-long-backup-name-that-is-much-more-than-63-cha6ca4bc"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-3"}, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb-2", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-4"}, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "incomplete-or-failed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-2"}, - }, - Status: velerov1api.PodVolumeBackupStatus{SnapshotID: ""}, - }, - }, - expected: []SnapshotIdentifier{ - { - VolumeNamespace: "ns-1", - SnapshotID: "snap-3", - }, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var ( - clientBuilder = velerotest.NewFakeControllerRuntimeClientBuilder(t) - veleroBackup = &velerov1api.Backup{} - ) - - veleroBackup.Name = "backup-1" - - if test.longBackupNameEnabled { - veleroBackup.Name = "the-really-long-backup-name-that-is-much-more-than-63-characters" - } - clientBuilder.WithLists(&velerov1api.PodVolumeBackupList{ - Items: test.podVolumeBackups, - }) - - res, err := GetSnapshotsInBackup(context.TODO(), veleroBackup, clientBuilder.Build()) - assert.NoError(t, err) - - // sort to ensure good compare of slices - less := func(snapshots []SnapshotIdentifier) func(i, j int) bool { - return func(i, j int) bool { - if snapshots[i].VolumeNamespace == snapshots[j].VolumeNamespace { - return snapshots[i].SnapshotID < snapshots[j].SnapshotID - } - return snapshots[i].VolumeNamespace < snapshots[j].VolumeNamespace - } - - } - - sort.Slice(test.expected, less(test.expected)) - sort.Slice(res, less(res)) - - assert.Equal(t, test.expected, res) - }) - } -} - func TestTempCACertFile(t *testing.T) { var ( fs = velerotest.NewFakeFileSystem() diff --git a/pkg/restic/exec_commands.go b/pkg/restic/exec_commands.go 
index 7dd0057c0f..22c1a96659 100644 --- a/pkg/restic/exec_commands.go +++ b/pkg/restic/exec_commands.go @@ -26,7 +26,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/uploader" "github.com/vmware-tanzu/velero/pkg/util/exec" "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) @@ -71,7 +71,7 @@ func GetSnapshotID(snapshotIdCmd *Command) (string, error) { // RunBackup runs a `restic backup` command and watches the output to provide // progress updates to the caller. -func RunBackup(backupCmd *Command, log logrus.FieldLogger, updateFunc func(velerov1api.PodVolumeOperationProgress)) (string, string, error) { +func RunBackup(backupCmd *Command, log logrus.FieldLogger, updater uploader.ProgressUpdater) (string, string, error) { // buffers for copying command stdout/err output into stdoutBuf := new(bytes.Buffer) stderrBuf := new(bytes.Buffer) @@ -104,9 +104,9 @@ func RunBackup(backupCmd *Command, log logrus.FieldLogger, updateFunc func(veler // if the line contains a non-empty bytes_done field, we can update the // caller with the progress if stat.BytesDone != 0 { - updateFunc(velerov1api.PodVolumeOperationProgress{ - TotalBytes: stat.TotalBytes, - BytesDone: stat.BytesDone, + updater.UpdateProgress(&uploader.UploaderProgress{ + TotalBytes: stat.TotalBytesProcessed, + BytesDone: stat.TotalBytesProcessed, }) } } @@ -136,7 +136,7 @@ func RunBackup(backupCmd *Command, log logrus.FieldLogger, updateFunc func(veler } // update progress to 100% - updateFunc(velerov1api.PodVolumeOperationProgress{ + updater.UpdateProgress(&uploader.UploaderProgress{ TotalBytes: stat.TotalBytesProcessed, BytesDone: stat.TotalBytesProcessed, }) @@ -184,7 +184,7 @@ func getSummaryLine(b []byte) ([]byte, error) { // RunRestore runs a `restic restore` command and monitors the volume size to // provide progress updates to the caller. 
-func RunRestore(restoreCmd *Command, log logrus.FieldLogger, updateFunc func(velerov1api.PodVolumeOperationProgress)) (string, string, error) { +func RunRestore(restoreCmd *Command, log logrus.FieldLogger, updater uploader.ProgressUpdater) (string, string, error) { insecureTLSFlag := "" for _, extraFlag := range restoreCmd.ExtraFlags { @@ -198,7 +198,7 @@ func RunRestore(restoreCmd *Command, log logrus.FieldLogger, updateFunc func(vel return "", "", errors.Wrap(err, "error getting snapshot size") } - updateFunc(velerov1api.PodVolumeOperationProgress{ + updater.UpdateProgress(&uploader.UploaderProgress{ TotalBytes: snapshotSize, }) @@ -216,10 +216,12 @@ func RunRestore(restoreCmd *Command, log logrus.FieldLogger, updateFunc func(vel log.WithError(err).Errorf("error getting restic restore progress") } - updateFunc(velerov1api.PodVolumeOperationProgress{ - TotalBytes: snapshotSize, - BytesDone: volumeSize, - }) + if volumeSize != 0 { + updater.UpdateProgress(&uploader.UploaderProgress{ + TotalBytes: snapshotSize, + BytesDone: volumeSize, + }) + } case <-quit: ticker.Stop() return @@ -231,7 +233,7 @@ func RunRestore(restoreCmd *Command, log logrus.FieldLogger, updateFunc func(vel quit <- struct{}{} // update progress to 100% - updateFunc(velerov1api.PodVolumeOperationProgress{ + updater.UpdateProgress(&uploader.UploaderProgress{ TotalBytes: snapshotSize, BytesDone: snapshotSize, }) diff --git a/pkg/restic/executer.go b/pkg/restic/executer.go deleted file mode 100644 index e89883e76c..0000000000 --- a/pkg/restic/executer.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright The Velero Contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restic - -import ( - "github.com/sirupsen/logrus" - - velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" -) - -// BackupExec is able to run backups. -type BackupExec struct{} - -// RunBackup is a wrapper for the restic.RunBackup function in order to be able -// to use interfaces (and swap out objects for testing purposes). -func (exec BackupExec) RunBackup(cmd *Command, log logrus.FieldLogger, updateFn func(velerov1api.PodVolumeOperationProgress)) (string, string, error) { - return RunBackup(cmd, log, updateFn) -} - -// GetSnapshotID gets the Restic snapshot ID. -func (exec BackupExec) GetSnapshotID(snapshotIdCmd *Command) (string, error) { - return GetSnapshotID(snapshotIdCmd) -} diff --git a/pkg/restic/mocks/fake_restic_executer.go b/pkg/restic/mocks/fake_restic_executer.go deleted file mode 100644 index 9dcae9574c..0000000000 --- a/pkg/restic/mocks/fake_restic_executer.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright The Velero Contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mocks - -import ( - "github.com/sirupsen/logrus" - - velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/restic" -) - -// FakeResticBackupExec represents an object that can run backups. -type FakeResticBackupExec struct{} - -// RunBackup runs a Restic backup. -func (exec FakeResticBackupExec) RunBackup(cmd *restic.Command, log logrus.FieldLogger, updateFn func(velerov1api.PodVolumeOperationProgress)) (string, string, error) { - return "", "", nil -} - -// GetSnapshotID gets the Restic snapshot ID. -func (exec FakeResticBackupExec) GetSnapshotID(cmd *restic.Command) (string, error) { - return "", nil -} \ No newline at end of file diff --git a/pkg/restore/add_pv_from_pvc_action.go b/pkg/restore/add_pv_from_pvc_action.go index 04c992357f..147ad553f4 100644 --- a/pkg/restore/add_pv_from_pvc_action.go +++ b/pkg/restore/add_pv_from_pvc_action.go @@ -24,6 +24,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) type AddPVFromPVCAction struct { @@ -40,7 +41,7 @@ func (a *AddPVFromPVCAction) AppliesTo() (velero.ResourceSelector, error) { }, nil } -func (a *AddPVFromPVCAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *AddPVFromPVCAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { a.logger.Info("Executing AddPVFromPVCAction") // use input.ItemFromBackup because we need to look at status fields, which have already been @@ -53,7 +54,7 @@ func (a *AddPVFromPVCAction) Execute(input *velero.RestoreItemActionExecuteInput // TODO: consolidate this logic in a helper function to share with backup_pv_action.go if pvc.Status.Phase != corev1api.ClaimBound || pvc.Spec.VolumeName == "" { a.logger.Info("PVC is not bound or its volume name is empty") - return &velero.RestoreItemActionExecuteOutput{ + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, }, nil } @@ -64,7 +65,7 @@ func (a *AddPVFromPVCAction) Execute(input *velero.RestoreItemActionExecuteInput } a.logger.Infof("Adding PV %s as an additional item to restore", pvc.Spec.VolumeName) - return &velero.RestoreItemActionExecuteOutput{ + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, AdditionalItems: []velero.ResourceIdentifier{pv}, }, nil diff --git a/pkg/restore/add_pv_from_pvc_action_test.go b/pkg/restore/add_pv_from_pvc_action_test.go index b4ceb90a43..f8b5b5aaba 100644 --- a/pkg/restore/add_pv_from_pvc_action_test.go +++ b/pkg/restore/add_pv_from_pvc_action_test.go @@ -27,6 +27,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -89,7 +90,7 @@ func TestAddPVFromPVCActionExecute(t *testing.T) { action := &AddPVFromPVCAction{logger: velerotest.NewLogger()} - input := 
&velero.RestoreItemActionExecuteInput{ + input := &riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{Object: itemData}, ItemFromBackup: &unstructured.Unstructured{Object: itemFromBackupData}, } diff --git a/pkg/restore/add_pvc_from_pod_action.go b/pkg/restore/add_pvc_from_pod_action.go index 70f33d985d..b7bd60a34c 100644 --- a/pkg/restore/add_pvc_from_pod_action.go +++ b/pkg/restore/add_pvc_from_pod_action.go @@ -24,6 +24,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) type AddPVCFromPodAction struct { @@ -40,7 +41,7 @@ func (a *AddPVCFromPodAction) AppliesTo() (velero.ResourceSelector, error) { }, nil } -func (a *AddPVCFromPodAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *AddPVCFromPodAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { a.logger.Info("Executing AddPVCFromPodAction") var pod corev1api.Pod @@ -63,7 +64,7 @@ func (a *AddPVCFromPodAction) Execute(input *velero.RestoreItemActionExecuteInpu }) } - return &velero.RestoreItemActionExecuteOutput{ + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, AdditionalItems: additionalItems, }, nil diff --git a/pkg/restore/add_pvc_from_pod_action_test.go b/pkg/restore/add_pvc_from_pod_action_test.go index b409e5d6ef..529751b379 100644 --- a/pkg/restore/add_pvc_from_pod_action_test.go +++ b/pkg/restore/add_pvc_from_pod_action_test.go @@ -28,6 +28,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -100,7 +101,7 @@ func TestAddPVCFromPodActionExecute(t *testing.T) { action := &AddPVCFromPodAction{logger: velerotest.NewLogger()} - input := &velero.RestoreItemActionExecuteInput{ + input := &riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{Object: itemData}, } diff --git a/pkg/restore/admissionwebhook_config_action.go b/pkg/restore/admissionwebhook_config_action.go index 8fd5c1693e..68fa089888 100644 --- a/pkg/restore/admissionwebhook_config_action.go +++ b/pkg/restore/admissionwebhook_config_action.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) // AdmissionWebhookConfigurationAction is a RestoreItemAction plugin applicable to mutatingwebhookconfiguration and @@ -46,7 +47,7 @@ func (a *AdmissionWebhookConfigurationAction) AppliesTo() (velero.ResourceSelect // Execute will reset the value of "sideEffects" attribute of each item in the "webhooks" list to "None" if they are invalid values for // v1, such as "Unknown" or "Some" -func (a *AdmissionWebhookConfigurationAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *AdmissionWebhookConfigurationAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { a.logger.Info("Executing ChangeStorageClassAction") defer a.logger.Info("Done executing ChangeStorageClassAction") @@ -59,7 +60,7 @@ func (a *AdmissionWebhookConfigurationAction) Execute(input *velero.RestoreItemA logger := 
a.logger.WithField("resource_name", name) if apiVersion != "admissionregistration.k8s.io/v1" { logger.Infof("unable to handle api version: %s, skip", apiVersion) - return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + return riav1.NewRestoreItemActionExecuteOutput(input.Item), nil } webhooks, ok, err := unstructured.NestedSlice(item.UnstructuredContent(), "webhooks") if err != nil { @@ -67,7 +68,7 @@ func (a *AdmissionWebhookConfigurationAction) Execute(input *velero.RestoreItemA } if !ok { logger.Info("webhooks is not set, skip") - return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + return riav1.NewRestoreItemActionExecuteOutput(input.Item), nil } newWebhooks := make([]interface{}, 0) for i, entry := range webhooks { @@ -85,5 +86,5 @@ func (a *AdmissionWebhookConfigurationAction) Execute(input *velero.RestoreItemA newWebhooks = append(newWebhooks, obj) } item.UnstructuredContent()["webhooks"] = newWebhooks - return velero.NewRestoreItemActionExecuteOutput(item), nil + return riav1.NewRestoreItemActionExecuteOutput(item), nil } diff --git a/pkg/restore/admissionwebhook_config_action_test.go b/pkg/restore/admissionwebhook_config_action_test.go index c6c31d2219..a6548ae586 100644 --- a/pkg/restore/admissionwebhook_config_action_test.go +++ b/pkg/restore/admissionwebhook_config_action_test.go @@ -8,7 +8,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -163,7 +163,7 @@ func TestNewAdmissionWebhookConfigurationActionExecute(t *testing.T) { t.Run(tt.name, func(t *testing.T) { o := map[string]interface{}{} json.Unmarshal([]byte(tt.itemJSON), &o) - input := &velero.RestoreItemActionExecuteInput{ + input := &riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{ Object: o, }, diff --git a/pkg/restore/apiservice_action.go b/pkg/restore/apiservice_action.go index 7f817a59e0..0d9568c0b8 100644 --- a/pkg/restore/apiservice_action.go +++ b/pkg/restore/apiservice_action.go @@ -21,6 +21,7 @@ import ( "k8s.io/kube-aggregator/pkg/controllers/autoregister" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) type APIServiceAction struct { @@ -42,10 +43,10 @@ func (a *APIServiceAction) AppliesTo() (velero.ResourceSelector, error) { }, nil } -func (a *APIServiceAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *APIServiceAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { a.logger.Info("Executing APIServiceAction") defer a.logger.Info("Done executing APIServiceAction") a.logger.Infof("Skipping restore of APIService as it is managed by Kubernetes") - return velero.NewRestoreItemActionExecuteOutput(input.Item).WithoutRestore(), nil + return riav1.NewRestoreItemActionExecuteOutput(input.Item).WithoutRestore(), nil } diff --git a/pkg/restore/apiservice_action_test.go b/pkg/restore/apiservice_action_test.go index 81f4a6171c..1f9baeae92 100644 --- a/pkg/restore/apiservice_action_test.go +++ b/pkg/restore/apiservice_action_test.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 
"github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -40,7 +40,7 @@ func TestAPIServiceActionExecuteSkipsRestore(t *testing.T) { require.NoError(t, err) action := NewAPIServiceAction(velerotest.NewLogger()) - res, err := action.Execute(&velero.RestoreItemActionExecuteInput{ + res, err := action.Execute(&riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{Object: unstructuredAPIService}, ItemFromBackup: &unstructured.Unstructured{Object: unstructuredAPIService}, }) diff --git a/pkg/restore/change_pvc_node_selector.go b/pkg/restore/change_pvc_node_selector.go index b4d947c21f..23a3c20c90 100644 --- a/pkg/restore/change_pvc_node_selector.go +++ b/pkg/restore/change_pvc_node_selector.go @@ -26,8 +26,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) // ChangePVCNodeSelectorAction updates/reset PVC's node selector @@ -61,23 +62,23 @@ func (p *ChangePVCNodeSelectorAction) AppliesTo() (velero.ResourceSelector, erro // Execute updates the pvc's selected-node annotation: // a) if node mapping found in the config map for the plugin // b) if node mentioned in annotation doesn't exist -func (p *ChangePVCNodeSelectorAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (p *ChangePVCNodeSelectorAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { p.logger.Info("Executing ChangePVCNodeSelectorAction") defer p.logger.Info("Done executing ChangePVCNodeSelectorAction") typeAcc, err := meta.TypeAccessor(input.Item) if err != nil { - return &velero.RestoreItemActionExecuteOutput{}, err + return &riav1.RestoreItemActionExecuteOutput{}, err } metadata, err := meta.Accessor(input.Item) if err != nil { - return &velero.RestoreItemActionExecuteOutput{}, err + return &riav1.RestoreItemActionExecuteOutput{}, err } annotations := metadata.GetAnnotations() if annotations == nil { - return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + return riav1.NewRestoreItemActionExecuteOutput(input.Item), nil } log := p.logger.WithFields(map[string]interface{}{ @@ -90,7 +91,7 @@ func (p *ChangePVCNodeSelectorAction) Execute(input *velero.RestoreItemActionExe node, ok := annotations["volume.kubernetes.io/selected-node"] if !ok { log.Debug("PVC doesn't have node selector") - return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + return riav1.NewRestoreItemActionExecuteOutput(input.Item), nil } // fetch node mapping from configMap @@ -105,7 +106,7 @@ func (p *ChangePVCNodeSelectorAction) Execute(input *velero.RestoreItemActionExe annotations["volume.kubernetes.io/selected-node"] = newNode metadata.SetAnnotations(annotations) log.Infof("Updating selected-node to %s from %s", newNode, node) - return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + return riav1.NewRestoreItemActionExecuteOutput(input.Item), nil } // configMap doesn't have node-mapping @@ -125,12 +126,12 @@ func (p *ChangePVCNodeSelectorAction) Execute(input *velero.RestoreItemActionExe } } - return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + return 
riav1.NewRestoreItemActionExecuteOutput(input.Item), nil } func getNewNodeFromConfigMap(client corev1client.ConfigMapInterface, node string) (string, error) { // fetch node mapping from configMap - config, err := getPluginConfig(framework.PluginKindRestoreItemAction, "velero.io/change-pvc-node-selector", client) + config, err := getPluginConfig(common.PluginKindRestoreItemAction, "velero.io/change-pvc-node-selector", client) if err != nil { return "", err } diff --git a/pkg/restore/change_pvc_node_selector_test.go b/pkg/restore/change_pvc_node_selector_test.go index 8be3051bac..92a6f4b311 100644 --- a/pkg/restore/change_pvc_node_selector_test.go +++ b/pkg/restore/change_pvc_node_selector_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) // TestChangePVCNodeSelectorActionExecute runs the ChangePVCNodeSelectorAction's Execute @@ -146,7 +146,7 @@ func TestChangePVCNodeSelectorActionExecute(t *testing.T) { unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvc) require.NoError(t, err) - input := &velero.RestoreItemActionExecuteInput{ + input := &riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{ Object: unstructuredMap, }, diff --git a/pkg/restore/change_storageclass_action.go b/pkg/restore/change_storageclass_action.go index 5714a1a7f3..3349cc7925 100644 --- a/pkg/restore/change_storageclass_action.go +++ b/pkg/restore/change_storageclass_action.go @@ -29,8 +29,9 @@ import ( corev1client "k8s.io/client-go/kubernetes/typed/core/v1" storagev1client "k8s.io/client-go/kubernetes/typed/storage/v1" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) // ChangeStorageClassAction updates a PV or PVC's storage class name @@ -64,19 +65,19 @@ func (a *ChangeStorageClassAction) AppliesTo() (velero.ResourceSelector, error) // Execute updates the item's spec.storageClassName if a mapping is found // in the config map for the plugin. 
-func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *ChangeStorageClassAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { a.logger.Info("Executing ChangeStorageClassAction") defer a.logger.Info("Done executing ChangeStorageClassAction") a.logger.Debug("Getting plugin config") - config, err := getPluginConfig(framework.PluginKindRestoreItemAction, "velero.io/change-storage-class", a.configMapClient) + config, err := getPluginConfig(common.PluginKindRestoreItemAction, "velero.io/change-storage-class", a.configMapClient) if err != nil { return nil, err } if config == nil || len(config.Data) == 0 { a.logger.Debug("No storage class mappings found") - return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + return riav1.NewRestoreItemActionExecuteOutput(input.Item), nil } obj, ok := input.Item.(*unstructured.Unstructured) @@ -99,7 +100,7 @@ func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecut if len(sts.Spec.VolumeClaimTemplates) > 0 { for index, pvc := range sts.Spec.VolumeClaimTemplates { - exists, newStorageClass, err := a.isStorageClassExist(log, *pvc.Spec.StorageClassName, config) + exists, newStorageClass, err := a.isStorageClassExist(log, pvc.Spec.StorageClassName, config) if err != nil { return nil, err } else if !exists { @@ -124,11 +125,11 @@ func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecut return nil, errors.Wrap(err, "error getting item's spec.storageClassName") } - exists, newStorageClass, err := a.isStorageClassExist(log, storageClass, config) + exists, newStorageClass, err := a.isStorageClassExist(log, &storageClass, config) if err != nil { return nil, err } else if !exists { - return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + return riav1.NewRestoreItemActionExecuteOutput(input.Item), nil } log.Infof("Updating item's storage class name to %s", newStorageClass) @@ -137,18 +138,18 @@ func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecut return nil, errors.Wrap(err, "unable to set item's spec.storageClassName") } } - return velero.NewRestoreItemActionExecuteOutput(obj), nil + return riav1.NewRestoreItemActionExecuteOutput(obj), nil } -func (a *ChangeStorageClassAction) isStorageClassExist(log *logrus.Entry, storageClass string, cm *corev1.ConfigMap) (exists bool, newStorageClass string, err error) { - if storageClass == "" { +func (a *ChangeStorageClassAction) isStorageClassExist(log *logrus.Entry, storageClass *string, cm *corev1.ConfigMap) (exists bool, newStorageClass string, err error) { + if storageClass == nil || *storageClass == "" { log.Debug("Item has no storage class specified") return false, "", nil } - newStorageClass, ok := cm.Data[storageClass] + newStorageClass, ok := cm.Data[*storageClass] if !ok { - log.Debugf("No mapping found for storage class %s", storageClass) + log.Debugf("No mapping found for storage class %s", *storageClass) return false, "", nil } diff --git a/pkg/restore/change_storageclass_action_test.go b/pkg/restore/change_storageclass_action_test.go index 65de052db2..12a663075f 100644 --- a/pkg/restore/change_storageclass_action_test.go +++ b/pkg/restore/change_storageclass_action_test.go @@ -32,7 +32,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 
"github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) // TestChangeStorageClassActionExecute runs the ChangeStorageClassAction's Execute @@ -245,7 +245,7 @@ func TestChangeStorageClassActionExecute(t *testing.T) { unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvOrPvcOrSTS) require.NoError(t, err) - input := &velero.RestoreItemActionExecuteInput{ + input := &riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{ Object: unstructuredMap, }, diff --git a/pkg/restore/clusterrolebinding_action.go b/pkg/restore/clusterrolebinding_action.go index 851b13f098..eed52ba89c 100644 --- a/pkg/restore/clusterrolebinding_action.go +++ b/pkg/restore/clusterrolebinding_action.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) // ClusterRoleBindingAction handle namespace remappings for role bindings @@ -41,10 +42,10 @@ func (a *ClusterRoleBindingAction) AppliesTo() (velero.ResourceSelector, error) }, nil } -func (a *ClusterRoleBindingAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *ClusterRoleBindingAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { namespaceMapping := input.Restore.Spec.NamespaceMapping if len(namespaceMapping) == 0 { - return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: input.Item.UnstructuredContent()}), nil + return riav1.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: input.Item.UnstructuredContent()}), nil } clusterRoleBinding := new(rbac.ClusterRoleBinding) @@ -63,5 +64,5 @@ func (a *ClusterRoleBindingAction) Execute(input *velero.RestoreItemActionExecut return nil, errors.WithStack(err) } - return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil + return riav1.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil } diff --git a/pkg/restore/clusterrolebinding_action_test.go b/pkg/restore/clusterrolebinding_action_test.go index cea1c57871..a68334565a 100644 --- a/pkg/restore/clusterrolebinding_action_test.go +++ b/pkg/restore/clusterrolebinding_action_test.go @@ -28,6 +28,7 @@ import ( api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" "github.com/vmware-tanzu/velero/pkg/test" ) @@ -89,7 +90,7 @@ func TestClusterRoleBindingActionExecute(t *testing.T) { require.NoError(t, err) action := NewClusterRoleBindingAction(test.NewLogger()) - res, err := action.Execute(&velero.RestoreItemActionExecuteInput{ + res, err := action.Execute(&riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{Object: roleBindingUnstructured}, ItemFromBackup: &unstructured.Unstructured{Object: roleBindingUnstructured}, Restore: &api.Restore{ diff --git a/pkg/restore/crd_v1_preserve_unknown_fields_action.go b/pkg/restore/crd_v1_preserve_unknown_fields_action.go index f67d47910d..a9d8768056 100644 --- a/pkg/restore/crd_v1_preserve_unknown_fields_action.go +++ b/pkg/restore/crd_v1_preserve_unknown_fields_action.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) // 
CRDV1PreserveUnknownFieldsAction will take a CRD and inspect it for the API version and the PreserveUnknownFields value. @@ -45,7 +46,7 @@ func (c *CRDV1PreserveUnknownFieldsAction) AppliesTo() (velero.ResourceSelector, }, nil } -func (c *CRDV1PreserveUnknownFieldsAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (c *CRDV1PreserveUnknownFieldsAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { c.logger.Info("Executing CRDV1PreserveUnknownFieldsAction") name, _, err := unstructured.NestedString(input.Item.UnstructuredContent(), "name") @@ -62,7 +63,7 @@ func (c *CRDV1PreserveUnknownFieldsAction) Execute(input *velero.RestoreItemActi // We don't want to "fix" anything in beta CRDs at the moment, just v1 versions with preserveunknownfields = true if version != "apiextensions.k8s.io/v1" { - return &velero.RestoreItemActionExecuteOutput{ + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, }, nil } @@ -102,7 +103,7 @@ func (c *CRDV1PreserveUnknownFieldsAction) Execute(input *velero.RestoreItemActi return nil, errors.Wrap(err, "unable to convert crd to runtime.Unstructured") } - return &velero.RestoreItemActionExecuteOutput{ + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: &unstructured.Unstructured{Object: res}, }, nil } diff --git a/pkg/restore/crd_v1_preserve_unknown_fields_action_test.go b/pkg/restore/crd_v1_preserve_unknown_fields_action_test.go index 77045b596d..3840918ad5 100644 --- a/pkg/restore/crd_v1_preserve_unknown_fields_action_test.go +++ b/pkg/restore/crd_v1_preserve_unknown_fields_action_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" "github.com/vmware-tanzu/velero/pkg/test" ) @@ -48,6 +48,6 @@ func TestExecuteForACRDWithAnIntOnAFloat64FieldShouldWork(t *testing.T) { a := NewCRDV1PreserveUnknownFieldsAction(test.NewLogger()) - _, err = a.Execute(&velero.RestoreItemActionExecuteInput{Item: &u}) + _, err = a.Execute(&riav1.RestoreItemActionExecuteInput{Item: &u}) require.NoError(t, err) } diff --git a/pkg/restore/init_restorehook_pod_action.go b/pkg/restore/init_restorehook_pod_action.go index f994d811de..cb72e14d4f 100644 --- a/pkg/restore/init_restorehook_pod_action.go +++ b/pkg/restore/init_restorehook_pod_action.go @@ -24,6 +24,7 @@ import ( "github.com/vmware-tanzu/velero/internal/hook" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) // InitRestoreHookPodAction is a RestoreItemAction plugin applicable to pods that runs @@ -45,7 +46,7 @@ func (a *InitRestoreHookPodAction) AppliesTo() (velero.ResourceSelector, error) } // Execute implements the RestoreItemAction plugin interface method. 
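The files in this stretch of the diff all migrate RestoreItemAction implementations from the shared velero package types to the new restoreitemaction/v1 package. For orientation, a minimal action written directly against the v1 types might look like the sketch below; the type name is hypothetical and not part of this change:

```go
package restoresketch

import (
	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
	riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1"
)

// noopRestoreAction is a hypothetical RestoreItemAction written against the new
// restoreitemaction/v1 types: it selects pods and returns each item unchanged.
type noopRestoreAction struct{}

// AppliesTo limits the action to pods, mirroring the selectors used by the
// built-in actions in this package.
func (a *noopRestoreAction) AppliesTo() (velero.ResourceSelector, error) {
	return velero.ResourceSelector{IncludedResources: []string{"pods"}}, nil
}

// Execute receives the v1 input type and returns the item untouched; a real
// action would inspect or mutate input.Item or input.ItemFromBackup here.
func (a *noopRestoreAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) {
	return riav1.NewRestoreItemActionExecuteOutput(input.Item), nil
}
```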
-func (a *InitRestoreHookPodAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *InitRestoreHookPodAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { a.logger.Infof("Executing InitRestoreHookPodAction") // handle any init container restore hooks for the pod restoreHooks, err := hook.GetRestoreHooksFromSpec(&input.Restore.Spec.Hooks) @@ -60,5 +61,5 @@ func (a *InitRestoreHookPodAction) Execute(input *velero.RestoreItemActionExecut } a.logger.Infof("Returning from InitRestoreHookPodAction") - return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: postHooksItem.UnstructuredContent()}), nil + return riav1.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: postHooksItem.UnstructuredContent()}), nil } diff --git a/pkg/restore/init_restorehook_pod_action_test.go b/pkg/restore/init_restorehook_pod_action_test.go index c69d3c23f9..058865660a 100644 --- a/pkg/restore/init_restorehook_pod_action_test.go +++ b/pkg/restore/init_restorehook_pod_action_test.go @@ -29,7 +29,7 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/kuberesource" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -125,7 +125,7 @@ func TestInitContainerRestoreHookPodActionExecute(t *testing.T) { unstructuredPod, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tc.obj) require.NoError(t, err) - res, err := action.Execute(&velero.RestoreItemActionExecuteInput{ + res, err := action.Execute(&riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{Object: unstructuredPod}, ItemFromBackup: &unstructured.Unstructured{Object: unstructuredPod}, Restore: tc.restore, diff --git a/pkg/restore/job_action.go b/pkg/restore/job_action.go index fbaf30b249..4e20f9cc92 100644 --- a/pkg/restore/job_action.go +++ b/pkg/restore/job_action.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) type JobAction struct { @@ -40,7 +41,7 @@ func (a *JobAction) AppliesTo() (velero.ResourceSelector, error) { }, nil } -func (a *JobAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *JobAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { job := new(batchv1api.Job) if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), job); err != nil { return nil, errors.WithStack(err) @@ -56,5 +57,5 @@ func (a *JobAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler return nil, errors.WithStack(err) } - return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil + return riav1.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil } diff --git a/pkg/restore/job_action_test.go b/pkg/restore/job_action_test.go index 606dd089d8..0b15bc47f0 100644 --- a/pkg/restore/job_action_test.go +++ b/pkg/restore/job_action_test.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - 
"github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -138,7 +138,7 @@ func TestJobActionExecute(t *testing.T) { unstructuredJob, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&test.obj) require.NoError(t, err) - res, err := action.Execute(&velero.RestoreItemActionExecuteInput{ + res, err := action.Execute(&riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{Object: unstructuredJob}, ItemFromBackup: &unstructured.Unstructured{Object: unstructuredJob}, Restore: nil, diff --git a/pkg/restore/pod_action.go b/pkg/restore/pod_action.go index d4bdc13847..35d9d75220 100644 --- a/pkg/restore/pod_action.go +++ b/pkg/restore/pod_action.go @@ -27,6 +27,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) type PodAction struct { @@ -43,7 +44,7 @@ func (a *PodAction) AppliesTo() (velero.ResourceSelector, error) { }, nil } -func (a *PodAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *PodAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { pod := new(v1.Pod) if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), pod); err != nil { return nil, errors.WithStack(err) @@ -86,7 +87,7 @@ func (a *PodAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler if err != nil { return nil, errors.WithStack(err) } - restoreExecuteOutput := velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}) + restoreExecuteOutput := riav1.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}) if pod.Spec.PriorityClassName != "" { a.logger.Infof("Adding priorityclass %s to AdditionalItems", pod.Spec.PriorityClassName) restoreExecuteOutput.AdditionalItems = []velero.ResourceIdentifier{ diff --git a/pkg/restore/pod_action_test.go b/pkg/restore/pod_action_test.go index f1aa83c1a3..c8121df673 100644 --- a/pkg/restore/pod_action_test.go +++ b/pkg/restore/pod_action_test.go @@ -28,6 +28,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -229,7 +230,7 @@ func TestPodActionExecute(t *testing.T) { unstructuredPod, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&test.obj) require.NoError(t, err) - res, err := action.Execute(&velero.RestoreItemActionExecuteInput{ + res, err := action.Execute(&riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{Object: unstructuredPod}, ItemFromBackup: &unstructured.Unstructured{Object: unstructuredPod}, Restore: nil, diff --git a/pkg/restore/prioritize_group_version.go b/pkg/restore/prioritize_group_version.go index 1fde8b6058..d4ad9331c5 100644 --- a/pkg/restore/prioritize_group_version.go +++ b/pkg/restore/prioritize_group_version.go @@ -339,7 +339,7 @@ func findSupportedUserVersion(userGVs, targetGVs, sourceGVs []metav1.GroupVersio return "" } -// versionsContain will check if a version can be found in a a slice of versions. +// versionsContain will check if a version can be found in a slice of versions. 
func versionsContain(list []metav1.GroupVersionForDiscovery, version string) bool { for _, v := range list { if v.Version == version { diff --git a/pkg/restore/pv_restorer.go b/pkg/restore/pv_restorer.go index ff8d36b347..ce3ab1e1d9 100644 --- a/pkg/restore/pv_restorer.go +++ b/pkg/restore/pv_restorer.go @@ -21,6 +21,7 @@ import ( "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "github.com/vmware-tanzu/velero/internal/credentials" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" "github.com/vmware-tanzu/velero/pkg/util/boolptr" @@ -39,6 +40,7 @@ type pvRestorer struct { volumeSnapshots []*volume.Snapshot volumeSnapshotterGetter VolumeSnapshotterGetter snapshotLocationLister listers.VolumeSnapshotLocationLister + credentialFileStore credentials.FileStore } func (r *pvRestorer) executePVAction(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { @@ -59,7 +61,7 @@ func (r *pvRestorer) executePVAction(obj *unstructured.Unstructured) (*unstructu log := r.logger.WithFields(logrus.Fields{"persistentVolume": pvName}) - snapshotInfo, err := getSnapshotInfo(pvName, r.backup, r.volumeSnapshots, r.snapshotLocationLister) + snapshotInfo, err := getSnapshotInfo(pvName, r.backup, r.volumeSnapshots, r.snapshotLocationLister, r.credentialFileStore, r.logger) if err != nil { return nil, err } @@ -103,7 +105,7 @@ type snapshotInfo struct { location *api.VolumeSnapshotLocation } -func getSnapshotInfo(pvName string, backup *api.Backup, volumeSnapshots []*volume.Snapshot, snapshotLocationLister listers.VolumeSnapshotLocationLister) (*snapshotInfo, error) { +func getSnapshotInfo(pvName string, backup *api.Backup, volumeSnapshots []*volume.Snapshot, snapshotLocationLister listers.VolumeSnapshotLocationLister, credentialStore credentials.FileStore, logger logrus.FieldLogger) (*snapshotInfo, error) { var pvSnapshot *volume.Snapshot for _, snapshot := range volumeSnapshots { if snapshot.Spec.PersistentVolumeName == pvName { @@ -120,6 +122,11 @@ func getSnapshotInfo(pvName string, backup *api.Backup, volumeSnapshots []*volum if err != nil { return nil, errors.WithStack(err) } + // add credential to config + err = volume.UpdateVolumeSnapshotLocationWithCredentialConfig(loc, credentialStore, logger) + if err != nil { + return nil, errors.WithStack(err) + } return &snapshotInfo{ providerSnapshotID: pvSnapshot.Status.ProviderSnapshotID, diff --git a/pkg/restore/pv_restorer_test.go b/pkg/restore/pv_restorer_test.go index c4fb0a9db8..f54ff0f5b5 100644 --- a/pkg/restore/pv_restorer_test.go +++ b/pkg/restore/pv_restorer_test.go @@ -29,8 +29,8 @@ import ( "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake" informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" - providermocks "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks" + providermocks "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks/volumesnapshotter/v1" + vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" "github.com/vmware-tanzu/velero/pkg/volume" ) @@ -187,7 +187,7 @@ func TestExecutePVAction_SnapshotRestores(t *testing.T) { t.Run(tc.name, func(t *testing.T) { var ( volumeSnapshotter = new(providermocks.VolumeSnapshotter) - volumeSnapshotterGetter = 
providerToVolumeSnapshotterMap(map[string]velero.VolumeSnapshotter{ + volumeSnapshotterGetter = providerToVolumeSnapshotterMap(map[string]vsv1.VolumeSnapshotter{ tc.expectedProvider: volumeSnapshotter, }) locationsInformer = informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0).Velero().V1().VolumeSnapshotLocations() @@ -217,9 +217,9 @@ func TestExecutePVAction_SnapshotRestores(t *testing.T) { } } -type providerToVolumeSnapshotterMap map[string]velero.VolumeSnapshotter +type providerToVolumeSnapshotterMap map[string]vsv1.VolumeSnapshotter -func (g providerToVolumeSnapshotterMap) GetVolumeSnapshotter(provider string) (velero.VolumeSnapshotter, error) { +func (g providerToVolumeSnapshotterMap) GetVolumeSnapshotter(provider string) (vsv1.VolumeSnapshotter, error) { if bs, ok := g[provider]; !ok { return nil, errors.New("volume snapshotter not found for provider") } else { diff --git a/pkg/restore/restic_restore_action.go b/pkg/restore/restic_restore_action.go index ba9f9cb0a3..bf7116b8b9 100644 --- a/pkg/restore/restic_restore_action.go +++ b/pkg/restore/restic_restore_action.go @@ -34,10 +34,10 @@ import ( "github.com/vmware-tanzu/velero/pkg/builder" velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1" "github.com/vmware-tanzu/velero/pkg/label" - "github.com/vmware-tanzu/velero/pkg/plugin/framework" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" "github.com/vmware-tanzu/velero/pkg/podvolume" - "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -67,7 +67,7 @@ func (a *ResticRestoreAction) AppliesTo() (velero.ResourceSelector, error) { }, nil } -func (a *ResticRestoreAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *ResticRestoreAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { a.logger.Info("Executing ResticRestoreAction") defer a.logger.Info("Done executing ResticRestoreAction") @@ -100,7 +100,7 @@ func (a *ResticRestoreAction) Execute(input *velero.RestoreItemActionExecuteInpu volumeSnapshots := podvolume.GetVolumeBackupsForPod(podVolumeBackups, &pod, podFromBackup.Namespace) if len(volumeSnapshots) == 0 { log.Debug("No restic backups found for pod") - return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + return riav1.NewRestoreItemActionExecuteOutput(input.Item), nil } log.Info("Restic backups for pod found") @@ -108,7 +108,7 @@ func (a *ResticRestoreAction) Execute(input *velero.RestoreItemActionExecuteInpu // TODO we might want/need to get plugin config at the top of this method at some point; for now, wait // until we know we're doing a restore before getting config. 
log.Debugf("Getting plugin config") - config, err := getPluginConfig(framework.PluginKindRestoreItemAction, "velero.io/restic", a.client) + config, err := getPluginConfig(common.PluginKindRestoreItemAction, "velero.io/restic", a.client) if err != nil { return nil, err } @@ -161,7 +161,7 @@ func (a *ResticRestoreAction) Execute(input *velero.RestoreItemActionExecuteInpu initContainerBuilder.Command(getCommand(log, config)) initContainer := *initContainerBuilder.Result() - if len(pod.Spec.InitContainers) == 0 || pod.Spec.InitContainers[0].Name != restic.InitContainer { + if len(pod.Spec.InitContainers) == 0 || pod.Spec.InitContainers[0].Name != podvolume.InitContainer { pod.Spec.InitContainers = append([]corev1.Container{initContainer}, pod.Spec.InitContainers...) } else { pod.Spec.InitContainers[0] = initContainer @@ -172,7 +172,7 @@ func (a *ResticRestoreAction) Execute(input *velero.RestoreItemActionExecuteInpu return nil, errors.Wrap(err, "unable to convert pod to runtime.Unstructured") } - return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil + return riav1.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil } func getCommand(log logrus.FieldLogger, config *corev1.ConfigMap) []string { @@ -262,7 +262,7 @@ func getSecurityContext(log logrus.FieldLogger, config *corev1.ConfigMap) (strin // TODO eventually this can move to pkg/plugin/framework since it'll be used across multiple // plugins. -func getPluginConfig(kind framework.PluginKind, name string, client corev1client.ConfigMapInterface) (*corev1.ConfigMap, error) { +func getPluginConfig(kind common.PluginKind, name string, client corev1client.ConfigMapInterface) (*corev1.ConfigMap, error) { opts := metav1.ListOptions{ // velero.io/plugin-config: true // velero.io/restic: RestoreItemAction @@ -290,7 +290,7 @@ func getPluginConfig(kind framework.PluginKind, name string, client corev1client } func newResticInitContainerBuilder(image, restoreUID string) *builder.ContainerBuilder { - return builder.ForContainer(restic.InitContainer, image). + return builder.ForContainer(podvolume.InitContainer, image). Args(restoreUID). 
Env([]*corev1.EnvVar{ { diff --git a/pkg/restore/restic_restore_action_test.go b/pkg/restore/restic_restore_action_test.go index b218f4b88d..660c1fc66c 100644 --- a/pkg/restore/restic_restore_action_test.go +++ b/pkg/restore/restic_restore_action_test.go @@ -35,7 +35,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/buildinfo" velerofake "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -278,7 +278,7 @@ func TestResticRestoreActionExecute(t *testing.T) { unstructuredPodFromBackup = unstructuredPod } - input := &velero.RestoreItemActionExecuteInput{ + input := &riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{ Object: unstructuredPod, }, diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 11008b55e8..8705ae2127 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -46,6 +46,7 @@ import ( corev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" + "github.com/vmware-tanzu/velero/internal/credentials" "github.com/vmware-tanzu/velero/internal/hook" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/archive" @@ -58,6 +59,8 @@ import ( "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/plugin/framework" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" + vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" "github.com/vmware-tanzu/velero/pkg/podexec" "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/util/boolptr" @@ -68,7 +71,7 @@ import ( ) type VolumeSnapshotterGetter interface { - GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) + GetVolumeSnapshotter(name string) (vsv1.VolumeSnapshotter, error) } type Request struct { @@ -85,7 +88,7 @@ type Request struct { type Restorer interface { // Restore restores the backup data from backupReader, returning warnings and errors. Restore(req Request, - actions []velero.RestoreItemAction, + actions []riav1.RestoreItemAction, snapshotLocationLister listers.VolumeSnapshotLocationLister, volumeSnapshotterGetter VolumeSnapshotterGetter, ) (Result, Result) @@ -113,6 +116,7 @@ type kubernetesRestorer struct { logger logrus.FieldLogger podCommandExecutor podexec.PodCommandExecutor podGetter cache.Getter + credentialFileStore credentials.FileStore } // NewKubernetesRestorer creates a new kubernetesRestorer. 
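The restore.go changes around this point alter both how the restorer is built and how it is called: NewKubernetesRestorer gains a trailing credentials.FileStore (threaded into the pvRestorer so volume snapshot locations can resolve their credentials), Restore now accepts []riav1.RestoreItemAction, and the VolumeSnapshotterGetter returns vsv1.VolumeSnapshotter values. Below is a minimal caller-side sketch under those assumptions, with hypothetical names and the construction step omitted; it is not the actual server wiring:

```go
package restoresketch

import (
	listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1"
	riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1"
	"github.com/vmware-tanzu/velero/pkg/restore"
)

// runRestore shows the updated call shapes only. The restorer is assumed to have
// been built via NewKubernetesRestorer, whose new trailing argument (a
// credentials.FileStore) is what lets the PV restorer attach credentials to
// volume snapshot locations during restore.
func runRestore(
	restorer restore.Restorer,
	req restore.Request,
	actions []riav1.RestoreItemAction, // item actions now use the v1 package
	vslLister listers.VolumeSnapshotLocationLister,
	getter restore.VolumeSnapshotterGetter, // now hands back vsv1.VolumeSnapshotter values
) (warnings, errs restore.Result) {
	return restorer.Restore(req, actions, vslLister, getter)
}
```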
@@ -128,6 +132,7 @@ func NewKubernetesRestorer( logger logrus.FieldLogger, podCommandExecutor podexec.PodCommandExecutor, podGetter cache.Getter, + credentialStore credentials.FileStore, ) (Restorer, error) { return &kubernetesRestorer{ restoreClient: restoreClient, @@ -147,9 +152,10 @@ func NewKubernetesRestorer( veleroCloneName := "velero-clone-" + veleroCloneUuid.String() return veleroCloneName, nil }, - fileSystem: filesystem.NewFileSystem(), - podCommandExecutor: podCommandExecutor, - podGetter: podGetter, + fileSystem: filesystem.NewFileSystem(), + podCommandExecutor: podCommandExecutor, + podGetter: podGetter, + credentialFileStore: credentialStore, }, nil } @@ -158,7 +164,7 @@ func NewKubernetesRestorer( // respectively, summarizing info about the restore. func (kr *kubernetesRestorer) Restore( req Request, - actions []velero.RestoreItemAction, + actions []riav1.RestoreItemAction, snapshotLocationLister listers.VolumeSnapshotLocationLister, volumeSnapshotterGetter VolumeSnapshotterGetter, ) (Result, Result) { @@ -222,12 +228,12 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( Includes(req.Restore.Spec.IncludedNamespaces...). Excludes(req.Restore.Spec.ExcludedNamespaces...) - resolvedActions, err := restoreItemActionResolver.ResolveActions(kr.discoveryHelper) + resolvedActions, err := restoreItemActionResolver.ResolveActions(kr.discoveryHelper, kr.logger) if err != nil { return Result{}, Result{Velero: []string{err.Error()}} } - resolvedItemSnapshotterActions, err := itemSnapshotterResolver.ResolveActions(kr.discoveryHelper) + resolvedItemSnapshotterActions, err := itemSnapshotterResolver.ResolveActions(kr.discoveryHelper, kr.logger) if err != nil { return Result{}, Result{Velero: []string{err.Error()}} } @@ -276,6 +282,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( volumeSnapshots: req.VolumeSnapshots, volumeSnapshotterGetter: volumeSnapshotterGetter, snapshotLocationLister: snapshotLocationLister, + credentialFileStore: kr.credentialFileStore, } restoreCtx := &restoreContext{ @@ -403,6 +410,12 @@ func (ctx *restoreContext) execute() (Result, Result) { ctx.restoreDir = dir backupResources, err := archive.NewParser(ctx.log, ctx.fileSystem).Parse(ctx.restoreDir) + // If ErrNotExist occurs, it implies that the backup to be restored includes zero items. + // Need to add a warning about it and jump out of the function. 
+ if errors.Cause(err) == archive.ErrNotExist { + warnings.AddVeleroError(errors.Wrap(err, "zero items to be restored")) + return warnings, errs + } if err != nil { errs.AddVeleroError(errors.Wrap(err, "error parsing backup contents")) return warnings, errs @@ -1139,7 +1152,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } ctx.log.Infof("Executing item action for %v", &groupResource) - executeOutput, err := action.RestoreItemAction.Execute(&velero.RestoreItemActionExecuteInput{ + executeOutput, err := action.RestoreItemAction.Execute(&riav1.RestoreItemActionExecuteInput{ Item: obj, ItemFromBackup: itemFromBackup, Restore: ctx.restore, @@ -1249,13 +1262,25 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso errs.Add(namespace, err) return warnings, errs } - if isAlreadyExistsError { - fromCluster, err := resourceClient.Get(name, metav1.GetOptions{}) - if err != nil { - ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err) - warnings.Add(namespace, err) + + // Decide whether the creation error should be treated as a warning: in some cases the create call is rejected by API validation before it runs, + // so Velero never receives an already-exists error even though the object does already exist in the cluster. + var fromCluster *unstructured.Unstructured + + if restoreErr != nil { + // Check whether the object exists in the cluster: if the Get succeeds, the object exists and is handled below. + // If the Get fails, decide which error to surface based on the earlier create attempt: + // if the create failed with an already-exists error, return the 'get' error; + // otherwise the original creation error is reported as usual. + fromCluster, err = resourceClient.Get(name, metav1.GetOptions{}) + if err != nil && isAlreadyExistsError { + ctx.log.Errorf("Error retrieving in-cluster version of %s: %v", kube.NamespaceAndName(obj), err) + errs.Add(namespace, err) return warnings, errs } + } + + if fromCluster != nil { // Remove insubstantial metadata.
fromCluster, err = resetMetadataAndStatus(fromCluster) if err != nil { @@ -1443,7 +1468,7 @@ func isAlreadyExistsError(ctx *restoreContext, obj *unstructured.Unstructured, e } } - // the "already allocated" error may caused by other services, check whether the expected service exists or not + // the "already allocated" error may be caused by other services, check whether the expected service exists or not if _, err = client.Get(obj.GetName(), metav1.GetOptions{}); err != nil { if apierrors.IsNotFound(err) { ctx.log.Debugf("Service %s not found", kube.NamespaceAndName(obj)) @@ -2024,7 +2049,7 @@ func (ctx *restoreContext) processUpdateResourcePolicy(fromCluster, fromClusterW // try patching the in-cluster resource (resource diff plus latest backup/restore labels) _, err = resourceClient.Patch(obj.GetName(), patchBytes) if err != nil { - ctx.log.Errorf("patch attempt failed for %s %s: %v", fromCluster.GroupVersionKind(), kube.NamespaceAndName(fromCluster), err) + ctx.log.Warnf("patch attempt failed for %s %s: %v", fromCluster.GroupVersionKind(), kube.NamespaceAndName(fromCluster), err) warnings.Add(namespace, err) // try just patching the labels warningsFromUpdate, errsFromUpdate := ctx.updateBackupRestoreLabels(fromCluster, fromClusterWithLabels, namespace, resourceClient) diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index 404d45e1a5..f1575bc185 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -48,6 +48,8 @@ import ( velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" + vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" "github.com/vmware-tanzu/velero/pkg/podvolume" uploadermocks "github.com/vmware-tanzu/velero/pkg/podvolume/mocks" "github.com/vmware-tanzu/velero/pkg/test" @@ -762,14 +764,15 @@ func TestInvalidTarballContents(t *testing.T) { tarball io.Reader want map[*test.APIResource][]string wantErrs Result + wantWarnings Result }{ { - name: "empty tarball returns an error", + name: "empty tarball returns a warning", restore: defaultRestore().Result(), backup: defaultBackup().Result(), tarball: test.NewTarWriter(t). 
Done(), - wantErrs: Result{ + wantWarnings: Result{ Velero: []string{archive.ErrNotExist.Error()}, }, }, @@ -820,33 +823,32 @@ func TestInvalidTarballContents(t *testing.T) { nil, // snapshot location lister nil, // volume snapshotter getter ) - - assertEmptyResults(t, warnings) - assertWantErrs(t, tc.wantErrs, errs) + assertWantErrsOrWarnings(t, tc.wantWarnings, warnings) + assertWantErrsOrWarnings(t, tc.wantErrs, errs) assertAPIContents(t, h, tc.want) }) } } -func assertWantErrs(t *testing.T, wantErrRes Result, errRes Result) { +func assertWantErrsOrWarnings(t *testing.T, wantRes Result, res Result) { t.Helper() - if wantErrRes.Velero != nil { - assert.Equal(t, len(wantErrRes.Velero), len(errRes.Velero)) - for i := range errRes.Velero { - assert.Contains(t, errRes.Velero[i], wantErrRes.Velero[i]) + if wantRes.Velero != nil { + assert.Equal(t, len(wantRes.Velero), len(res.Velero)) + for i := range res.Velero { + assert.Contains(t, res.Velero[i], wantRes.Velero[i]) } } - if wantErrRes.Namespaces != nil { - assert.Equal(t, len(wantErrRes.Namespaces), len(errRes.Namespaces)) - for ns := range errRes.Namespaces { - assert.Equal(t, len(wantErrRes.Namespaces[ns]), len(errRes.Namespaces[ns])) - for i := range errRes.Namespaces[ns] { - assert.Contains(t, errRes.Namespaces[ns][i], wantErrRes.Namespaces[ns][i]) + if wantRes.Namespaces != nil { + assert.Equal(t, len(wantRes.Namespaces), len(res.Namespaces)) + for ns := range res.Namespaces { + assert.Equal(t, len(wantRes.Namespaces[ns]), len(res.Namespaces[ns])) + for i := range res.Namespaces[ns] { + assert.Contains(t, res.Namespaces[ns][i], wantRes.Namespaces[ns][i]) } } } - if wantErrRes.Cluster != nil { - assert.Equal(t, wantErrRes.Cluster, errRes.Cluster) + if wantRes.Cluster != nil { + assert.Equal(t, wantRes.Cluster, res.Cluster) } } @@ -1147,17 +1149,17 @@ func (a *recordResourcesAction) AppliesTo() (velero.ResourceSelector, error) { return a.selector, nil } -func (a *recordResourcesAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *recordResourcesAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { metadata, err := meta.Accessor(input.Item) if err != nil { - return &velero.RestoreItemActionExecuteOutput{ + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, AdditionalItems: a.additionalItems, }, err } a.ids = append(a.ids, kubeutil.NamespaceAndName(metadata)) - return &velero.RestoreItemActionExecuteOutput{ + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, AdditionalItems: a.additionalItems, }, nil @@ -1318,7 +1320,7 @@ func TestRestoreActionsRunForCorrectItems(t *testing.T) { h.AddItems(t, r) } - actions := []velero.RestoreItemAction{} + actions := []riav1.RestoreItemAction{} for action := range tc.actions { actions = append(actions, action) } @@ -1353,12 +1355,12 @@ func TestRestoreActionsRunForCorrectItems(t *testing.T) { // function body at runtime. 
type pluggableAction struct { selector velero.ResourceSelector - executeFunc func(*velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) + executeFunc func(*riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) } -func (a *pluggableAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *pluggableAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { if a.executeFunc == nil { - return &velero.RestoreItemActionExecuteOutput{ + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, }, nil } @@ -1383,7 +1385,7 @@ func TestRestoreActionModifications(t *testing.T) { // method modifies the item being passed in by calling the 'modify' function on it. modifyingActionGetter := func(modify func(*unstructured.Unstructured)) *pluggableAction { return &pluggableAction{ - executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { + executeFunc: func(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { obj, ok := input.Item.(*unstructured.Unstructured) if !ok { return nil, errors.Errorf("unexpected type %T", input.Item) @@ -1392,7 +1394,7 @@ func TestRestoreActionModifications(t *testing.T) { res := obj.DeepCopy() modify(res) - return &velero.RestoreItemActionExecuteOutput{ + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: res, }, nil }, @@ -1405,7 +1407,7 @@ func TestRestoreActionModifications(t *testing.T) { backup *velerov1api.Backup apiResources []*test.APIResource tarball io.Reader - actions []velero.RestoreItemAction + actions []riav1.RestoreItemAction want []*test.APIResource }{ { @@ -1414,7 +1416,7 @@ func TestRestoreActionModifications(t *testing.T) { backup: defaultBackup().Result(), tarball: test.NewTarWriter(t).AddItems("pods", builder.ForPod("ns-1", "pod-1").Result()).Done(), apiResources: []*test.APIResource{test.Pods()}, - actions: []velero.RestoreItemAction{ + actions: []riav1.RestoreItemAction{ modifyingActionGetter(func(item *unstructured.Unstructured) { item.SetLabels(map[string]string{"updated": "true"}) }), @@ -1431,7 +1433,7 @@ func TestRestoreActionModifications(t *testing.T) { backup: defaultBackup().Result(), tarball: test.NewTarWriter(t).AddItems("pods", builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("should-be-removed", "true")).Result()).Done(), apiResources: []*test.APIResource{test.Pods()}, - actions: []velero.RestoreItemAction{ + actions: []riav1.RestoreItemAction{ modifyingActionGetter(func(item *unstructured.Unstructured) { item.SetLabels(nil) }), @@ -1446,7 +1448,7 @@ func TestRestoreActionModifications(t *testing.T) { backup: defaultBackup().Result(), tarball: test.NewTarWriter(t).AddItems("pods", builder.ForPod("ns-1", "pod-1").Result()).Done(), apiResources: []*test.APIResource{test.Pods()}, - actions: []velero.RestoreItemAction{ + actions: []riav1.RestoreItemAction{ modifyingActionGetter(func(item *unstructured.Unstructured) { item.SetLabels(map[string]string{"updated": "true"}) }).addSelector(velero.ResourceSelector{ @@ -1518,7 +1520,7 @@ func TestRestoreActionAdditionalItems(t *testing.T) { backup *velerov1api.Backup tarball io.Reader apiResources []*test.APIResource - actions []velero.RestoreItemAction + actions []riav1.RestoreItemAction want map[*test.APIResource][]string }{ { @@ -1527,11 +1529,11 @@ func TestRestoreActionAdditionalItems(t *testing.T) { 
backup: defaultBackup().Result(), tarball: test.NewTarWriter(t).AddItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()).Done(), apiResources: []*test.APIResource{test.Pods()}, - actions: []velero.RestoreItemAction{ + actions: []riav1.RestoreItemAction{ &pluggableAction{ selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, - executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { - return &velero.RestoreItemActionExecuteOutput{ + executeFunc: func(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, AdditionalItems: []velero.ResourceIdentifier{ {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, @@ -1550,10 +1552,10 @@ func TestRestoreActionAdditionalItems(t *testing.T) { backup: defaultBackup().Result(), tarball: test.NewTarWriter(t).AddItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()).Done(), apiResources: []*test.APIResource{test.Pods()}, - actions: []velero.RestoreItemAction{ + actions: []riav1.RestoreItemAction{ &pluggableAction{ - executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { - return &velero.RestoreItemActionExecuteOutput{ + executeFunc: func(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, AdditionalItems: []velero.ResourceIdentifier{ {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, @@ -1575,10 +1577,10 @@ func TestRestoreActionAdditionalItems(t *testing.T) { AddItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result()). Done(), apiResources: []*test.APIResource{test.Pods(), test.PVs()}, - actions: []velero.RestoreItemAction{ + actions: []riav1.RestoreItemAction{ &pluggableAction{ - executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { - return &velero.RestoreItemActionExecuteOutput{ + executeFunc: func(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, AdditionalItems: []velero.ResourceIdentifier{ {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, @@ -1601,10 +1603,10 @@ func TestRestoreActionAdditionalItems(t *testing.T) { AddItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result()). Done(), apiResources: []*test.APIResource{test.Pods(), test.PVs()}, - actions: []velero.RestoreItemAction{ + actions: []riav1.RestoreItemAction{ &pluggableAction{ - executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { - return &velero.RestoreItemActionExecuteOutput{ + executeFunc: func(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, AdditionalItems: []velero.ResourceIdentifier{ {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, @@ -1627,10 +1629,10 @@ func TestRestoreActionAdditionalItems(t *testing.T) { AddItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result()). 
Done(), apiResources: []*test.APIResource{test.Pods(), test.PVs()}, - actions: []velero.RestoreItemAction{ + actions: []riav1.RestoreItemAction{ &pluggableAction{ - executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { - return &velero.RestoreItemActionExecuteOutput{ + executeFunc: func(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { + return &riav1.RestoreItemActionExecuteOutput{ UpdatedItem: input.Item, AdditionalItems: []velero.ResourceIdentifier{ {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, @@ -1875,10 +1877,10 @@ func assertRestoredItems(t *testing.T, h *harness, want []*test.APIResource) { } // volumeSnapshotterGetter is a simple implementation of the VolumeSnapshotterGetter -// interface that returns velero.VolumeSnapshotters from a map if they exist. -type volumeSnapshotterGetter map[string]velero.VolumeSnapshotter +// interface that returns vsv1.VolumeSnapshotters from a map if they exist. +type volumeSnapshotterGetter map[string]vsv1.VolumeSnapshotter -func (vsg volumeSnapshotterGetter) GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) { +func (vsg volumeSnapshotterGetter) GetVolumeSnapshotter(name string) (vsv1.VolumeSnapshotter, error) { snapshotter, ok := vsg[name] if !ok { return nil, errors.New("volume snapshotter not found") @@ -1887,7 +1889,7 @@ func (vsg volumeSnapshotterGetter) GetVolumeSnapshotter(name string) (velero.Vol return snapshotter, nil } -// volumeSnapshotter is a test fake for the velero.VolumeSnapshotter interface +// volumeSnapshotter is a test fake for the vsv1.VolumeSnapshotter interface type volumeSnapshotter struct { // a map from snapshotID to volumeID snapshotVolumes map[string]string @@ -2051,7 +2053,7 @@ func TestRestorePersistentVolumes(t *testing.T) { volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), }, - volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "provider-1": &volumeSnapshotter{ snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, }, @@ -2100,7 +2102,7 @@ func TestRestorePersistentVolumes(t *testing.T) { volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), }, - volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "provider-1": &volumeSnapshotter{ snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, }, @@ -2154,7 +2156,7 @@ func TestRestorePersistentVolumes(t *testing.T) { volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), }, - volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ // the volume snapshotter fake is not configured with any snapshotID -> volumeID // mappings as a way to verify that the snapshot is not restored, since if it were // restored, we'd get an error of "snapshot not found". 
@@ -2206,7 +2208,7 @@ func TestRestorePersistentVolumes(t *testing.T) { volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), }, - volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ // the volume snapshotter fake is not configured with any snapshotID -> volumeID // mappings as a way to verify that the snapshot is not restored, since if it were // restored, we'd get an error of "snapshot not found". @@ -2257,7 +2259,7 @@ func TestRestorePersistentVolumes(t *testing.T) { volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), }, - volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "provider-1": &volumeSnapshotter{ snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, }, @@ -2318,7 +2320,7 @@ func TestRestorePersistentVolumes(t *testing.T) { volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), }, - volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "provider-1": &volumeSnapshotter{ snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, }, @@ -2466,7 +2468,7 @@ func TestRestorePersistentVolumes(t *testing.T) { volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), }, - volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "provider-1": &volumeSnapshotter{ snapshotVolumes: map[string]string{"snapshot-1": "new-pvname"}, pvName: map[string]string{"new-pvname": "new-pvname"}, @@ -2538,7 +2540,7 @@ func TestRestorePersistentVolumes(t *testing.T) { }, }, }, - volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ // the volume snapshotter fake is not configured with any snapshotID -> volumeID // mappings as a way to verify that the snapshot is not restored, since if it were // restored, we'd get an error of "snapshot not found". 
@@ -2590,7 +2592,7 @@ func TestRestorePersistentVolumes(t *testing.T) { volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), }, - volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ "provider-1": &volumeSnapshotter{ snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, pvName: map[string]string{"new-volume": "volumesnapshotter-renamed-source-pv"}, diff --git a/pkg/restore/rolebinding_action.go b/pkg/restore/rolebinding_action.go index c402075a61..55820c21f9 100644 --- a/pkg/restore/rolebinding_action.go +++ b/pkg/restore/rolebinding_action.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" ) // RoleBindingAction handle namespace remappings for role bindings @@ -41,10 +42,10 @@ func (a *RoleBindingAction) AppliesTo() (velero.ResourceSelector, error) { }, nil } -func (a *RoleBindingAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *RoleBindingAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { namespaceMapping := input.Restore.Spec.NamespaceMapping if len(namespaceMapping) == 0 { - return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: input.Item.UnstructuredContent()}), nil + return riav1.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: input.Item.UnstructuredContent()}), nil } roleBinding := new(rbac.RoleBinding) @@ -63,5 +64,5 @@ func (a *RoleBindingAction) Execute(input *velero.RestoreItemActionExecuteInput) return nil, errors.WithStack(err) } - return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil + return riav1.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil } diff --git a/pkg/restore/rolebinding_action_test.go b/pkg/restore/rolebinding_action_test.go index 8995df8c6d..2a62cf6d0d 100644 --- a/pkg/restore/rolebinding_action_test.go +++ b/pkg/restore/rolebinding_action_test.go @@ -28,6 +28,7 @@ import ( api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" "github.com/vmware-tanzu/velero/pkg/test" ) @@ -89,7 +90,7 @@ func TestRoleBindingActionExecute(t *testing.T) { require.NoError(t, err) action := NewRoleBindingAction(test.NewLogger()) - res, err := action.Execute(&velero.RestoreItemActionExecuteInput{ + res, err := action.Execute(&riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{Object: roleBindingUnstructured}, ItemFromBackup: &unstructured.Unstructured{Object: roleBindingUnstructured}, Restore: &api.Restore{ diff --git a/pkg/restore/service_account_action.go b/pkg/restore/service_account_action.go index 252d9fc576..424985a26e 100644 --- a/pkg/restore/service_account_action.go +++ b/pkg/restore/service_account_action.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -43,7 +44,7 @@ func (a *ServiceAccountAction) AppliesTo() (velero.ResourceSelector, error) { 
}, nil } -func (a *ServiceAccountAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *ServiceAccountAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { a.logger.Info("Executing ServiceAccountAction") defer a.logger.Info("Done executing ServiceAccountAction") @@ -75,5 +76,5 @@ func (a *ServiceAccountAction) Execute(input *velero.RestoreItemActionExecuteInp return nil, errors.Wrap(err, "unable to convert serviceaccount to runtime.Unstructured") } - return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil + return riav1.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil } diff --git a/pkg/restore/service_account_action_test.go b/pkg/restore/service_account_action_test.go index 0bc976a0fa..f8bc76fcc8 100644 --- a/pkg/restore/service_account_action_test.go +++ b/pkg/restore/service_account_action_test.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" "github.com/vmware-tanzu/velero/pkg/test" ) @@ -90,7 +91,7 @@ func TestServiceAccountActionExecute(t *testing.T) { require.NoError(t, err) action := NewServiceAccountAction(test.NewLogger()) - res, err := action.Execute(&velero.RestoreItemActionExecuteInput{ + res, err := action.Execute(&riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{Object: saUnstructured}, ItemFromBackup: &unstructured.Unstructured{Object: saUnstructured}, Restore: nil, diff --git a/pkg/restore/service_action.go b/pkg/restore/service_action.go index 0b22cf0959..d21690182e 100644 --- a/pkg/restore/service_action.go +++ b/pkg/restore/service_action.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" "github.com/vmware-tanzu/velero/pkg/util/boolptr" ) @@ -47,7 +48,7 @@ func (a *ServiceAction) AppliesTo() (velero.ResourceSelector, error) { }, nil } -func (a *ServiceAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { +func (a *ServiceAction) Execute(input *riav1.RestoreItemActionExecuteInput) (*riav1.RestoreItemActionExecuteOutput, error) { service := new(corev1api.Service) if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), service); err != nil { return nil, errors.WithStack(err) @@ -72,7 +73,7 @@ func (a *ServiceAction) Execute(input *velero.RestoreItemActionExecuteInput) (*v return nil, errors.WithStack(err) } - return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil + return riav1.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil } func deleteNodePorts(service *corev1api.Service) error { diff --git a/pkg/restore/service_action_test.go b/pkg/restore/service_action_test.go index 59fef00a15..ccdef0729a 100644 --- a/pkg/restore/service_action_test.go +++ b/pkg/restore/service_action_test.go @@ -29,7 +29,7 @@ import ( api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" + riav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/restoreitemaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -377,7 +377,7 @@ 
func TestServiceActionExecute(t *testing.T) { unstructuredSvc, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&test.obj) require.NoError(t, err) - res, err := action.Execute(&velero.RestoreItemActionExecuteInput{ + res, err := action.Execute(&riav1.RestoreItemActionExecuteInput{ Item: &unstructured.Unstructured{Object: unstructuredSvc}, ItemFromBackup: &unstructured.Unstructured{Object: unstructuredSvc}, Restore: test.restore, diff --git a/pkg/uploader/kopia/progress.go b/pkg/uploader/kopia/progress.go index 050d190dc3..d768181269 100644 --- a/pkg/uploader/kopia/progress.go +++ b/pkg/uploader/kopia/progress.go @@ -60,9 +60,9 @@ type KopiaProgress struct { estimatedFileCount int32 // +checklocksignore the total count of files to be processed estimatedTotalBytes int64 // +checklocksignore the total size of files to be processed // +checkatomic - processedBytes int64 // which statistic all bytes has been processed currently - outputThrottle Throttle // which control the frequency of update progress - UpFunc func(uploader.UploaderProgress) //which called by UpdateProgress func, it is used to update pvb or pvr status + processedBytes int64 // which statistic all bytes has been processed currently + outputThrottle Throttle // which control the frequency of update progress + Updater uploader.ProgressUpdater //which kopia progress will call the UpdateProgress interface, the third party will implement the interface to do the progress update } //UploadedBytes the total bytes has uploaded currently @@ -90,13 +90,10 @@ func (p *KopiaProgress) EstimatedDataSize(fileCount int, totalBytes int64) { p.UpdateProgress() } -//UpdateProgress which called by UpdateProgress func, it is used to update pvb or pvr status +//UpdateProgress which calls Updater UpdateProgress interface, update progress by third-party implementation func (p *KopiaProgress) UpdateProgress() { if p.outputThrottle.ShouldOutput() { - p.UpFunc(uploader.UploaderProgress{ - TotalBytes: atomic.LoadInt64(&p.estimatedTotalBytes), - BytesDone: atomic.LoadInt64(&p.processedBytes), - }) + p.Updater.UpdateProgress(&uploader.UploaderProgress{TotalBytes: p.estimatedTotalBytes, BytesDone: p.processedBytes}) } } diff --git a/pkg/uploader/kopia/snapshot.go b/pkg/uploader/kopia/snapshot.go new file mode 100644 index 0000000000..904c235a1d --- /dev/null +++ b/pkg/uploader/kopia/snapshot.go @@ -0,0 +1,300 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kopia + +import ( + "context" + "io/ioutil" + "math" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/sirupsen/logrus" + + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" + "github.com/vmware-tanzu/velero/pkg/uploader" + "github.com/vmware-tanzu/velero/pkg/util/logging" + + "github.com/kopia/kopia/fs" + "github.com/kopia/kopia/fs/localfs" + "github.com/kopia/kopia/repo" + "github.com/kopia/kopia/repo/manifest" + "github.com/kopia/kopia/snapshot" + "github.com/kopia/kopia/snapshot/policy" + "github.com/kopia/kopia/snapshot/restore" + "github.com/kopia/kopia/snapshot/snapshotfs" + "github.com/pkg/errors" +) + +//All function mainly used to make testing more convenient +var treeForSourceFunc = policy.TreeForSource +var applyRetentionPolicyFunc = policy.ApplyRetentionPolicy +var setPolicyFunc = policy.SetPolicy +var saveSnapshotFunc = snapshot.SaveSnapshot +var loadSnapshotFunc = snapshot.LoadSnapshot + +//SnapshotUploader which mainly used for UT test that could overwrite Upload interface +type SnapshotUploader interface { + Upload( + ctx context.Context, + source fs.Entry, + policyTree *policy.Tree, + sourceInfo snapshot.SourceInfo, + previousManifests ...*snapshot.Manifest, + ) (*snapshot.Manifest, error) +} + +func newOptionalInt(b policy.OptionalInt) *policy.OptionalInt { + return &b +} + +//setupDefaultPolicy set default policy for kopia +func setupDefaultPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snapshot.SourceInfo) error { + return setPolicyFunc(ctx, rep, sourceInfo, &policy.Policy{ + RetentionPolicy: policy.RetentionPolicy{ + KeepLatest: newOptionalInt(math.MaxInt32), + }, + CompressionPolicy: policy.CompressionPolicy{ + CompressorName: "none", + }, + UploadPolicy: policy.UploadPolicy{ + MaxParallelFileReads: newOptionalInt(policy.OptionalInt(runtime.NumCPU())), + }, + SchedulingPolicy: policy.SchedulingPolicy{ + Manual: true, + }, + }) +} + +//Backup backup specific sourcePath and update progress +func Backup(ctx context.Context, fsUploader *snapshotfs.Uploader, repoWriter repo.RepositoryWriter, sourcePath string, + parentSnapshot string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { + if fsUploader == nil { + return nil, false, errors.New("get empty kopia uploader") + } + dir, err := filepath.Abs(sourcePath) + if err != nil { + return nil, false, errors.Wrapf(err, "Invalid source path '%s'", sourcePath) + } + + // to be consistent with restic when backup empty dir returns one error for upper logic handle + dirs, err := ioutil.ReadDir(dir) + if err != nil { + return nil, false, errors.Wrapf(err, "Unable to read dir in path %s", dir) + } else if len(dirs) == 0 { + return nil, true, nil + } + + sourceInfo := snapshot.SourceInfo{ + UserName: udmrepo.GetRepoUser(), + Host: udmrepo.GetRepoDomain(), + Path: filepath.Clean(dir), + } + rootDir, err := getLocalFSEntry(sourceInfo.Path) + if err != nil { + return nil, false, errors.Wrap(err, "Unable to get local filesystem entry") + } + + kopiaCtx := logging.SetupKopiaLog(ctx, log) + snapID, snapshotSize, err := SnapshotSource(kopiaCtx, repoWriter, fsUploader, sourceInfo, rootDir, parentSnapshot, log, "Kopia Uploader") + if err != nil { + return nil, false, err + } + + snapshotInfo := &uploader.SnapshotInfo{ + ID: snapID, + Size: snapshotSize, + } + + return snapshotInfo, false, nil +} + +func getLocalFSEntry(path0 string) (fs.Entry, error) { + path, err := resolveSymlink(path0) + if err != nil { + return nil, errors.Wrap(err, "resolveSymlink") + } + + e, 
err := localfs.NewEntry(path) + if err != nil { + return nil, errors.Wrap(err, "can't get local fs entry") + } + + return e, nil +} + +//resolveSymlink returns the path name after the evaluation of any symbolic links +func resolveSymlink(path string) (string, error) { + st, err := os.Lstat(path) + if err != nil { + return "", errors.Wrap(err, "stat") + } + + if (st.Mode() & os.ModeSymlink) == 0 { + return path, nil + } + + return filepath.EvalSymlinks(path) +} + +//SnapshotSource which setup policy for snapshot, upload snapshot, update progress +func SnapshotSource( + ctx context.Context, + rep repo.RepositoryWriter, + u SnapshotUploader, + sourceInfo snapshot.SourceInfo, + rootDir fs.Entry, + parentSnapshot string, + log logrus.FieldLogger, + description string, +) (string, int64, error) { + log.Info("Start to snapshot...") + snapshotStartTime := time.Now() + + var previous []*snapshot.Manifest + if parentSnapshot != "" { + mani, err := loadSnapshotFunc(ctx, rep, manifest.ID(parentSnapshot)) + if err != nil { + return "", 0, errors.Wrapf(err, "Failed to load previous snapshot %v from kopia", parentSnapshot) + } + + previous = append(previous, mani) + } else { + pre, err := findPreviousSnapshotManifest(ctx, rep, sourceInfo, nil) + if err != nil { + return "", 0, errors.Wrapf(err, "Failed to find previous kopia snapshot manifests for si %v", sourceInfo) + } + + previous = pre + } + var manifest *snapshot.Manifest + if err := setupDefaultPolicy(ctx, rep, sourceInfo); err != nil { + return "", 0, errors.Wrapf(err, "unable to set policy for si %v", sourceInfo) + } + + policyTree, err := treeForSourceFunc(ctx, rep, sourceInfo) + if err != nil { + return "", 0, errors.Wrapf(err, "unable to create policy getter for si %v", sourceInfo) + } + + manifest, err = u.Upload(ctx, rootDir, policyTree, sourceInfo, previous...) + if err != nil { + return "", 0, errors.Wrapf(err, "Failed to upload the kopia snapshot for si %v", sourceInfo) + } + + manifest.Description = description + + if _, err = saveSnapshotFunc(ctx, rep, manifest); err != nil { + return "", 0, errors.Wrapf(err, "Failed to save kopia manifest %v", manifest.ID) + } + _, err = applyRetentionPolicyFunc(ctx, rep, sourceInfo, true) + if err != nil { + return "", 0, errors.Wrapf(err, "Failed to apply kopia retention policy for si %v", sourceInfo) + } + if err = rep.Flush(ctx); err != nil { + return "", 0, errors.Wrapf(err, "Failed to flush kopia repository") + } + log.Infof("Created snapshot with root %v and ID %v in %v", manifest.RootObjectID(), manifest.ID, time.Since(snapshotStartTime).Truncate(time.Second)) + return reportSnapshotStatus(manifest) +} + +func reportSnapshotStatus(manifest *snapshot.Manifest) (string, int64, error) { + manifestID := manifest.ID + snapSize := manifest.Stats.TotalFileSize + + var errs []string + if ds := manifest.RootEntry.DirSummary; ds != nil { + for _, ent := range ds.FailedEntries { + errs = append(errs, ent.Error) + } + } + if len(errs) != 0 { + return "", 0, errors.New(strings.Join(errs, "\n")) + } + + return string(manifestID), snapSize, nil +} + +// findPreviousSnapshotManifest returns the list of previous snapshots for a given source, including +// last complete snapshot following it. 
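+// In this implementation only the most recent complete manifest is kept (one
+// with an empty IncompleteReason and a StartTime not after noLaterThan), so
+// the returned slice has at most one element and is empty when no complete
+// snapshot exists yet, e.g. on the first backup of a volume.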
+func findPreviousSnapshotManifest(ctx context.Context, rep repo.Repository, sourceInfo snapshot.SourceInfo, noLaterThan *time.Time) ([]*snapshot.Manifest, error) { + man, err := snapshot.ListSnapshots(ctx, rep, sourceInfo) + if err != nil { + return nil, err + } + + var previousComplete *snapshot.Manifest + var result []*snapshot.Manifest + + for _, p := range man { + if noLaterThan != nil && p.StartTime.After(*noLaterThan) { + continue + } + + if p.IncompleteReason == "" && (previousComplete == nil || p.StartTime.After(previousComplete.StartTime)) { + previousComplete = p + } + } + + if previousComplete != nil { + result = append(result, previousComplete) + } + + return result, nil +} + +//Restore restore specific sourcePath with given snapshotID and update progress +func Restore(ctx context.Context, rep repo.RepositoryWriter, progress *KopiaProgress, snapshotID, dest string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { + log.Info("Start to restore...") + + kopiaCtx := logging.SetupKopiaLog(ctx, log) + + rootEntry, err := snapshotfs.FilesystemEntryFromIDWithPath(kopiaCtx, rep, snapshotID, false) + if err != nil { + return 0, 0, errors.Wrapf(err, "Unable to get filesystem entry for snapshot %v", snapshotID) + } + + path, err := filepath.Abs(dest) + if err != nil { + return 0, 0, errors.Wrapf(err, "Unable to resolve path %v", dest) + } + + output := &restore.FilesystemOutput{ + TargetPath: path, + OverwriteDirectories: true, + OverwriteFiles: true, + OverwriteSymlinks: true, + IgnorePermissionErrors: true, + } + + stat, err := restore.Entry(kopiaCtx, rep, output, rootEntry, restore.Options{ + Parallel: runtime.NumCPU(), + RestoreDirEntryAtDepth: math.MaxInt32, + Cancel: cancleCh, + ProgressCallback: func(ctx context.Context, stats restore.Stats) { + progress.ProgressBytes(stats.RestoredTotalFileSize, stats.EnqueuedTotalFileSize) + }, + }) + + if err != nil { + return 0, 0, errors.Wrapf(err, "Failed to copy snapshot data to the target") + } + return stat.RestoredTotalFileSize, stat.RestoredFileCount, nil +} diff --git a/pkg/uploader/kopia/snapshot_test.go b/pkg/uploader/kopia/snapshot_test.go new file mode 100644 index 0000000000..a5b6d81fbe --- /dev/null +++ b/pkg/uploader/kopia/snapshot_test.go @@ -0,0 +1,198 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kopia + +import ( + "context" + "testing" + + "github.com/kopia/kopia/snapshot" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + repomocks "github.com/vmware-tanzu/velero/pkg/repository/mocks" + uploadermocks "github.com/vmware-tanzu/velero/pkg/uploader/mocks" +) + +type snapshotMockes struct { + policyMock *uploadermocks.Policy + snapshotMock *uploadermocks.Snapshot + uploderMock *uploadermocks.Uploader + repoWriterMock *repomocks.RepositoryWriter +} + +type mockArgs struct { + methodName string + returns []interface{} +} + +func InjectSnapshotFuncs() *snapshotMockes { + s := &snapshotMockes{ + policyMock: &uploadermocks.Policy{}, + snapshotMock: &uploadermocks.Snapshot{}, + uploderMock: &uploadermocks.Uploader{}, + repoWriterMock: &repomocks.RepositoryWriter{}, + } + + setPolicyFunc = s.policyMock.SetPolicy + treeForSourceFunc = s.policyMock.TreeForSource + applyRetentionPolicyFunc = s.policyMock.ApplyRetentionPolicy + loadSnapshotFunc = s.snapshotMock.LoadSnapshot + saveSnapshotFunc = s.snapshotMock.SaveSnapshot + return s +} + +func MockFuncs(s *snapshotMockes, args []mockArgs) { + s.snapshotMock.On("LoadSnapshot", mock.Anything, mock.Anything, mock.Anything).Return(args[0].returns...) + s.snapshotMock.On("SaveSnapshot", mock.Anything, mock.Anything, mock.Anything).Return(args[1].returns...) + s.policyMock.On("TreeForSource", mock.Anything, mock.Anything, mock.Anything).Return(args[2].returns...) + s.policyMock.On("ApplyRetentionPolicy", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(args[3].returns...) + s.policyMock.On("SetPolicy", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(args[4].returns...) + s.uploderMock.On("Upload", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(args[5].returns...) + s.repoWriterMock.On("Flush", mock.Anything).Return(args[6].returns...) 
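+	// Note: the stubbed return values are consumed positionally, so the args
+	// slice must list its entries in the fixed order wired above:
+	// LoadSnapshot, SaveSnapshot, TreeForSource, ApplyRetentionPolicy,
+	// SetPolicy, Upload, Flush (indexes 0 through 6).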
+} + +func TestSnapshotSource(t *testing.T) { + + ctx := context.TODO() + sourceInfo := snapshot.SourceInfo{ + UserName: "testUserName", + Host: "testHost", + Path: "/var", + } + rootDir, err := getLocalFSEntry(sourceInfo.Path) + assert.NoError(t, err) + log := logrus.New() + manifest := &snapshot.Manifest{ + ID: "test", + RootEntry: &snapshot.DirEntry{}, + } + + testCases := []struct { + name string + args []mockArgs + notError bool + }{ + { + name: "regular test", + args: []mockArgs{ + {methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}}, + {methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}}, + {methodName: "TreeForSource", returns: []interface{}{nil, nil}}, + {methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}}, + {methodName: "SetPolicy", returns: []interface{}{nil}}, + {methodName: "Upload", returns: []interface{}{manifest, nil}}, + {methodName: "Flush", returns: []interface{}{nil}}, + }, + notError: true, + }, + { + name: "failed to load snapshot", + args: []mockArgs{ + {methodName: "LoadSnapshot", returns: []interface{}{manifest, errors.New("failed to load snapshot")}}, + {methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}}, + {methodName: "TreeForSource", returns: []interface{}{nil, nil}}, + {methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}}, + {methodName: "SetPolicy", returns: []interface{}{nil}}, + {methodName: "Upload", returns: []interface{}{manifest, nil}}, + {methodName: "Flush", returns: []interface{}{nil}}, + }, + notError: false, + }, + { + name: "failed to save snapshot", + args: []mockArgs{ + {methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}}, + {methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, errors.New("failed to save snapshot")}}, + {methodName: "TreeForSource", returns: []interface{}{nil, nil}}, + {methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}}, + {methodName: "SetPolicy", returns: []interface{}{nil}}, + {methodName: "Upload", returns: []interface{}{manifest, nil}}, + {methodName: "Flush", returns: []interface{}{nil}}, + }, + notError: false, + }, + { + name: "failed to apply policy", + args: []mockArgs{ + {methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}}, + {methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}}, + {methodName: "TreeForSource", returns: []interface{}{nil, nil}}, + {methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, errors.New("failed to save snapshot")}}, + {methodName: "SetPolicy", returns: []interface{}{nil}}, + {methodName: "Upload", returns: []interface{}{manifest, nil}}, + {methodName: "Flush", returns: []interface{}{nil}}, + }, + notError: false, + }, + { + name: "failed to set policy", + args: []mockArgs{ + {methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}}, + {methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}}, + {methodName: "TreeForSource", returns: []interface{}{nil, nil}}, + {methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}}, + {methodName: "SetPolicy", returns: []interface{}{errors.New("failed to set policy")}}, + {methodName: "Upload", returns: []interface{}{manifest, nil}}, + {methodName: "Flush", returns: []interface{}{nil}}, + }, + notError: false, + }, + { + name: "failed to upload snapshot", + args: []mockArgs{ + {methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}}, + {methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}}, + {methodName: "TreeForSource", 
returns: []interface{}{nil, nil}}, + {methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}}, + {methodName: "SetPolicy", returns: []interface{}{nil}}, + {methodName: "Upload", returns: []interface{}{manifest, errors.New("failed to upload snapshot")}}, + {methodName: "Flush", returns: []interface{}{nil}}, + }, + notError: false, + }, + { + name: "failed to flush repo", + args: []mockArgs{ + {methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}}, + {methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, errors.New("failed to save snapshot")}}, + {methodName: "TreeForSource", returns: []interface{}{nil, nil}}, + {methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}}, + {methodName: "SetPolicy", returns: []interface{}{nil}}, + {methodName: "Upload", returns: []interface{}{manifest, nil}}, + {methodName: "Flush", returns: []interface{}{errors.New("failed to flush repo")}}, + }, + notError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + s := InjectSnapshotFuncs() + MockFuncs(s, tc.args) + _, _, err = SnapshotSource(ctx, s.repoWriterMock, s.uploderMock, sourceInfo, rootDir, "/", log, "TestSnapshotSource") + if tc.notError { + assert.NoError(t, err) + } else { + assert.Error(t, err) + } + }) + } + +} diff --git a/pkg/uploader/mocks/policy.go b/pkg/uploader/mocks/policy.go new file mode 100644 index 0000000000..3c1dcdd787 --- /dev/null +++ b/pkg/uploader/mocks/policy.go @@ -0,0 +1,92 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mocks + +import ( + "context" + + "github.com/kopia/kopia/snapshot/policy" + "github.com/stretchr/testify/mock" + + "github.com/kopia/kopia/repo" + + "github.com/kopia/kopia/snapshot" +) + +// policy is an autogenerated mock type for the TreeForSource type +type Policy struct { + mock.Mock +} + +// Execute provides a mock function with given fields: ctx, rep, si +func (_m *Policy) TreeForSource(ctx context.Context, rep repo.Repository, si snapshot.SourceInfo) (*policy.Tree, error) { + ret := _m.Called(ctx, rep, si) + + var r0 *policy.Tree + if rf, ok := ret.Get(0).(func(context.Context, repo.Repository, snapshot.SourceInfo) *policy.Tree); ok { + r0 = rf(ctx, rep, si) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*policy.Tree) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, repo.Repository, snapshot.SourceInfo) error); ok { + r1 = rf(ctx, rep, si) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ApplyRetentionPolicy provides a mock function with given fields: ctx, rep, sourceInfo, reallyDelete +func (_m *Policy) ApplyRetentionPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snapshot.SourceInfo, reallyDelete bool) ([]*snapshot.Manifest, error) { + ret := _m.Called(ctx, rep, sourceInfo, reallyDelete) + + var r0 []*snapshot.Manifest + if rf, ok := ret.Get(0).(func(context.Context, repo.RepositoryWriter, snapshot.SourceInfo, bool) []*snapshot.Manifest); ok { + r0 = rf(ctx, rep, sourceInfo, reallyDelete) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*snapshot.Manifest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, repo.RepositoryWriter, snapshot.SourceInfo, bool) error); ok { + r1 = rf(ctx, rep, sourceInfo, reallyDelete) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +func (_m *Policy) SetPolicy(ctx context.Context, rep repo.RepositoryWriter, si snapshot.SourceInfo, pol *policy.Policy) error { + ret := _m.Called(ctx, rep, si, pol) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, repo.RepositoryWriter, snapshot.SourceInfo, *policy.Policy) error); ok { + r0 = rf(ctx, rep, si, pol) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/pkg/uploader/mocks/shim.go b/pkg/uploader/mocks/shim.go new file mode 100644 index 0000000000..1ec3acc580 --- /dev/null +++ b/pkg/uploader/mocks/shim.go @@ -0,0 +1,42 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// shimRepository is an autogenerated mock type for the shimRepository type +type ShimRepository struct { + mock.Mock +} + +// Flush provides a mock function with given fields: ctx +func (_m *ShimRepository) Flush(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/pkg/uploader/mocks/snapshot.go b/pkg/uploader/mocks/snapshot.go new file mode 100644 index 0000000000..c651242eb4 --- /dev/null +++ b/pkg/uploader/mocks/snapshot.go @@ -0,0 +1,76 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mocks + +import ( + "context" + + "github.com/kopia/kopia/repo/manifest" + "github.com/kopia/kopia/snapshot" + "github.com/stretchr/testify/mock" + + "github.com/kopia/kopia/repo" +) + +// snapshot is an autogenerated mock type for the snapshot type +type Snapshot struct { + mock.Mock +} + +// LoadSnapshot provides a mock function with given fields: ctx, rep, manifestID +func (_m *Snapshot) LoadSnapshot(ctx context.Context, rep repo.Repository, manifestID manifest.ID) (*snapshot.Manifest, error) { + ret := _m.Called(ctx, rep, manifestID) + + var r0 *snapshot.Manifest + if rf, ok := ret.Get(0).(func(context.Context, repo.Repository, manifest.ID) *snapshot.Manifest); ok { + r0 = rf(ctx, rep, manifestID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*snapshot.Manifest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, repo.Repository, manifest.ID) error); ok { + r1 = rf(ctx, rep, manifestID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SaveSnapshot provides a mock function with given fields: ctx, rep, man +func (_m *Snapshot) SaveSnapshot(ctx context.Context, rep repo.RepositoryWriter, man *snapshot.Manifest) (manifest.ID, error) { + ret := _m.Called(ctx, rep, man) + + var r0 manifest.ID + if rf, ok := ret.Get(0).(func(context.Context, repo.RepositoryWriter, *snapshot.Manifest) manifest.ID); ok { + r0 = rf(ctx, rep, man) + } else { + r0 = ret.Get(0).(manifest.ID) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, repo.RepositoryWriter, *snapshot.Manifest) error); ok { + r1 = rf(ctx, rep, man) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/pkg/uploader/mocks/uploader.go b/pkg/uploader/mocks/uploader.go new file mode 100644 index 0000000000..d8b5fa2fda --- /dev/null +++ b/pkg/uploader/mocks/uploader.go @@ -0,0 +1,63 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mocks + +import ( + "context" + + "github.com/kopia/kopia/fs" + "github.com/stretchr/testify/mock" + + "github.com/kopia/kopia/snapshot/policy" + + "github.com/kopia/kopia/snapshot" +) + +// Upload is an autogenerated mock type for the Upload type +type Uploader struct { + mock.Mock +} + +// Execute provides a mock function with given fields: ctx, source, policyTree, sourceInfo, previousManifests +func (_m *Uploader) Upload(ctx context.Context, source fs.Entry, policyTree *policy.Tree, sourceInfo snapshot.SourceInfo, previousManifests ...*snapshot.Manifest) (*snapshot.Manifest, error) { + _va := make([]interface{}, len(previousManifests)) + for _i := range previousManifests { + _va[_i] = previousManifests[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, source, policyTree, sourceInfo) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *snapshot.Manifest + if rf, ok := ret.Get(0).(func(context.Context, fs.Entry, *policy.Tree, snapshot.SourceInfo, ...*snapshot.Manifest) *snapshot.Manifest); ok { + r0 = rf(ctx, source, policyTree, sourceInfo, previousManifests...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*snapshot.Manifest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, fs.Entry, *policy.Tree, snapshot.SourceInfo, ...*snapshot.Manifest) error); ok { + r1 = rf(ctx, source, policyTree, sourceInfo, previousManifests...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/pkg/uploader/provider/kopia.go b/pkg/uploader/provider/kopia.go new file mode 100644 index 0000000000..9890a8a91b --- /dev/null +++ b/pkg/uploader/provider/kopia.go @@ -0,0 +1,212 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "context" + "fmt" + "strings" + + "github.com/kopia/kopia/snapshot/snapshotfs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/vmware-tanzu/velero/pkg/uploader" + "github.com/vmware-tanzu/velero/pkg/uploader/kopia" + + "github.com/vmware-tanzu/velero/internal/credentials" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + repokeys "github.com/vmware-tanzu/velero/pkg/repository/keys" + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo/service" +) + +//BackupFunc mainly used to make testing more convenient +var BackupFunc = kopia.Backup +var RestoreFunc = kopia.Restore + +//kopiaProvider recorded info related with kopiaProvider +type kopiaProvider struct { + bkRepo udmrepo.BackupRepo + credGetter *credentials.CredentialGetter + log logrus.FieldLogger +} + +//NewKopiaUploaderProvider initialized with open or create a repository +func NewKopiaUploaderProvider( + ctx context.Context, + credGetter *credentials.CredentialGetter, + backupRepo *velerov1api.BackupRepository, + log logrus.FieldLogger, +) (Provider, error) { + kp := &kopiaProvider{ + log: log, + credGetter: credGetter, + } + //repoUID which is used to generate kopia repository config with unique directory path + repoUID := string(backupRepo.GetUID()) + repoOpt, err := udmrepo.NewRepoOptions( + udmrepo.WithPassword(kp, ""), + udmrepo.WithConfigFile("", repoUID), + udmrepo.WithDescription("Initial kopia uploader provider"), + ) + if err != nil { + return nil, errors.Wrapf(err, "error to get repo options") + } + + repoSvc := service.Create(log) + log.WithField("repoUID", repoUID).Info("Opening backup repo") + + kp.bkRepo, err = repoSvc.Open(ctx, *repoOpt) + if err != nil { + return nil, errors.Wrapf(err, "Failed to find kopia repository") + } + return kp, nil +} + +//CheckContext check context status check if context is timeout or cancel and backup restore once finished it will quit and return +func (kp *kopiaProvider) CheckContext(ctx context.Context, finishChan chan struct{}, restoreChan chan struct{}, uploader *snapshotfs.Uploader) { + select { + case <-finishChan: + kp.log.Infof("Action finished") + return + case <-ctx.Done(): + if uploader != nil { + uploader.Cancel() + kp.log.Infof("Backup is been canceled") + } + if restoreChan != nil { + close(restoreChan) + kp.log.Infof("Restore is been canceled") + } + return + } +} + +func (kp *kopiaProvider) Close(ctx context.Context) error { + return kp.bkRepo.Close(ctx) +} + +// RunBackup which will backup specific path and update backup progress +// return snapshotID, isEmptySnapshot, error +func (kp *kopiaProvider) RunBackup( + ctx context.Context, + path string, + tags map[string]string, + parentSnapshot string, + updater uploader.ProgressUpdater) (string, bool, error) { + if updater == nil { + return "", false, errors.New("Need to initial backup progress updater first") + } + + log := kp.log.WithFields(logrus.Fields{ + "path": path, + "parentSnapshot": parentSnapshot, + }) + repoWriter := kopia.NewShimRepo(kp.bkRepo) + kpUploader := snapshotfs.NewUploader(repoWriter) + prorgess := new(kopia.KopiaProgress) + prorgess.InitThrottle(backupProgressCheckInterval) + prorgess.Updater = updater + kpUploader.Progress = prorgess + quit := make(chan struct{}) + log.Info("Starting backup") + go kp.CheckContext(ctx, quit, nil, kpUploader) + + defer func() { + close(quit) + }() + + snapshotInfo, isSnapshotEmpty, err := BackupFunc(ctx, kpUploader, repoWriter, 
path, parentSnapshot, log) + if err != nil { + return "", false, errors.Wrapf(err, "Failed to run kopia backup") + } else if isSnapshotEmpty { + log.Debugf("Kopia backup got empty dir with path %s", path) + return "", true, nil + } else if snapshotInfo == nil { + return "", false, fmt.Errorf("failed to get kopia backup snapshot info for path %v", path) + } + + // which ensure that the statistic data of TotalBytes equal to BytesDone when finished + updater.UpdateProgress( + &uploader.UploaderProgress{ + TotalBytes: snapshotInfo.Size, + BytesDone: snapshotInfo.Size, + }, + ) + + log.Debugf("Kopia backup finished, snapshot ID %s, backup size %d", snapshotInfo.ID, snapshotInfo.Size) + return snapshotInfo.ID, false, nil +} + +func (kp *kopiaProvider) GetPassword(param interface{}) (string, error) { + if kp.credGetter.FromSecret == nil { + return "", errors.New("invalid credentials interface") + } + rawPass, err := kp.credGetter.FromSecret.Get(repokeys.RepoKeySelector()) + if err != nil { + return "", errors.Wrap(err, "error to get password") + } + + return strings.TrimSpace(rawPass), nil +} + +//RunRestore which will restore specific path and update restore progress +func (kp *kopiaProvider) RunRestore( + ctx context.Context, + snapshotID string, + volumePath string, + updater uploader.ProgressUpdater) error { + log := kp.log.WithFields(logrus.Fields{ + "snapshotID": snapshotID, + "volumePath": volumePath, + }) + repoWriter := kopia.NewShimRepo(kp.bkRepo) + prorgess := new(kopia.KopiaProgress) + prorgess.InitThrottle(restoreProgressCheckInterval) + prorgess.Updater = updater + restoreCancel := make(chan struct{}) + quit := make(chan struct{}) + + log.Info("Starting restore") + go kp.CheckContext(ctx, quit, restoreCancel, nil) + + defer func() { + if restoreCancel != nil { + close(restoreCancel) + } + close(quit) + }() + + size, fileCount, err := RestoreFunc(ctx, repoWriter, prorgess, snapshotID, volumePath, log, restoreCancel) + + if err != nil { + return errors.Wrapf(err, "Failed to run kopia restore") + } + + // which ensure that the statistic data of TotalBytes equal to BytesDone when finished + updater.UpdateProgress(&uploader.UploaderProgress{ + TotalBytes: size, + BytesDone: size, + }) + + output := fmt.Sprintf("Kopia restore finished, restore size %d, file count %d", size, fileCount) + + log.Info(output) + + return nil +} diff --git a/pkg/uploader/provider/kopia_test.go b/pkg/uploader/provider/kopia_test.go new file mode 100644 index 0000000000..955bf83f42 --- /dev/null +++ b/pkg/uploader/provider/kopia_test.go @@ -0,0 +1,136 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "context" + "testing" + + "github.com/kopia/kopia/repo" + "github.com/kopia/kopia/snapshot/snapshotfs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + "github.com/vmware-tanzu/velero/pkg/uploader" + "github.com/vmware-tanzu/velero/pkg/uploader/kopia" +) + +func TestRunBackup(t *testing.T) { + var kp kopiaProvider + kp.log = logrus.New() + updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewFakeClientWithScheme(scheme.Scheme)} + testCases := []struct { + name string + hookBackupFunc func(ctx context.Context, fsUploader *snapshotfs.Uploader, repoWriter repo.RepositoryWriter, sourcePath, parentSnapshot string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) + notError bool + }{ + { + name: "success to backup", + hookBackupFunc: func(ctx context.Context, fsUploader *snapshotfs.Uploader, repoWriter repo.RepositoryWriter, sourcePath, parentSnapshot string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { + return &uploader.SnapshotInfo{}, false, nil + }, + notError: true, + }, + { + name: "get error to backup", + hookBackupFunc: func(ctx context.Context, fsUploader *snapshotfs.Uploader, repoWriter repo.RepositoryWriter, sourcePath, parentSnapshot string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { + return &uploader.SnapshotInfo{}, false, errors.New("failed to backup") + }, + notError: false, + }, + { + name: "got empty snapshot", + hookBackupFunc: func(ctx context.Context, fsUploader *snapshotfs.Uploader, repoWriter repo.RepositoryWriter, sourcePath, parentSnapshot string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { + return nil, true, errors.New("snapshot is empty") + }, + notError: false, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + BackupFunc = tc.hookBackupFunc + _, _, err := kp.RunBackup(context.Background(), "var", nil, "", &updater) + if tc.notError { + assert.NoError(t, err) + } else { + assert.Error(t, err) + } + }) + } +} + +func TestRunRestore(t *testing.T) { + var kp kopiaProvider + kp.log = logrus.New() + updater := FakeRestoreProgressUpdater{PodVolumeRestore: &velerov1api.PodVolumeRestore{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewFakeClientWithScheme(scheme.Scheme)} + + testCases := []struct { + name string + hookRestoreFunc func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.KopiaProgress, snapshotID, dest string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) + notError bool + }{ + { + name: "normal restore", + hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.KopiaProgress, snapshotID, dest string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { + return 0, 0, nil + }, + notError: true, + }, + { + name: "failed to restore", + hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.KopiaProgress, snapshotID, dest string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { + return 0, 0, errors.New("failed to restore") + }, + notError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, 
func(t *testing.T) { + RestoreFunc = tc.hookRestoreFunc + err := kp.RunRestore(context.Background(), "", "/var", &updater) + if tc.notError { + assert.NoError(t, err) + } else { + assert.Error(t, err) + } + }) + } +} + +type FakeBackupProgressUpdater struct { + PodVolumeBackup *velerov1api.PodVolumeBackup + Log logrus.FieldLogger + Ctx context.Context + Cli client.Client +} + +func (f *FakeBackupProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) {} + +type FakeRestoreProgressUpdater struct { + PodVolumeRestore *velerov1api.PodVolumeRestore + Log logrus.FieldLogger + Ctx context.Context + Cli client.Client +} + +func (f *FakeRestoreProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) {} diff --git a/pkg/uploader/provider/provider.go b/pkg/uploader/provider/provider.go index 5a90a806f1..08dbb0ee52 100644 --- a/pkg/uploader/provider/provider.go +++ b/pkg/uploader/provider/provider.go @@ -18,27 +18,73 @@ package provider import ( "context" + "time" + "github.com/pkg/errors" + + "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/repository/provider" + "github.com/vmware-tanzu/velero/pkg/uploader" + "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) +const restoreProgressCheckInterval = 10 * time.Second +const backupProgressCheckInterval = 10 * time.Second + // Provider which is designed for one pod volumn to do the backup or restore type Provider interface { - // RunBackup which will do backup for one specific volumn and return snapshotID error - // updateFunc which is used for update backup progress into related pvb status + // RunBackup which will do backup for one specific volumn and return snapshotID, isSnapshotEmpty, error + // updater is used for updating backup progress which implement by third-party RunBackup( ctx context.Context, path string, tags map[string]string, parentSnapshot string, - updateFunc func(velerov1api.PodVolumeOperationProgress)) (string, error) + updater uploader.ProgressUpdater) (string, bool, error) // RunRestore which will do restore for one specific volumn with given snapshot id and return error - // updateFunc which is used for update restore progress into related pvr status + // updater is used for updating backup progress which implement by third-party RunRestore( ctx context.Context, snapshotID string, volumePath string, - updateFunc func(velerov1api.PodVolumeOperationProgress)) error + updater uploader.ProgressUpdater) error // Close which will close related repository - Close(ctx context.Context) + Close(ctx context.Context) error +} + +// NewUploaderProvider initialize provider with specific uploaderType +func NewUploaderProvider( + ctx context.Context, + client client.Client, + uploaderType string, + repoIdentifier string, + bsl *velerov1api.BackupStorageLocation, + backupRepo *velerov1api.BackupRepository, + credGetter *credentials.CredentialGetter, + repoKeySelector *v1.SecretKeySelector, + log logrus.FieldLogger, +) (Provider, error) { + if credGetter.FromFile == nil { + return nil, errors.New("uninitialized FileStore credentail is not supported") + } + if uploaderType == uploader.KopiaType { + // We use the hardcode repositoryType velerov1api.BackupRepositoryTypeKopia for now, because we have only one implementation of unified repo. + // TODO: post v1.10, replace the hardcode. 
In future, when we have multiple implementations of Unified Repo (besides Kopia), we will add the + // repositoryType to BSL, because by then, we are not able to hardcode the repositoryType to BackupRepositoryTypeKopia for Unified Repo. + if err := provider.NewUnifiedRepoProvider(*credGetter, velerov1api.BackupRepositoryTypeKopia, log).ConnectToRepo(ctx, provider.RepoParam{BackupLocation: bsl, BackupRepo: backupRepo}); err != nil { + return nil, errors.Wrap(err, "failed to connect repository") + } + return NewKopiaUploaderProvider(ctx, credGetter, backupRepo, log) + } else { + if err := provider.NewResticRepositoryProvider(credGetter.FromFile, filesystem.NewFileSystem(), log).ConnectToRepo(ctx, provider.RepoParam{BackupLocation: bsl, BackupRepo: backupRepo}); err != nil { + return nil, errors.Wrap(err, "failed to connect repository") + } + return NewResticUploaderProvider(repoIdentifier, bsl, credGetter, repoKeySelector, log) + } } diff --git a/pkg/uploader/provider/restic.go b/pkg/uploader/provider/restic.go new file mode 100644 index 0000000000..2715b9e9d2 --- /dev/null +++ b/pkg/uploader/provider/restic.go @@ -0,0 +1,184 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provider + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + + "github.com/vmware-tanzu/velero/internal/credentials" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/restic" + "github.com/vmware-tanzu/velero/pkg/uploader" + "github.com/vmware-tanzu/velero/pkg/util/filesystem" +) + +// mainly used to make testing more convenient +var ResticBackupCMDFunc = restic.BackupCommand +var ResticRestoreCMDFunc = restic.RestoreCommand + +type resticProvider struct { + repoIdentifier string + credentialsFile string + caCertFile string + cmdEnv []string + extraFlags []string + bsl *velerov1api.BackupStorageLocation + log logrus.FieldLogger +} + +func NewResticUploaderProvider( + repoIdentifier string, + bsl *velerov1api.BackupStorageLocation, + credGetter *credentials.CredentialGetter, + repoKeySelector *v1.SecretKeySelector, + log logrus.FieldLogger, +) (Provider, error) { + provider := resticProvider{ + repoIdentifier: repoIdentifier, + bsl: bsl, + log: log, + } + + var err error + provider.credentialsFile, err = credGetter.FromFile.Path(repoKeySelector) + if err != nil { + return nil, errors.Wrap(err, "error creating temp restic credentials file") + } + + // if there's a caCert on the ObjectStorage, write it to disk so that it can be passed to restic + if bsl.Spec.ObjectStorage != nil && bsl.Spec.ObjectStorage.CACert != nil { + provider.caCertFile, err = restic.TempCACertFile(bsl.Spec.ObjectStorage.CACert, bsl.Name, filesystem.NewFileSystem()) + if err != nil { + return nil, errors.Wrap(err, "error create temp cert file") + } + } + + provider.cmdEnv, err = restic.CmdEnv(bsl, credGetter.FromFile) + if err != nil { + return nil, errors.Wrap(err, "error generating 
repository cmnd env") + } + + // #4820: restrieve insecureSkipTLSVerify from BSL configuration for + // AWS plugin. If nothing is return, that means insecureSkipTLSVerify + // is not enable for Restic command. + skipTLSRet := restic.GetInsecureSkipTLSVerifyFromBSL(bsl, log) + if len(skipTLSRet) > 0 { + provider.extraFlags = append(provider.extraFlags, skipTLSRet) + } + + return &provider, nil +} + +func (rp *resticProvider) Close(ctx context.Context) error { + _, err := os.Stat(rp.credentialsFile) + if err == nil { + return os.Remove(rp.credentialsFile) + } else if !os.IsNotExist(err) { + return errors.Errorf("failed to get file %s info with error %v", rp.credentialsFile, err) + } + + _, err = os.Stat(rp.caCertFile) + if err == nil { + return os.Remove(rp.caCertFile) + } else if !os.IsNotExist(err) { + return errors.Errorf("failed to get file %s info with error %v", rp.caCertFile, err) + } + return nil +} + +// RunBackup runs a `backup` command and watches the output to provide +// progress updates to the caller and return snapshotID, isEmptySnapshot, error +func (rp *resticProvider) RunBackup( + ctx context.Context, + path string, + tags map[string]string, + parentSnapshot string, + updater uploader.ProgressUpdater) (string, bool, error) { + if updater == nil { + return "", false, errors.New("Need to initial backup progress updater first") + } + + log := rp.log.WithFields(logrus.Fields{ + "path": path, + "parentSnapshot": parentSnapshot, + }) + + backupCmd := ResticBackupCMDFunc(rp.repoIdentifier, rp.credentialsFile, path, tags) + backupCmd.Env = rp.cmdEnv + backupCmd.CACertFile = rp.caCertFile + if len(rp.extraFlags) != 0 { + backupCmd.ExtraFlags = append(backupCmd.ExtraFlags, rp.extraFlags...) + } + + if parentSnapshot != "" { + backupCmd.ExtraFlags = append(backupCmd.ExtraFlags, fmt.Sprintf("--parent=%s", parentSnapshot)) + } + + summary, stderrBuf, err := restic.RunBackup(backupCmd, log, updater) + if err != nil { + if strings.Contains(err.Error(), "snapshot is empty") { + log.Debugf("Restic backup got empty dir with %s path", path) + return "", true, nil + } + return "", false, errors.WithStack(fmt.Errorf("error running restic backup command %s with error: %v stderr: %v", backupCmd.String(), err, stderrBuf)) + } + // GetSnapshotID + snapshotIdCmd := restic.GetSnapshotCommand(rp.repoIdentifier, rp.credentialsFile, tags) + snapshotIdCmd.Env = rp.cmdEnv + snapshotIdCmd.CACertFile = rp.caCertFile + + snapshotID, err := restic.GetSnapshotID(snapshotIdCmd) + if err != nil { + return "", false, errors.WithStack(fmt.Errorf("error getting snapshot id with error: %v", err)) + } + log.Infof("Run command=%s, stdout=%s, stderr=%s", backupCmd.String(), summary, stderrBuf) + return snapshotID, false, nil +} + +// RunRestore runs a `restore` command and monitors the volume size to +// provide progress updates to the caller. +func (rp *resticProvider) RunRestore( + ctx context.Context, + snapshotID string, + volumePath string, + updater uploader.ProgressUpdater) error { + if updater == nil { + return errors.New("Need to initial backup progress updater first") + } + log := rp.log.WithFields(logrus.Fields{ + "snapshotID": snapshotID, + "volumePath": volumePath, + }) + + restoreCmd := ResticRestoreCMDFunc(rp.repoIdentifier, rp.credentialsFile, snapshotID, volumePath) + restoreCmd.Env = rp.cmdEnv + restoreCmd.CACertFile = rp.caCertFile + if len(rp.extraFlags) != 0 { + restoreCmd.ExtraFlags = append(restoreCmd.ExtraFlags, rp.extraFlags...) 
+ } + stdout, stderr, err := restic.RunRestore(restoreCmd, log, updater) + + log.Infof("Run command=%s, stdout=%s, stderr=%s", restoreCmd.Command, stdout, stderr) + return err +} diff --git a/pkg/uploader/provider/restic_test.go b/pkg/uploader/provider/restic_test.go new file mode 100644 index 0000000000..042602777a --- /dev/null +++ b/pkg/uploader/provider/restic_test.go @@ -0,0 +1,106 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provider + +import ( + "context" + "strings" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + "github.com/vmware-tanzu/velero/pkg/restic" + "github.com/vmware-tanzu/velero/pkg/uploader" +) + +func TestResticRunBackup(t *testing.T) { + var rp resticProvider + rp.log = logrus.New() + updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: rp.log, Ctx: context.Background(), Cli: fake.NewFakeClientWithScheme(scheme.Scheme)} + testCases := []struct { + name string + hookBackupFunc func(repoIdentifier string, passwordFile string, path string, tags map[string]string) *restic.Command + hookRunBackupFunc func(backupCmd *restic.Command, log logrus.FieldLogger, updater uploader.ProgressUpdater) (string, string, error) + errorHandleFunc func(err error) bool + }{ + { + name: "wrong restic execute command", + hookBackupFunc: func(repoIdentifier string, passwordFile string, path string, tags map[string]string) *restic.Command { + return &restic.Command{Command: "date"} + }, + errorHandleFunc: func(err error) bool { + return strings.Contains(err.Error(), "executable file not found in") + }, + }, + { + name: "wrong parsing json summary content", + hookBackupFunc: func(repoIdentifier string, passwordFile string, path string, tags map[string]string) *restic.Command { + return &restic.Command{Command: "version"} + }, + errorHandleFunc: func(err error) bool { + return strings.Contains(err.Error(), "executable file not found in") + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ResticBackupCMDFunc = tc.hookBackupFunc + _, _, err := rp.RunBackup(context.Background(), "var", nil, "", &updater) + rp.log.Infof("test name %v error %v", tc.name, err) + require.Equal(t, true, tc.errorHandleFunc(err)) + }) + } +} + +func TestResticRunRestore(t *testing.T) { + var rp resticProvider + rp.log = logrus.New() + updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: rp.log, Ctx: context.Background(), Cli: fake.NewFakeClientWithScheme(scheme.Scheme)} + ResticRestoreCMDFunc = func(repoIdentifier, passwordFile, snapshotID, target string) *restic.Command { + return &restic.Command{Args: []string{""}} + } + testCases := []struct { + name string + hookResticRestoreFunc func(repoIdentifier, passwordFile, snapshotID, target string) 
*restic.Command + errorHandleFunc func(err error) bool + }{ + { + name: "wrong restic execute command", + hookResticRestoreFunc: func(repoIdentifier, passwordFile, snapshotID, target string) *restic.Command { + return &restic.Command{Args: []string{"date"}} + }, + errorHandleFunc: func(err error) bool { + return strings.Contains(err.Error(), "executable file not found ") + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ResticRestoreCMDFunc = tc.hookResticRestoreFunc + err := rp.RunRestore(context.Background(), "", "var", &updater) + rp.log.Infof("test name %v error %v", tc.name, err) + require.Equal(t, true, tc.errorHandleFunc(err)) + }) + } + +} diff --git a/pkg/uploader/types.go b/pkg/uploader/types.go index 134e36cceb..015a2c156e 100644 --- a/pkg/uploader/types.go +++ b/pkg/uploader/types.go @@ -22,10 +22,8 @@ import ( ) const ( - ResticType = "restic" - KopiaType = "kopia" - VeleroBackup = "backup" - VeleroRestore = "restore" + ResticType = "restic" + KopiaType = "kopia" ) // ValidateUploaderType validates if the input param is a valid uploader type. @@ -43,7 +41,13 @@ type SnapshotInfo struct { Size int64 `json:"Size"` } +//UploaderProgress which defined two variables to record progress type UploaderProgress struct { TotalBytes int64 `json:"totalBytes,omitempty"` BytesDone int64 `json:"doneBytes,omitempty"` } + +//UploaderProgress which defined generic interface to update progress +type ProgressUpdater interface { + UpdateProgress(p *UploaderProgress) +} diff --git a/pkg/util/csi/reset.go b/pkg/util/csi/reset.go index 5065aae780..f762f8aaa6 100644 --- a/pkg/util/csi/reset.go +++ b/pkg/util/csi/reset.go @@ -27,7 +27,7 @@ import ( // It will move the snapshot Handle to the source to avoid the snapshot-controller creating a snapshot when it's // synced by the backup sync controller. // It will return an error if the snapshot handle is not set, which should not happen when this func is called. 
-func ResetVolumeSnapshotContent(snapCont *snapshotv1api.VolumeSnapshotContent) error { +func ResetVolumeSnapshotContent(snapCont snapshotv1api.VolumeSnapshotContent) error { if snapCont.Status != nil && snapCont.Status.SnapshotHandle != nil && len(*snapCont.Status.SnapshotHandle) > 0 { v := *snapCont.Status.SnapshotHandle snapCont.Spec.Source = snapshotv1api.VolumeSnapshotContentSource{ diff --git a/pkg/util/kube/periodical_enqueue_source.go b/pkg/util/kube/periodical_enqueue_source.go index 20b658c61a..1b0ec1a31f 100644 --- a/pkg/util/kube/periodical_enqueue_source.go +++ b/pkg/util/kube/periodical_enqueue_source.go @@ -23,24 +23,29 @@ import ( "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" ) -func NewPeriodicalEnqueueSource(logger logrus.FieldLogger, client client.Client, objList client.ObjectList, period time.Duration, filters ...func(object client.Object) bool) *PeriodicalEnqueueSource { +func NewPeriodicalEnqueueSource( + logger logrus.FieldLogger, + client client.Client, + objList client.ObjectList, + period time.Duration, + option PeriodicalEnqueueSourceOption) *PeriodicalEnqueueSource { return &PeriodicalEnqueueSource{ - logger: logger.WithField("resource", reflect.TypeOf(objList).String()), - Client: client, - objList: objList, - period: period, - filterFuncs: filters, + logger: logger.WithField("resource", reflect.TypeOf(objList).String()), + Client: client, + objList: objList, + period: period, + option: option, } } @@ -49,13 +54,18 @@ func NewPeriodicalEnqueueSource(logger logrus.FieldLogger, client client.Client, // the reconcile logic periodically type PeriodicalEnqueueSource struct { client.Client - logger logrus.FieldLogger - objList client.ObjectList - period time.Duration - filterFuncs []func(object client.Object) bool + logger logrus.FieldLogger + objList client.ObjectList + period time.Duration + option PeriodicalEnqueueSourceOption } -func (p *PeriodicalEnqueueSource) Start(ctx context.Context, h handler.EventHandler, q workqueue.RateLimitingInterface, pre ...predicate.Predicate) error { +type PeriodicalEnqueueSourceOption struct { + OrderFunc func(objList client.ObjectList) client.ObjectList +} + +// Start enqueue items periodically. 
The predicates only apply to the GenericEvent +func (p *PeriodicalEnqueueSource) Start(ctx context.Context, h handler.EventHandler, q workqueue.RateLimitingInterface, predicates ...predicate.Predicate) error { go wait.Until(func() { p.logger.Debug("enqueueing resources ...") if err := p.List(ctx, p.objList); err != nil { @@ -66,20 +76,23 @@ func (p *PeriodicalEnqueueSource) Start(ctx context.Context, h handler.EventHand p.logger.Debug("no resources, skip") return } + if p.option.OrderFunc != nil { + p.objList = p.option.OrderFunc(p.objList) + } if err := meta.EachListItem(p.objList, func(object runtime.Object) error { - obj, ok := object.(metav1.Object) + obj, ok := object.(client.Object) if !ok { p.logger.Error("%s's type isn't metav1.Object", object.GetObjectKind().GroupVersionKind().String()) return nil } - for _, filter := range p.filterFuncs { - if filter != nil { - if enqueueObj := filter(object.(client.Object)); !enqueueObj { - p.logger.Debugf("skip enqueue object %s/%s due to filter function.", obj.GetNamespace(), obj.GetName()) - return nil - } + event := event.GenericEvent{Object: obj} + for _, predicate := range predicates { + if !predicate.Generic(event) { + p.logger.Debugf("skip enqueue object %s/%s due to the predicate.", obj.GetNamespace(), obj.GetName()) + return nil } } + q.Add(ctrl.Request{ NamespacedName: types.NamespacedName{ Namespace: obj.GetNamespace(), diff --git a/pkg/util/kube/periodical_enqueue_source_test.go b/pkg/util/kube/periodical_enqueue_source_test.go index 3621533477..8d5e142dd3 100644 --- a/pkg/util/kube/periodical_enqueue_source_test.go +++ b/pkg/util/kube/periodical_enqueue_source_test.go @@ -23,11 +23,14 @@ import ( "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "golang.org/x/net/context" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/util/workqueue" crclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/vmware-tanzu/velero/internal/storage" velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -39,7 +42,7 @@ func TestStart(t *testing.T) { ctx, cancelFunc := context.WithCancel(context.TODO()) client := (&fake.ClientBuilder{}).Build() queue := workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()) - source := NewPeriodicalEnqueueSource(logrus.WithContext(ctx), client, &velerov1.ScheduleList{}, 1*time.Second) + source := NewPeriodicalEnqueueSource(logrus.WithContext(ctx), client, &velerov1.ScheduleList{}, 1*time.Second, PeriodicalEnqueueSourceOption{}) require.Nil(t, source.Start(ctx, nil, queue)) @@ -65,7 +68,7 @@ func TestStart(t *testing.T) { require.Equal(t, 0, queue.Len()) } -func TestFilter(t *testing.T) { +func TestPredicate(t *testing.T) { require.Nil(t, velerov1.AddToScheme(scheme.Scheme)) ctx, cancelFunc := context.WithCancel(context.TODO()) @@ -76,9 +79,61 @@ func TestFilter(t *testing.T) { client, &velerov1.BackupStorageLocationList{}, 1*time.Second, - func(object crclient.Object) bool { - location := object.(*velerov1.BackupStorageLocation) - return storage.IsReadyToValidate(location.Spec.ValidationFrequency, location.Status.LastValidationTime, 1*time.Minute, logrus.WithContext(ctx).WithField("BackupStorageLocation", location.Name)) + PeriodicalEnqueueSourceOption{}, + ) + + require.Nil(t, source.Start(ctx, nil, queue, NewGenericEventPredicate(func(object 
crclient.Object) bool { + location := object.(*velerov1.BackupStorageLocation) + return storage.IsReadyToValidate(location.Spec.ValidationFrequency, location.Status.LastValidationTime, 1*time.Minute, logrus.WithContext(ctx).WithField("BackupStorageLocation", location.Name)) + }))) + + // Should not patch a backup storage location object status phase + // if the location's validation frequency is specifically set to zero + require.Nil(t, client.Create(ctx, &velerov1.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "location1", + Namespace: "default", + }, + Spec: velerov1.BackupStorageLocationSpec{ + ValidationFrequency: &metav1.Duration{Duration: 0}, + }, + Status: velerov1.BackupStorageLocationStatus{ + LastValidationTime: &metav1.Time{Time: time.Now()}, + }, + })) + time.Sleep(2 * time.Second) + + require.Equal(t, 0, queue.Len()) + + cancelFunc() +} + +func TestOrder(t *testing.T) { + require.Nil(t, velerov1.AddToScheme(scheme.Scheme)) + + ctx, cancelFunc := context.WithCancel(context.TODO()) + client := (&fake.ClientBuilder{}).Build() + queue := workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()) + source := NewPeriodicalEnqueueSource( + logrus.WithContext(ctx), + client, + &velerov1.BackupStorageLocationList{}, + 1*time.Second, + PeriodicalEnqueueSourceOption{ + OrderFunc: func(objList crclient.ObjectList) crclient.ObjectList { + locationList := &velerov1.BackupStorageLocationList{} + objArray := make([]runtime.Object, 0) + + // Generate BSL array. + locations, _ := meta.ExtractList(objList) + // Move default BSL to tail of array. + objArray = append(objArray, locations[1]) + objArray = append(objArray, locations[0]) + + meta.SetList(locationList, objArray) + + return locationList + }, }, ) @@ -98,9 +153,26 @@ func TestFilter(t *testing.T) { LastValidationTime: &metav1.Time{Time: time.Now()}, }, })) + require.Nil(t, client.Create(ctx, &velerov1.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "location2", + Namespace: "default", + }, + Spec: velerov1.BackupStorageLocationSpec{ + ValidationFrequency: &metav1.Duration{Duration: 0}, + Default: true, + }, + Status: velerov1.BackupStorageLocationStatus{ + LastValidationTime: &metav1.Time{Time: time.Now()}, + }, + })) time.Sleep(2 * time.Second) - require.Equal(t, 0, queue.Len()) + first, _ := queue.Get() + bsl := &velerov1.BackupStorageLocation{} + require.Equal(t, "location2", first.(reconcile.Request).Name) + require.Nil(t, client.Get(ctx, first.(reconcile.Request).NamespacedName, bsl)) + require.Equal(t, true, bsl.Spec.Default) cancelFunc() } diff --git a/pkg/util/kube/pod.go b/pkg/util/kube/pod.go new file mode 100644 index 0000000000..4e589c4f04 --- /dev/null +++ b/pkg/util/kube/pod.go @@ -0,0 +1,39 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package kube + +import ( + "github.com/pkg/errors" + corev1api "k8s.io/api/core/v1" +) + +// IsPodRunning does a well-rounded check to make sure the specified pod is running stably. 
+// If not, return the error found +func IsPodRunning(pod *corev1api.Pod) error { + if pod.Spec.NodeName == "" { + return errors.Errorf("pod is not scheduled, name=%s, namespace=%s, phase=%s", pod.Name, pod.Namespace, pod.Status.Phase) + } + + if pod.Status.Phase != corev1api.PodRunning { + return errors.Errorf("pod is not running, name=%s, namespace=%s, phase=%s", pod.Name, pod.Namespace, pod.Status.Phase) + } + + if pod.DeletionTimestamp != nil { + return errors.Errorf("pod is being terminated, name=%s, namespace=%s, phase=%s", pod.Name, pod.Namespace, pod.Status.Phase) + } + + return nil +} diff --git a/pkg/util/kube/predicate.go b/pkg/util/kube/predicate.go index 3073ef8816..a9660b34e7 100644 --- a/pkg/util/kube/predicate.go +++ b/pkg/util/kube/predicate.go @@ -19,6 +19,7 @@ package kube import ( "reflect" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" ) @@ -45,3 +46,53 @@ func (SpecChangePredicate) Update(e event.UpdateEvent) bool { newSpec := reflect.ValueOf(e.ObjectNew).Elem().FieldByName("Spec") return !reflect.DeepEqual(oldSpec.Interface(), newSpec.Interface()) } + +// NewGenericEventPredicate creates a new Predicate that checks the Generic event with the provided func +func NewGenericEventPredicate(f func(object client.Object) bool) predicate.Predicate { + return predicate.Funcs{ + GenericFunc: func(event event.GenericEvent) bool { + return f(event.Object) + }, + } +} + +// NewAllEventPredicate creates a new Predicate that checks all the events with the provided func +func NewAllEventPredicate(f func(object client.Object) bool) predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(event event.CreateEvent) bool { + return f(event.Object) + }, + DeleteFunc: func(event event.DeleteEvent) bool { + return f(event.Object) + }, + UpdateFunc: func(event event.UpdateEvent) bool { + return f(event.ObjectNew) + }, + GenericFunc: func(event event.GenericEvent) bool { + return f(event.Object) + }, + } +} + +// FalsePredicate always returns false for all kinds of events +type FalsePredicate struct{} + +// Create always returns false +func (f FalsePredicate) Create(event.CreateEvent) bool { + return false +} + +// Delete always returns false +func (f FalsePredicate) Delete(event.DeleteEvent) bool { + return false +} + +// Update always returns false +func (f FalsePredicate) Update(event.UpdateEvent) bool { + return false +} + +// Generic always returns false +func (f FalsePredicate) Generic(event.GenericEvent) bool { + return false +} diff --git a/pkg/util/kube/predicate_test.go b/pkg/util/kube/predicate_test.go index d1c3be8df5..40d1b8c9c0 100644 --- a/pkg/util/kube/predicate_test.go +++ b/pkg/util/kube/predicate_test.go @@ -178,3 +178,22 @@ func TestSpecChangePredicate(t *testing.T) { }) } } + +func TestNewGenericEventPredicate(t *testing.T) { + predicate := NewGenericEventPredicate(func(object client.Object) bool { + return false + }) + + assert.False(t, predicate.Generic(event.GenericEvent{})) +} + +func TestNewAllEventPredicate(t *testing.T) { + predicate := NewAllEventPredicate(func(object client.Object) bool { + return false + }) + + assert.False(t, predicate.Create(event.CreateEvent{})) + assert.False(t, predicate.Update(event.UpdateEvent{})) + assert.False(t, predicate.Delete(event.DeleteEvent{})) + assert.False(t, predicate.Generic(event.GenericEvent{})) +} diff --git a/pkg/util/logging/kopia_log.go b/pkg/util/logging/kopia_log.go new file mode 100644 index 0000000000..d70784d237 --- 
/dev/null +++ b/pkg/util/logging/kopia_log.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logging + +import ( + "context" + + "github.com/kopia/kopia/repo/logging" + "github.com/sirupsen/logrus" +) + +type kopiaLog struct { + module string + logger logrus.FieldLogger +} + +// SetupKopiaLog sets the Kopia log handler to the specific context, Kopia modules +// call the logger in the context to write logs +func SetupKopiaLog(ctx context.Context, logger logrus.FieldLogger) context.Context { + return logging.WithLogger(ctx, func(module string) logging.Logger { + return &kopiaLog{ + module: module, + logger: logger, + } + }) +} + +func (kl *kopiaLog) Debugf(msg string, args ...interface{}) { + logger := kl.logger.WithField("logModule", kl.getLogModule()) + logger.Debugf(msg, args...) +} + +func (kl *kopiaLog) Debugw(msg string, keyValuePairs ...interface{}) { + logger := kl.logger.WithField("logModule", kl.getLogModule()) + logger.WithFields(getLogFields(keyValuePairs...)).Debug(msg) +} + +func (kl *kopiaLog) Infof(msg string, args ...interface{}) { + logger := kl.logger.WithField("logModule", kl.getLogModule()) + logger.Infof(msg, args...) +} + +func (kl *kopiaLog) Warnf(msg string, args ...interface{}) { + logger := kl.logger.WithField("logModule", kl.getLogModule()) + logger.Warnf(msg, args...) +} + +// We see Kopia generates error logs for some normal cases or non-critical +// cases. So Kopia's error logs are regarded as warning logs so that they don't +// affect Velero's workflow. +func (kl *kopiaLog) Errorf(msg string, args ...interface{}) { + logger := kl.logger.WithFields(logrus.Fields{ + "logModule": kl.getLogModule(), + "sublevel": "error", + }) + + logger.Warnf(msg, args...) +} + +func (kl *kopiaLog) getLogModule() string { + return "kopia/" + kl.module +} + +func getLogFields(keyValuePairs ...interface{}) map[string]interface{} { + m := map[string]interface{}{} + for i := 0; i+1 < len(keyValuePairs); i += 2 { + s, ok := keyValuePairs[i].(string) + if !ok { + s = "non-string-key" + } + + m[s] = keyValuePairs[i+1] + } + + return m +} diff --git a/pkg/util/logging/kopia_log_test.go b/pkg/util/logging/kopia_log_test.go new file mode 100644 index 0000000000..2498cb418b --- /dev/null +++ b/pkg/util/logging/kopia_log_test.go @@ -0,0 +1,86 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package logging + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetLogFields(t *testing.T) { + testCases := []struct { + name string + pairs []interface{} + expected map[string]interface{} + }{ + { + name: "normal", + pairs: []interface{}{ + "fake-key1", + "fake-value1", + "fake-key2", + 10, + "fake-key3", + struct{ v int }{v: 10}, + }, + expected: map[string]interface{}{ + "fake-key1": "fake-value1", + "fake-key2": 10, + "fake-key3": struct{ v int }{v: 10}, + }, + }, + { + name: "non string key", + pairs: []interface{}{ + "fake-key1", + "fake-value1", + 10, + 10, + "fake-key3", + struct{ v int }{v: 10}, + }, + expected: map[string]interface{}{ + "fake-key1": "fake-value1", + "non-string-key": 10, + "fake-key3": struct{ v int }{v: 10}, + }, + }, + { + name: "missing value", + pairs: []interface{}{ + "fake-key1", + "fake-value1", + "fake-key2", + 10, + "fake-key3", + }, + expected: map[string]interface{}{ + "fake-key1": "fake-value1", + "fake-key2": 10, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + m := getLogFields(tc.pairs...) + + require.Equal(t, tc.expected, m) + }) + } +} diff --git a/pkg/volume/snapshotlocation.go b/pkg/volume/snapshotlocation.go new file mode 100644 index 0000000000..62675dd444 --- /dev/null +++ b/pkg/volume/snapshotlocation.go @@ -0,0 +1,44 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volume + +import ( + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/vmware-tanzu/velero/internal/credentials" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +// UpdateVolumeSnapshotLocationWithCredentialConfig adds the credentials file path to the config +// if the VSL specifies a credential +func UpdateVolumeSnapshotLocationWithCredentialConfig(location *velerov1api.VolumeSnapshotLocation, credentialStore credentials.FileStore, logger logrus.FieldLogger) error { + if location.Spec.Config == nil { + location.Spec.Config = make(map[string]string) + } + // If the VSL specifies a credential, fetch its path on disk and pass to + // plugin via the config. 
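The comment that closes the hunk above captures the contract of this change: when a `VolumeSnapshotLocation` names a credential, Velero materialises the secret on disk and passes the file path to the volume snapshotter plugin via the `credentialsFile` config key (the same key the troubleshooting doc changes below refer to). Purely as a hedged sketch of the consuming side, not code from any actual plugin, a snapshotter's init step might handle that key like this; the function name, sample values, and fallback message are assumptions:

```go
package main

import (
	"fmt"
	"os"
)

// initSnapshotter sketches what a volume snapshotter plugin could do with the
// per-location credentials Velero injects: if the "credentialsFile" key is in
// the VSL config, prefer it over the credentials provided at install time.
func initSnapshotter(config map[string]string) error {
	if credsFile, ok := config["credentialsFile"]; ok {
		if _, err := os.Stat(credsFile); err != nil {
			return fmt.Errorf("credentials file from VolumeSnapshotLocation is not readable: %w", err)
		}
		fmt.Println("using per-location credentials from", credsFile)
		return nil
	}
	fmt.Println("no per-location credential set; using install-time credentials")
	return nil
}

func main() {
	// Hypothetical config as a plugin might receive it after this change.
	err := initSnapshotter(map[string]string{
		"region":          "us-west-2",
		"credentialsFile": "/tmp/credentials/velero/secret-name-key-in-secret",
	})
	if err != nil {
		fmt.Println(err)
	}
}
```

The producer side, `UpdateVolumeSnapshotLocationWithCredentialConfig`, continues directly below.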
+ if location.Spec.Credential != nil && credentialStore != nil { + credsFile, err := credentialStore.Path(location.Spec.Credential) + if err != nil { + return errors.Wrap(err, "unable to get credentials") + } + + location.Spec.Config["credentialsFile"] = credsFile + } + return nil +} diff --git a/site/content/contributors/04-eleanor-millman.md b/site/content/contributors/04-eleanor-millman.md deleted file mode 100644 index f06e96fbfb..0000000000 --- a/site/content/contributors/04-eleanor-millman.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -first_name: Eleanor -last_name: Millman -image: /img/contributors/eleanor-millman.jpg -github_handle: eleanor-millman ---- -Product Manager diff --git a/site/content/contributors/04-pradeep-chaturvedi.md b/site/content/contributors/04-pradeep-chaturvedi.md new file mode 100644 index 0000000000..73d1922062 --- /dev/null +++ b/site/content/contributors/04-pradeep-chaturvedi.md @@ -0,0 +1,7 @@ +--- +first_name: Pradeep +last_name: Chaturvedi +image: /img/contributors/pradeep-chaturvedi.png +github_handle: pradeepkchaturvedi +--- +Product Manager diff --git a/site/content/docs/main/api-types/schedule.md b/site/content/docs/main/api-types/schedule.md index 5bc430c8ff..31130f43ae 100644 --- a/site/content/docs/main/api-types/schedule.md +++ b/site/content/docs/main/api-types/schedule.md @@ -136,6 +136,9 @@ spec: # processed. Only "exec" hooks are supported. post: # Same content as pre above. + # Specifies whether to use OwnerReferences on backups created by this Schedule. + # Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional. + useOwnerReferencesInBackup: false status: # The current phase of the latest scheduled backup. Valid values are New, FailedValidation, InProgress, Completed, PartiallyFailed, Failed. phase: "" diff --git a/site/content/docs/main/api-types/volumesnapshotlocation.md b/site/content/docs/main/api-types/volumesnapshotlocation.md index 28ac332222..e6758f8faf 100644 --- a/site/content/docs/main/api-types/volumesnapshotlocation.md +++ b/site/content/docs/main/api-types/volumesnapshotlocation.md @@ -21,6 +21,9 @@ metadata: namespace: velero spec: provider: aws + credential: + name: secret-name + key: key-in-secret config: region: us-west-2 profile: "default" @@ -37,4 +40,7 @@ The configurable parameters are as follows: | --- | --- | --- | --- | | `provider` | String | Required Field | The name for whichever storage provider will be used to create/store the volume snapshots. See [your volume snapshot provider's plugin documentation](../supported-providers) for the appropriate value to use. | | `config` | map string string | None (Optional) | Provider-specific configuration keys/values to be passed to the volume snapshotter plugin. See [your volume snapshot provider's plugin documentation](../supported-providers) for details. | +| `credential` | [corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core) | Optional Field | The credential information to be used with this location. | +| `credential/name` | String | Optional Field | The name of the secret within the Velero namespace which contains the credential information. | +| `credential/key` | String | Optional Field | The key to use within the secret. 
| {{< /table >}} diff --git a/site/content/docs/main/backup-hooks.md b/site/content/docs/main/backup-hooks.md index ef38dc7fed..87310fbb3c 100644 --- a/site/content/docs/main/backup-hooks.md +++ b/site/content/docs/main/backup-hooks.md @@ -52,9 +52,6 @@ spec. This examples walks you through using both pre and post hooks for freezing a file system. Freezing the file system is useful to ensure that all pending disk I/O operations have completed prior to taking a snapshot. -This example uses [examples/nginx-app/with-pv.yaml][2]. Follow the [steps for your provider][3] to -setup this example. - ### Annotations The Velero [example/nginx-app/with-pv.yaml][2] serves as an example of adding the pre and post hook annotations directly @@ -108,4 +105,3 @@ Note that the container must support the shell command you use. [1]: api-types/backup.md [2]: https://github.com/vmware-tanzu/velero/blob/main/examples/nginx-app/with-pv.yaml -[3]: cloud-common.md diff --git a/site/content/docs/main/locations.md b/site/content/docs/main/locations.md index d5dbc710a6..fe74391f9e 100644 --- a/site/content/docs/main/locations.md +++ b/site/content/docs/main/locations.md @@ -26,8 +26,10 @@ This configuration design enables a number of different use cases, including: All [plugins maintained by the Velero team][5] support this feature. If you are using a plugin from another provider, please check their documentation to determine if this feature is supported. -- Velero only supports a single set of credentials for `VolumeSnapshotLocations`. - Velero will always use the credentials provided at install time (stored in the `cloud-credentials` secret) for volume snapshots. +- Velero supports multiple credentials for `VolumeSnapshotLocations`, allowing you to specify the credentials to use with any `VolumeSnapshotLocation`. + However, use of this feature requires support within the plugin for the object storage provider you wish to use. + All [plugins maintained by the Velero team][5] support this feature. + If you are using a plugin from another provider, please check their documentation to determine if this feature is supported. - Volume snapshots are still limited by where your provider allows you to create snapshots. For example, AWS and Azure do not allow you to create a volume snapshot in a different region than where the volume is. If you try to take a Velero backup using a volume snapshot location with a different region than where your cluster's volumes are, the backup will fail. diff --git a/site/content/docs/main/troubleshooting.md b/site/content/docs/main/troubleshooting.md index cdfd253059..dd1a331493 100644 --- a/site/content/docs/main/troubleshooting.md +++ b/site/content/docs/main/troubleshooting.md @@ -175,9 +175,9 @@ Follow the below troubleshooting steps to confirm that Velero is using the corre ``` -### Troubleshooting `BackupStorageLocation` credentials +### Troubleshooting `BackupStorageLocation` and `VolumeSnapshotLocation` credentials -Follow the below troubleshooting steps to confirm that Velero is using the correct credentials if using credentials specific to a [`BackupStorageLocation`][10]: +Follow the below troubleshooting steps to confirm that Velero is using the correct credentials if using credentials specific to a [`BackupStorageLocation` or `VolumeSnapshotLocation`][10]: 1. Confirm that the object storage provider plugin being used supports multiple credentials. 
If the logs from the Velero deployment contain the error message `"config has invalid keys credentialsFile"`, the version of your object storage plugin does not yet support multiple credentials. @@ -186,7 +186,7 @@ Follow the below troubleshooting steps to confirm that Velero is using the corre If you are using a plugin from a different provider, please contact them for further advice. -1. Confirm that the secret and key referenced by the `BackupStorageLocation` exists in the Velero namespace and has the correct content: +1. Confirm that the secret and key referenced by the `BackupStorageLocation` or `VolumeSnapshotLocation` exists in the Velero namespace and has the correct content: ```bash # Determine which secret and key the BackupStorageLocation is using BSL_SECRET=$(kubectl get backupstoragelocations.velero.io -n velero -o yaml -o jsonpath={.spec.credential.name}) @@ -197,11 +197,21 @@ Follow the below troubleshooting steps to confirm that Velero is using the corre # Print the content of the secret and ensure it is correct kubectl -n velero get secret $BSL_SECRET -ojsonpath={.data.$BSL_SECRET_KEY} | base64 --decode + + # Determine which secret and key the VolumeSnapshotLocation is using + VSL_SECRET=$(kubectl get volumesnapshotlocations.velero.io -n velero -o yaml -o jsonpath={.spec.credential.name}) + VSL_SECRET_KEY=$(kubectl get volumesnapshotlocations.velero.io -n velero -o yaml -o jsonpath={.spec.credential.key}) + + # Confirm that the secret exists + kubectl -n velero get secret $VSL_SECRET + + # Print the content of the secret and ensure it is correct + kubectl -n velero get secret $VSL_SECRET -ojsonpath={.data.$VSL_SECRET_KEY} | base64 --decode ``` If the secret can't be found, the secret does not exist within the Velero namespace and must be created. If no output is produced when printing the contents of the secret, the key within the secret may not exist or may have no content. - Ensure that the key exists within the secret's data by checking the output from `kubectl -n velero describe secret $BSL_SECRET`. + Ensure that the key exists within the secret's data by checking the output from `kubectl -n velero describe secret $BSL_SECRET` or `kubectl -n velero describe secret $VSL_SECRET`. If it does not exist, follow the instructions for [editing a Kubernetes secret][12] to add the base64 encoded credentials data. diff --git a/site/content/docs/v1.6/api-types/schedule.md b/site/content/docs/v1.6/api-types/schedule.md index 8ccc299698..3d77a05a29 100644 --- a/site/content/docs/v1.6/api-types/schedule.md +++ b/site/content/docs/v1.6/api-types/schedule.md @@ -125,6 +125,9 @@ spec: # processed. Only "exec" hooks are supported. post: # Same content as pre above. + # Specifies whether to use OwnerReferences on backups created by this Schedule. + # Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional. + useOwnerReferencesInBackup: false status: # The current phase of the latest scheduled backup. Valid values are New, FailedValidation, InProgress, Completed, PartiallyFailed, Failed. phase: "" diff --git a/site/content/docs/v1.7/api-types/schedule.md b/site/content/docs/v1.7/api-types/schedule.md index 3c98bef16d..57c1539c8a 100644 --- a/site/content/docs/v1.7/api-types/schedule.md +++ b/site/content/docs/v1.7/api-types/schedule.md @@ -130,6 +130,9 @@ spec: # processed. Only "exec" hooks are supported. post: # Same content as pre above. + # Specifies whether to use OwnerReferences on backups created by this Schedule. 
+ # Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional. + useOwnerReferencesInBackup: false status: # The current phase of the latest scheduled backup. Valid values are New, FailedValidation, InProgress, Completed, PartiallyFailed, Failed. phase: "" diff --git a/site/content/docs/v1.8/api-types/schedule.md b/site/content/docs/v1.8/api-types/schedule.md index 9c5dc26cee..0c7e9fffcb 100644 --- a/site/content/docs/v1.8/api-types/schedule.md +++ b/site/content/docs/v1.8/api-types/schedule.md @@ -132,6 +132,9 @@ spec: # processed. Only "exec" hooks are supported. post: # Same content as pre above. + # Specifies whether to use OwnerReferences on backups created by this Schedule. + # Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional. + useOwnerReferencesInBackup: false status: # The current phase of the latest scheduled backup. Valid values are New, FailedValidation, InProgress, Completed, PartiallyFailed, Failed. phase: "" diff --git a/site/content/docs/v1.9/api-types/schedule.md b/site/content/docs/v1.9/api-types/schedule.md index ccc441999d..69ed033816 100644 --- a/site/content/docs/v1.9/api-types/schedule.md +++ b/site/content/docs/v1.9/api-types/schedule.md @@ -137,6 +137,9 @@ spec: # processed. Only "exec" hooks are supported. post: # Same content as pre above. + # Specifies whether to use OwnerReferences on backups created by this Schedule. + # Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional. + useOwnerReferencesInBackup: false status: # The current phase of the latest scheduled backup. Valid values are New, FailedValidation, InProgress, Completed, PartiallyFailed, Failed. phase: "" diff --git a/site/content/docs/v1.9/how-velero-works.md b/site/content/docs/v1.9/how-velero-works.md index 19fc89a94f..227c927c1a 100644 --- a/site/content/docs/v1.9/how-velero-works.md +++ b/site/content/docs/v1.9/how-velero-works.md @@ -88,6 +88,8 @@ When you create a backup, you can specify a TTL (time to live) by adding the fla The TTL flag allows the user to specify the backup retention period with the value specified in hours, minutes and seconds in the form `--ttl 24h0m0s`. If not specified, a default TTL value of 30 days will be applied. +The effects of expiration are not applied immediately; they are applied when the gc-controller runs its reconciliation loop, which happens every hour. + If backup fails to delete, a label `velero.io/gc-failure=` will be added to the backup custom resource. You can use this label to filter and select backups that failed to delete. diff --git a/site/static/img/contributors/pradeep-chaturvedi.png b/site/static/img/contributors/pradeep-chaturvedi.png new file mode 100644 index 0000000000..ac1a010306 Binary files /dev/null and b/site/static/img/contributors/pradeep-chaturvedi.png differ diff --git a/test/e2e/Makefile b/test/e2e/Makefile index f676fabd6c..8f703cc003 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -89,9 +89,14 @@ ADDITIONAL_BSL_CONFIG ?= FEATURES ?= DEBUG_E2E_TEST ?= false +# Parameters for running migration tests along with all other E2E tests. Provide both of them, +# or leave both empty to skip migration tests without affecting the other +# E2E tests.
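The Makefile comment above describes an all-or-nothing rule for the migration-test parameters: set both `DEFAULT_CLUSTER` and `STANDBY_CLUSTER` (passed through to the suite as `-default-cluster` and `-standby-cluster`), or leave both empty and the migration tests are skipped without affecting the rest of the suite. A small, self-contained sketch of that validation (the check itself is illustrative; the E2E suite registers these flags but does not necessarily validate them this way):

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// Flag names mirror the ones the E2E suite registers for migration tests.
	defaultCluster := flag.String("default-cluster", "", "Default cluster context for migration test.")
	standbyCluster := flag.String("standby-cluster", "", "Standby cluster context for migration test.")
	flag.Parse()

	// Enforce the "both or neither" rule described in the Makefile comment.
	if (*defaultCluster == "") != (*standbyCluster == "") {
		fmt.Fprintln(os.Stderr, "set both -default-cluster and -standby-cluster, or leave both empty to skip migration tests")
		os.Exit(1)
	}

	if *defaultCluster == "" {
		fmt.Println("migration tests skipped; other E2E tests are unaffected")
		return
	}
	fmt.Printf("running migration tests from %q to %q\n", *defaultCluster, *standbyCluster)
}
```

The new `UPLOADER_TYPE` parameter and the cluster variables the comment describes follow below.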
DEFAULT_CLUSTER ?= STANDBY_CLUSTER ?= +UPLOADER_TYPE ?= + .PHONY:ginkgo ginkgo: # Make sure ginkgo is in $GOPATH/bin @@ -134,7 +139,8 @@ run: ginkgo -kibishii-directory=$(KIBISHII_DIRECTORY) \ -debug-e2e-test=$(DEBUG_E2E_TEST) \ -default-cluster=$(DEFAULT_CLUSTER) \ - -standby-cluster=$(STANDBY_CLUSTER) + -standby-cluster=$(STANDBY_CLUSTER) \ + -uploader-type=$(UPLOADER_TYPE) build: ginkgo mkdir -p $(OUTPUT_DIR) diff --git a/test/e2e/backup/backup.go b/test/e2e/backup/backup.go index e4e62db593..5507737180 100644 --- a/test/e2e/backup/backup.go +++ b/test/e2e/backup/backup.go @@ -59,8 +59,11 @@ func BackupRestoreTest(useVolumeSnapshots bool) { }) AfterEach(func() { - if VeleroCfg.InstallVelero { - if !VeleroCfg.Debug { + if !VeleroCfg.Debug { + By("Clean backups after test", func() { + DeleteBackups(context.Background(), *VeleroCfg.ClientToInstallVelero) + }) + if VeleroCfg.InstallVelero { err = VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace) Expect(err).To(Succeed()) } diff --git a/test/e2e/backups/deletion.go b/test/e2e/backups/deletion.go index 0663e554ad..274df9d200 100644 --- a/test/e2e/backups/deletion.go +++ b/test/e2e/backups/deletion.go @@ -64,12 +64,16 @@ func backup_deletion_test(useVolumeSnapshots bool) { }) AfterEach(func() { - if VeleroCfg.InstallVelero { - if !VeleroCfg.Debug { + if !VeleroCfg.Debug { + By("Clean backups after test", func() { + DeleteBackups(context.Background(), *VeleroCfg.ClientToInstallVelero) + }) + if VeleroCfg.InstallVelero { err = VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace) Expect(err).To(Succeed()) } } + }) When("kibishii is the sample workload", func() { @@ -118,12 +122,14 @@ func runBackupDeletionTests(client TestClient, veleroCfg VerleroConfig, backupNa BackupCfg.BackupLocation = backupLocation BackupCfg.UseVolumeSnapshots = useVolumeSnapshots BackupCfg.Selector = "" - if err := VeleroBackupNamespace(oneHourTimeout, veleroCLI, veleroNamespace, BackupCfg); err != nil { - // TODO currently, the upgrade case covers the upgrade path from 1.6 to main and the velero v1.6 doesn't support "debug" command - // TODO move to "runDebug" after we bump up to 1.7 in the upgrade case - VeleroBackupLogs(context.Background(), VeleroCfg.UpgradeFromVeleroCLI, veleroNamespace, backupName) - return errors.Wrapf(err, "Failed to backup kibishii namespace %s", deletionTest) - } + + By(fmt.Sprintf("Back up workload with name %s", BackupCfg.BackupName), func() { + Expect(VeleroBackupNamespace(oneHourTimeout, veleroCLI, + veleroNamespace, BackupCfg)).To(Succeed(), func() string { + RunDebug(context.Background(), veleroCLI, veleroNamespace, BackupCfg.BackupName, "") + return "Fail to backup workload" + }) + }) if providerName == "vsphere" && useVolumeSnapshots { // Wait for uploads started by the Velero Plug-in for vSphere to complete @@ -172,22 +178,27 @@ func runBackupDeletionTests(client TestClient, veleroCfg VerleroConfig, backupNa return errors.Wrap(err, "exceed waiting for snapshot created in cloud") } } - backupName = "backup-1-" + UUIDgen.String() - if err := VeleroBackupNamespace(oneHourTimeout, veleroCLI, veleroNamespace, BackupCfg); err != nil { - // TODO currently, the upgrade case covers the upgrade path from 1.6 to main and the velero v1.6 doesn't support "debug" command - // TODO move to "runDebug" after we bump up to 1.7 in the upgrade case - VeleroBackupLogs(context.Background(), VeleroCfg.UpgradeFromVeleroCLI, veleroNamespace, backupName) - return errors.Wrapf(err, "Failed to 
backup kibishii namespace %s", deletionTest) - } + BackupCfg.BackupName = backupName + + By(fmt.Sprintf("Back up workload with name %s", BackupCfg.BackupName), func() { + Expect(VeleroBackupNamespace(oneHourTimeout, veleroCLI, + veleroNamespace, BackupCfg)).To(Succeed(), func() string { + RunDebug(context.Background(), veleroCLI, veleroNamespace, BackupCfg.BackupName, "") + return "Fail to backup workload" + }) + }) + + err = DeleteObjectsInBucket(VeleroCfg.CloudProvider, VeleroCfg.CloudCredentialsFile, VeleroCfg.BSLBucket, bslPrefix, bslConfig, backupName, BackupObjectsPrefix) if err != nil { return err } + err = ObjectsShouldNotBeInBucket(VeleroCfg.CloudProvider, VeleroCfg.CloudCredentialsFile, VeleroCfg.BSLBucket, bslPrefix, bslConfig, backupName, BackupObjectsPrefix, 1) if err != nil { return err } + err = DeleteBackupResource(context.Background(), veleroCLI, backupName) if err != nil { return errors.Wrapf(err, "|| UNEXPECTED || - Failed to delete backup %q", backupName) diff --git a/test/e2e/backups/schedule.go b/test/e2e/backups/schedule.go new file mode 100644 index 0000000000..8cabe529c6 --- /dev/null +++ b/test/e2e/backups/schedule.go @@ -0,0 +1,166 @@ +package backups + +import ( + "context" + "fmt" + "math/rand" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . "github.com/vmware-tanzu/velero/test/e2e" + . "github.com/vmware-tanzu/velero/test/e2e/test" + . "github.com/vmware-tanzu/velero/test/e2e/util/k8s" + . "github.com/vmware-tanzu/velero/test/e2e/util/velero" +) + +type ScheduleBackup struct { + TestCase + ScheduleName string + ScheduleArgs []string + Period int // Limitation: the unit is minutes only and it must divide 60 evenly + randBackupName string + verifyTimes int +} + +var ScheduleBackupTest func() = TestFunc(&ScheduleBackup{TestCase: TestCase{NSBaseName: "ns", NSIncluded: &[]string{"ns1"}}}) + +func (n *ScheduleBackup) Init() error { + n.Client = TestClientInstance + n.Period = 3 + n.verifyTimes = 5 // more verification rounds give more confidence + n.TestMsg = &TestMSG{ + Desc: "Set up a scheduled backup defined by a Cron expression", + FailedMSG: "Failed to schedule a backup", + Text: "should back up periodically according to the schedule", + } + return nil +} + +func (n *ScheduleBackup) StartRun() error { + + n.ScheduleName = n.ScheduleName + "schedule-" + UUIDgen.String() + n.RestoreName = n.RestoreName + "restore-ns-mapping-" + UUIDgen.String() + + n.ScheduleArgs = []string{ + "schedule", "create", "--namespace", VeleroCfg.VeleroNamespace, n.ScheduleName, + "--include-namespaces", strings.Join(*n.NSIncluded, ","), + "--schedule=*/" + fmt.Sprintf("%v", n.Period) + " * * * *", + } + + return nil +} +func (n *ScheduleBackup) CreateResources() error { + n.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + for _, ns := range *n.NSIncluded { + By(fmt.Sprintf("Creating namespace %s ......\n", ns), func() { + Expect(CreateNamespace(n.Ctx, n.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns)) + }) + configmaptName := n.NSBaseName + fmt.Printf("Creating configmap %s in namespace %s ...\n", configmaptName, ns) + _, err := CreateConfigMap(n.Client.ClientGo, ns, configmaptName, nil) + Expect(err).To(Succeed(), fmt.Sprintf("failed to create configmap in the namespace %q", ns)) + Expect(WaitForConfigMapComplete(n.Client.ClientGo, ns, configmaptName)).To(Succeed(), + fmt.Sprintf("failed to ensure configmap completion in namespace: %q", ns)) + } + return nil +} + +func (n *ScheduleBackup) Backup() error { + // Wait until the
beginning of the given period to create schedule, it will give us + // a predictable period to wait for the first scheduled backup, and verify no immediate + // scheduled backup was created between schedule creation and first scheduled backup. + By(fmt.Sprintf("Creating schedule %s ......\n", n.ScheduleName), func() { + for i := 0; i < n.Period*60/30; i++ { + time.Sleep(30 * time.Second) + now := time.Now().Minute() + triggerNow := now % n.Period + if triggerNow == 0 { + Expect(VeleroCmdExec(n.Ctx, VeleroCfg.VeleroCLI, n.ScheduleArgs)).To(Succeed()) + break + } + } + }) + return nil +} +func (n *ScheduleBackup) Destroy() error { + By(fmt.Sprintf("Schedule %s is created without any delay\n", n.ScheduleName), func() { + creationTimestamp, err := GetSchedule(context.Background(), VeleroCfg.VeleroNamespace, n.ScheduleName) + Expect(err).To(Succeed()) + + creationTime, err := time.Parse(time.RFC3339, strings.Replace(creationTimestamp, "'", "", -1)) + Expect(err).To(Succeed()) + fmt.Printf("Schedule %s created at %s\n", n.ScheduleName, creationTime) + now := time.Now() + diff := creationTime.Sub(now) + Expect(diff.Minutes() < 1).To(Equal(true)) + }) + + By(fmt.Sprintf("No immediate backup is created by schedule %s\n", n.ScheduleName), func() { + for i := 0; i < n.Period; i++ { + time.Sleep(1 * time.Minute) + now := time.Now() + fmt.Printf("Get backup for #%d time at %v\n", i, now) + //Ignore the last minute in the period avoiding met the 1st backup by schedule + if i != n.Period-1 { + backupsInfo, err := GetScheduledBackupsCreationTime(context.Background(), VeleroCfg.VeleroCLI, "default", n.ScheduleName) + Expect(err).To(Succeed()) + Expect(len(backupsInfo) == 0).To(Equal(true)) + } + } + }) + + By("Delay one more minute to make sure the new backup was created in the given period", func() { + time.Sleep(1 * time.Minute) + }) + + By(fmt.Sprintf("Get backups every %d minute, and backups count should increase 1 more step in the same pace\n", n.Period), func() { + for i := 0; i < n.verifyTimes; i++ { + fmt.Printf("Start to sleep %d minute #%d time...\n", n.Period, i+1) + time.Sleep(time.Duration(n.Period) * time.Minute) + bMap := make(map[string]string) + backupsInfo, err := GetScheduledBackupsCreationTime(context.Background(), VeleroCfg.VeleroCLI, "default", n.ScheduleName) + Expect(err).To(Succeed()) + Expect(len(backupsInfo) == i+2).To(Equal(true)) + for index, bi := range backupsInfo { + bList := strings.Split(bi, ",") + fmt.Printf("Backup %d: %v\n", index, bList) + bMap[bList[0]] = bList[1] + _, err := time.Parse("2006-01-02 15:04:05 -0700 MST", bList[1]) + Expect(err).To(Succeed()) + } + if i == n.verifyTimes-1 { + backupInfo := backupsInfo[rand.Intn(len(backupsInfo))] + n.randBackupName = strings.Split(backupInfo, ",")[0] + } + } + }) + + n.BackupName = strings.Replace(n.randBackupName, " ", "", -1) + + By("Delete all namespaces", func() { + Expect(CleanupNamespacesWithPoll(n.Ctx, n.Client, n.NSBaseName)).To(Succeed(), "Could cleanup retrieve namespaces") + }) + + n.RestoreArgs = []string{ + "create", "--namespace", VeleroCfg.VeleroNamespace, "restore", n.RestoreName, + "--from-backup", n.BackupName, + "--wait", + } + + return nil +} + +func (n *ScheduleBackup) Verify() error { + By("Namespaces were restored", func() { + for _, ns := range *n.NSIncluded { + configmap, err := GetConfigmap(n.Client.ClientGo, ns, n.NSBaseName) + fmt.Printf("Restored configmap is %v\n", configmap) + Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to list configmap in namespace: %q\n", ns)) + } + + }) + 
return nil +} diff --git a/test/e2e/backups/sync_backups.go b/test/e2e/backups/sync_backups.go index db062e67d3..deb9befa64 100644 --- a/test/e2e/backups/sync_backups.go +++ b/test/e2e/backups/sync_backups.go @@ -64,11 +64,15 @@ func BackupsSyncTest() { }) AfterEach(func() { - if VeleroCfg.InstallVelero { - if !VeleroCfg.Debug { + if !VeleroCfg.Debug { + By("Clean backups after test", func() { + DeleteBackups(context.Background(), *VeleroCfg.ClientToInstallVelero) + }) + if VeleroCfg.InstallVelero { Expect(VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).To(Succeed()) } } + }) It("Backups in object storage should be synced to a new Velero successfully", func() { @@ -76,10 +80,11 @@ func BackupsSyncTest() { By(fmt.Sprintf("Prepare workload as target to backup by creating namespace %s namespace", test.testNS)) Expect(CreateNamespace(test.ctx, *VeleroCfg.ClientToInstallVelero, test.testNS)).To(Succeed(), fmt.Sprintf("Failed to create %s namespace", test.testNS)) - - defer func() { - Expect(DeleteNamespace(test.ctx, *VeleroCfg.ClientToInstallVelero, test.testNS, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.testNS)) - }() + if !VeleroCfg.Debug { + defer func() { + Expect(DeleteNamespace(test.ctx, *VeleroCfg.ClientToInstallVelero, test.testNS, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.testNS)) + }() + } var BackupCfg BackupConfig BackupCfg.BackupName = test.backupName @@ -88,10 +93,11 @@ func BackupsSyncTest() { BackupCfg.UseVolumeSnapshots = false BackupCfg.Selector = "" By(fmt.Sprintf("Backup the workload in %s namespace", test.testNS), func() { - if err = VeleroBackupNamespace(test.ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, BackupCfg); err != nil { + Expect(VeleroBackupNamespace(test.ctx, VeleroCfg.VeleroCLI, + VeleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, test.backupName, "") - } - Expect(err).To(Succeed(), fmt.Sprintf("Failed to backup %s namespace", test.testNS)) + return "Fail to backup workload" + }) }) By("Uninstall velero", func() { @@ -120,7 +126,6 @@ func BackupsSyncTest() { fmt.Sprintf("Failed to delete the namespace %s", test.testNS)) }() } - var BackupCfg BackupConfig BackupCfg.BackupName = test.backupName BackupCfg.Namespace = test.testNS @@ -128,10 +133,11 @@ func BackupsSyncTest() { BackupCfg.UseVolumeSnapshots = false BackupCfg.Selector = "" By(fmt.Sprintf("Backup the workload in %s namespace", test.testNS), func() { - if err = VeleroBackupNamespace(test.ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, BackupCfg); err != nil { + Expect(VeleroBackupNamespace(test.ctx, VeleroCfg.VeleroCLI, + VeleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, test.backupName, "") - } - Expect(err).To(Succeed(), fmt.Sprintf("Failed to backup %s namespace", test.testNS)) + return "Fail to backup workload" + }) }) By(fmt.Sprintf("Delete %s backup files in object store", test.backupName), func() { diff --git a/test/e2e/backups/ttl.go b/test/e2e/backups/ttl.go index 78f1915a11..6ce8816a93 100644 --- a/test/e2e/backups/ttl.go +++ b/test/e2e/backups/ttl.go @@ -58,12 +58,11 @@ func (b *TTL) Init() { } func TTLTest() { + var err error useVolumeSnapshots := true test := new(TTL) - client, err := NewTestClient(VeleroCfg.DefaultCluster) - if err != nil { - println(err.Error()) - } + client := 
*VeleroCfg.ClientToInstallVelero + //Expect(err).To(Succeed(), "Failed to instantiate cluster client for backup tests") BeforeEach(func() { @@ -76,12 +75,15 @@ func TTLTest() { }) AfterEach(func() { - if VeleroCfg.InstallVelero { - VeleroCfg.GCFrequency = "" - if !VeleroCfg.Debug { + VeleroCfg.GCFrequency = "" + if !VeleroCfg.Debug { + By("Clean backups after test", func() { + DeleteBackups(context.Background(), *VeleroCfg.ClientToInstallVelero) + }) + if VeleroCfg.InstallVelero { Expect(VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).To(Succeed()) - Expect(DeleteNamespace(test.ctx, client, test.testNS, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.testNS)) } + Expect(DeleteNamespace(test.ctx, client, test.testNS, false)).To(Succeed(), fmt.Sprintf("Failed to delete the namespace %s", test.testNS)) } }) diff --git a/test/e2e/basic/enable_api_group_versions.go b/test/e2e/basic/enable_api_group_versions.go index 86afaf8926..862854a02c 100644 --- a/test/e2e/basic/enable_api_group_versions.go +++ b/test/e2e/basic/enable_api_group_versions.go @@ -40,6 +40,128 @@ import ( . "github.com/vmware-tanzu/velero/test/e2e/util/velero" ) +func APIExtensionsVersionsTest() { + var ( + backupName, restoreName string + ) + resourceName := "apiextensions.k8s.io" + crdName := "rocknrollbands.music.example.io" + label := "for=backup" + srcCrdYaml := "testdata/enable_api_group_versions/case-a-source-v1beta1.yaml" + BeforeEach(func() { + if VeleroCfg.DefaultCluster == "" && VeleroCfg.StandbyCluster == "" { + Skip("CRD with apiextension versions migration test needs 2 clusters") + } + Expect(KubectlConfigUseContext(context.Background(), VeleroCfg.DefaultCluster)).To(Succeed()) + srcVersions, err := GetAPIVersions(VeleroCfg.DefaultClient, resourceName) + Expect(err).ShouldNot(HaveOccurred()) + dstVersions, err := GetAPIVersions(VeleroCfg.StandbyClient, resourceName) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(srcVersions).Should(ContainElement("v1"), func() string { + Skip("CRD with apiextension versions srcVersions should have v1") + return "" + }) + Expect(srcVersions).Should(ContainElement("v1beta1"), func() string { + Skip("CRD with apiextension versions srcVersions should have v1") + return "" + }) + Expect(dstVersions).Should(ContainElement("v1"), func() string { + Skip("CRD with apiextension versions dstVersions should have v1") + return "" + }) + Expect(len(srcVersions) > 1 && len(dstVersions) == 1).Should(Equal(true), func() string { + Skip("Source cluster should support apiextension v1 and v1beta1, destination cluster should only support apiextension v1") + return "" + }) + }) + AfterEach(func() { + if !VeleroCfg.Debug { + By("Clean backups after test", func() { + DeleteBackups(context.Background(), *VeleroCfg.DefaultClient) + }) + if VeleroCfg.InstallVelero { + By("Uninstall Velero and delete CRD ", func() { + Expect(KubectlConfigUseContext(context.Background(), VeleroCfg.DefaultCluster)).To(Succeed()) + Expect(VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, + VeleroCfg.VeleroNamespace)).To(Succeed()) + Expect(deleteCRDByName(context.Background(), crdName)).To(Succeed()) + + Expect(KubectlConfigUseContext(context.Background(), VeleroCfg.StandbyCluster)).To(Succeed()) + Expect(VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, + VeleroCfg.VeleroNamespace)).To(Succeed()) + Expect(deleteCRDByName(context.Background(), crdName)).To(Succeed()) + }) + } + By(fmt.Sprintf("Switch to default kubeconfig context %s", 
VeleroCfg.DefaultCluster), func() { + Expect(KubectlConfigUseContext(context.Background(), VeleroCfg.DefaultCluster)).To(Succeed()) + VeleroCfg.ClientToInstallVelero = VeleroCfg.DefaultClient + }) + } + + }) + Context("When EnableAPIGroupVersions flag is set", func() { + It("Enable API Group to B/R CRD APIExtensionsVersions", func() { + backupName = "backup-" + UUIDgen.String() + restoreName = "restore-" + UUIDgen.String() + + By(fmt.Sprintf("Install Velero in cluster-A (%s) to backup workload", VeleroCfg.DefaultCluster), func() { + Expect(KubectlConfigUseContext(context.Background(), VeleroCfg.DefaultCluster)).To(Succeed()) + VeleroCfg.ObjectStoreProvider = "" + VeleroCfg.Features = "EnableAPIGroupVersions" + Expect(VeleroInstall(context.Background(), &VeleroCfg, false)).To(Succeed()) + }) + + By(fmt.Sprintf("Install CRD of apiextenstions v1beta1 in cluster-A (%s)", VeleroCfg.DefaultCluster), func() { + Expect(installCRD(context.Background(), srcCrdYaml)).To(Succeed()) + Expect(CRDShouldExist(context.Background(), crdName)).To(Succeed()) + Expect(AddLabelToCRD(context.Background(), crdName, label)).To(Succeed()) + }) + + By("Backup CRD", func() { + var BackupCfg BackupConfig + BackupCfg.BackupName = backupName + BackupCfg.IncludeResources = "crd" + BackupCfg.IncludeClusterResources = true + BackupCfg.Selector = label + Expect(VeleroBackupNamespace(context.Background(), VeleroCfg.VeleroCLI, + VeleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { + RunDebug(context.Background(), VeleroCfg.VeleroCLI, + VeleroCfg.VeleroNamespace, backupName, "") + return "Fail to backup workload" + }) + }) + + By(fmt.Sprintf("Install Velero in cluster-B (%s) to restore workload", VeleroCfg.StandbyCluster), func() { + Expect(KubectlConfigUseContext(context.Background(), VeleroCfg.StandbyCluster)).To(Succeed()) + VeleroCfg.ObjectStoreProvider = "" + VeleroCfg.ClientToInstallVelero = VeleroCfg.StandbyClient + Expect(VeleroInstall(context.Background(), &VeleroCfg, false)).To(Succeed()) + }) + + By(fmt.Sprintf("Waiting for backups sync to Velero in cluster-B (%s)", VeleroCfg.StandbyCluster), func() { + Expect(WaitForBackupToBeCreated(context.Background(), VeleroCfg.VeleroCLI, backupName, 5*time.Minute)).To(Succeed()) + }) + + By(fmt.Sprintf("CRD %s should not exist in cluster-B (%s)", crdName, VeleroCfg.StandbyCluster), func() { + Expect(CRDShouldNotExist(context.Background(), crdName)).To(Succeed(), "Error: CRD already exists in cluster B, clean it and re-run test") + }) + + By("Restore CRD", func() { + Expect(VeleroRestore(context.Background(), VeleroCfg.VeleroCLI, + VeleroCfg.VeleroNamespace, restoreName, backupName, "")).To(Succeed(), func() string { + RunDebug(context.Background(), VeleroCfg.VeleroCLI, + VeleroCfg.VeleroNamespace, "", restoreName) + return "Fail to restore workload" + }) + }) + + By("Verify CRD restore ", func() { + Expect(CRDShouldExist(context.Background(), crdName)).To(Succeed()) + }) + }) + }) +} func APIGropuVersionsTest() { var ( resource, group string @@ -63,22 +185,25 @@ func APIGropuVersionsTest() { }) AfterEach(func() { - fmt.Printf("Clean up resource: kubectl delete crd %s.%s\n", resource, group) - cmd := exec.CommandContext(ctx, "kubectl", "delete", "crd", resource+"."+group) - _, stderr, err := veleroexec.RunCommand(cmd) - if strings.Contains(stderr, "NotFound") { - fmt.Printf("Ignore error: %v\n", stderr) - err = nil - } - Expect(err).NotTo(HaveOccurred()) - - if VeleroCfg.InstallVelero { - if !VeleroCfg.Debug { - err = VeleroUninstall(ctx, VeleroCfg.VeleroCLI, 
VeleroCfg.VeleroNamespace) - Expect(err).NotTo(HaveOccurred()) + if !VeleroCfg.Debug { + fmt.Printf("Clean up resource: kubectl delete crd %s.%s\n", resource, group) + cmd := exec.CommandContext(ctx, "kubectl", "delete", "crd", resource+"."+group) + _, stderr, err := veleroexec.RunCommand(cmd) + if strings.Contains(stderr, "NotFound") { + fmt.Printf("Ignore error: %v\n", stderr) + err = nil + } + Expect(err).NotTo(HaveOccurred()) + By("Clean backups after test", func() { + DeleteBackups(context.Background(), *VeleroCfg.ClientToInstallVelero) + }) + if VeleroCfg.InstallVelero { + + By("Uninstall Velero", func() { + Expect(VeleroUninstall(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).NotTo(HaveOccurred()) + }) } } - }) Context("When EnableAPIGroupVersions flag is set", func() { @@ -242,11 +367,13 @@ func runEnableAPIGroupVersionsTests(ctx context.Context, client TestClient, reso BackupCfg.BackupLocation = "" BackupCfg.UseVolumeSnapshots = false BackupCfg.Selector = "" - err = VeleroBackupNamespace(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, BackupCfg) - if err != nil { - RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, backup, "") - return errors.Wrapf(err, "back up %s namespaces on source cluster", namespacesStr) - } + + Expect(VeleroBackupNamespace(ctx, VeleroCfg.VeleroCLI, + VeleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { + RunDebug(context.Background(), VeleroCfg.VeleroCLI, + VeleroCfg.VeleroNamespace, backup, "") + return "Fail to backup workload" + }) if err := deleteCRD(ctx, tc.srcCrdYaml); err != nil { return errors.Wrapf(err, "delete music-system CRD from source cluster") @@ -299,7 +426,7 @@ func runEnableAPIGroupVersionsTests(ctx context.Context, client TestClient, reso } // Assertion - if containsAll(annoSpec["annotations"], tc.want["annotations"]) != true { + if !containsAll(annoSpec["annotations"], tc.want["annotations"]) { msg := fmt.Sprintf( "actual annotations: %v, expected annotations: %v", annoSpec["annotations"], @@ -309,7 +436,7 @@ func runEnableAPIGroupVersionsTests(ctx context.Context, client TestClient, reso } // Assertion - if containsAll(annoSpec["specs"], tc.want["specs"]) != true { + if !containsAll(annoSpec["specs"], tc.want["specs"]) { msg := fmt.Sprintf( "actual specs: %v, expected specs: %v", annoSpec["specs"], @@ -322,8 +449,7 @@ func runEnableAPIGroupVersionsTests(ctx context.Context, client TestClient, reso // No custom resource should have been restored. Expect "no resource found" // error during restore. 
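A pattern repeated throughout these e2e changes is `Expect(...).To(Succeed(), func() string { RunDebug(...); return "Fail to backup workload" })`: Gomega's optional failure description can be a `func() string`, and it is only evaluated when the assertion fails, so debug output is collected lazily on failure instead of on every run. A minimal, self-contained sketch of the same idea (the test name and stand-in helpers below are hypothetical, not code from this diff):

```go
package e2edemo

import (
	"fmt"
	"testing"

	"github.com/onsi/gomega"
)

// doBackup stands in for a call like VeleroBackupNamespace; it succeeds here,
// so the lazy failure message below is never evaluated.
func doBackup() error { return nil }

// collectDebug stands in for RunDebug-style diagnostics gathering.
func collectDebug() { fmt.Println("collecting velero debug output ...") }

func TestLazyFailureDescription(t *testing.T) {
	g := gomega.NewWithT(t)

	// The func() string description is invoked only if the matcher fails,
	// which keeps expensive debug collection off the happy path.
	g.Expect(doBackup()).To(gomega.Succeed(), func() string {
		collectDebug()
		return "Fail to backup workload"
	})
}
```

The restore-phase check that the comment above introduces continues below.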
err := VeleroRestore(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, restore, backup, "") - - if err.Error() != "Unexpected restore phase got PartiallyFailed, expecting Completed" { + if !strings.Contains(err.Error(), "Unexpected restore phase got PartiallyFailed, expecting Completed") { return errors.New("expected error but not none") } } @@ -339,9 +465,18 @@ func runEnableAPIGroupVersionsTests(ctx context.Context, client TestClient, reso func installCRD(ctx context.Context, yaml string) error { fmt.Printf("Install CRD with %s.\n", yaml) - cmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", yaml) + err := KubectlApplyByFile(ctx, yaml) + return err +} + +func deleteCRD(ctx context.Context, yaml string) error { + fmt.Println("Delete CRD", yaml) + cmd := exec.CommandContext(ctx, "kubectl", "delete", "-f", yaml, "--wait") _, stderr, err := veleroexec.RunCommand(cmd) + if strings.Contains(stderr, "not found") { + return nil + } if err != nil { return errors.Wrap(err, stderr) } @@ -349,9 +484,9 @@ func installCRD(ctx context.Context, yaml string) error { return nil } -func deleteCRD(ctx context.Context, yaml string) error { - fmt.Println("Delete CRD", yaml) - cmd := exec.CommandContext(ctx, "kubectl", "delete", "-f", yaml, "--wait") +func deleteCRDByName(ctx context.Context, name string) error { + fmt.Println("Delete CRD", name) + cmd := exec.CommandContext(ctx, "kubectl", "delete", "crd", name, "--wait") _, stderr, err := veleroexec.RunCommand(cmd) if strings.Contains(stderr, "not found") { diff --git a/test/e2e/basic/namespace-mapping.go b/test/e2e/basic/namespace-mapping.go index 2f72f23440..3a8d56ccef 100644 --- a/test/e2e/basic/namespace-mapping.go +++ b/test/e2e/basic/namespace-mapping.go @@ -57,7 +57,7 @@ func (n *NamespaceMapping) StartRun() error { n.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", n.BackupName, "--include-namespaces", strings.Join(*n.NSIncluded, ","), - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } n.RestoreArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "restore", n.RestoreName, diff --git a/test/e2e/basic/resources-check/namespaces.go b/test/e2e/basic/resources-check/namespaces.go index ae6e15455a..7c1d6ad4f6 100644 --- a/test/e2e/basic/resources-check/namespaces.go +++ b/test/e2e/basic/resources-check/namespaces.go @@ -81,7 +81,7 @@ func (m *MultiNSBackup) StartRun() error { m.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", m.BackupName, "--exclude-namespaces", strings.Join(*m.NSExcluded, ","), - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } m.RestoreArgs = []string{ diff --git a/test/e2e/basic/resources-check/namespaces_annotation.go b/test/e2e/basic/resources-check/namespaces_annotation.go index f3f602147c..5033dd14ec 100644 --- a/test/e2e/basic/resources-check/namespaces_annotation.go +++ b/test/e2e/basic/resources-check/namespaces_annotation.go @@ -56,7 +56,7 @@ func (n *NSAnnotationCase) Init() error { n.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", n.BackupName, "--include-namespaces", strings.Join(*n.NSIncluded, ","), - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } n.RestoreArgs = []string{ diff --git a/test/e2e/basic/resources-check/rbac.go b/test/e2e/basic/resources-check/rbac.go index c07329d817..7657669cfb 100644 --- a/test/e2e/basic/resources-check/rbac.go +++ 
b/test/e2e/basic/resources-check/rbac.go @@ -71,7 +71,7 @@ func (r *RBACCase) Init() error { r.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", r.BackupName, "--include-namespaces", strings.Join(*r.NSIncluded, ","), - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } r.RestoreArgs = []string{ diff --git a/test/e2e/bsl-mgmt/deletion.go b/test/e2e/bsl-mgmt/deletion.go index 9800237d42..20b591d73f 100644 --- a/test/e2e/bsl-mgmt/deletion.go +++ b/test/e2e/bsl-mgmt/deletion.go @@ -71,15 +71,23 @@ func BslDeletionTest(useVolumeSnapshots bool) { }) AfterEach(func() { - if VeleroCfg.InstallVelero { - if !VeleroCfg.Debug { + if !VeleroCfg.Debug { + By("Clean backups after test", func() { + DeleteBackups(context.Background(), *VeleroCfg.DefaultClient) + }) + By(fmt.Sprintf("Delete sample workload namespace %s", bslDeletionTestNs), func() { Expect(DeleteNamespace(context.Background(), *VeleroCfg.ClientToInstallVelero, bslDeletionTestNs, true)).To(Succeed(), fmt.Sprintf("failed to delete the namespace %q", bslDeletionTestNs)) - Expect(VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, - VeleroCfg.VeleroNamespace)).To(Succeed()) + }) + if VeleroCfg.InstallVelero { + By("Uninstall Velero", func() { + Expect(VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, + VeleroCfg.VeleroNamespace)).To(Succeed()) + }) } } + }) When("kibishii is the sample workload", func() { @@ -181,7 +189,10 @@ func BslDeletionTest(useVolumeSnapshots bool) { // TODO currently, the upgrade case covers the upgrade path from 1.6 to main and the velero v1.6 doesn't support "debug" command // TODO move to "runDebug" after we bump up to 1.7 in the upgrade case Expect(VeleroBackupNamespace(oneHourTimeout, VeleroCfg.VeleroCLI, - VeleroCfg.VeleroNamespace, BackupCfg)).To(Succeed()) + VeleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { + RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, BackupCfg.BackupName, "") + return "Fail to backup workload" + }) }) BackupCfg.BackupName = backupName_2 @@ -189,7 +200,10 @@ func BslDeletionTest(useVolumeSnapshots bool) { BackupCfg.Selector = label_2 By(fmt.Sprintf("Back up the other one PV of sample workload with label-2 into the additional BSL %s", backupLocation_2), func() { Expect(VeleroBackupNamespace(oneHourTimeout, VeleroCfg.VeleroCLI, - VeleroCfg.VeleroNamespace, BackupCfg)).To(Succeed()) + VeleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { + RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, BackupCfg.BackupName, "") + return "Fail to backup workload" + }) }) if useVolumeSnapshots { diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 9b3890c96a..1025d02166 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -35,6 +35,7 @@ import ( . "github.com/vmware-tanzu/velero/test/e2e/bsl-mgmt" . "github.com/vmware-tanzu/velero/test/e2e/orderedresources" . "github.com/vmware-tanzu/velero/test/e2e/privilegesmgmt" + . "github.com/vmware-tanzu/velero/test/e2e/pv-backup" . "github.com/vmware-tanzu/velero/test/e2e/resource-filtering" . "github.com/vmware-tanzu/velero/test/e2e/scale" . 
"github.com/vmware-tanzu/velero/test/e2e/upgrade" @@ -77,9 +78,11 @@ func init() { flag.StringVar(&VeleroCfg.GCFrequency, "garbage-collection-frequency", "", "Frequency of garbage collection.") flag.StringVar(&VeleroCfg.DefaultCluster, "default-cluster", "", "Default cluster context for migration test.") flag.StringVar(&VeleroCfg.StandbyCluster, "standby-cluster", "", "Standby cluster context for migration test.") + flag.StringVar(&VeleroCfg.UploaderType, "uploader-type", "", "Identify persistent volume backup uploader.") } -var _ = Describe("[APIGroup] Velero tests with various CRD API group versions", APIGropuVersionsTest) +var _ = Describe("[APIGroup][Common] Velero tests with various CRD API group versions", APIGropuVersionsTest) +var _ = Describe("[APIGroup][APIExtensions] CRD of apiextentions v1beta1 should be B/R successfully from cluster(k8s version < 1.22) to cluster(k8s version >= 1.22)", APIExtensionsVersionsTest) // Test backup and restore of Kibishi using restic var _ = Describe("[Basic][Restic] Velero tests on cluster using the plugin provider for object storage and Restic for volume backups", BackupRestoreWithRestic) @@ -110,6 +113,7 @@ var _ = Describe("[Backups][Deletion][Restic] Velero tests of Restic backup dele var _ = Describe("[Backups][Deletion][Snapshot] Velero tests of snapshot backup deletion", BackupDeletionWithSnapshots) var _ = Describe("[Backups][TTL] Local backups and restic repos will be deleted once the corresponding backup storage location is deleted", TTLTest) var _ = Describe("[Backups][BackupsSync] Backups in object storage are synced to a new Velero and deleted backups in object storage are synced to be deleted in Velero", BackupsSyncTest) +var _ = Describe("[Backups][Schedule] Backup will be created periodly by schedule defined by a Cron expression", ScheduleBackupTest) var _ = Describe("[PrivilegesMgmt][SSR] Velero test on ssr object when controller namespace mix-ups", SSRTest) @@ -125,6 +129,9 @@ var _ = Describe("[Schedule][OrederedResources] Backup resources should follow t var _ = Describe("[NamespaceMapping][Single] Backup resources should follow the specific order in schedule", OneNamespaceMappingTest) var _ = Describe("[NamespaceMapping][Multiple] Backup resources should follow the specific order in schedule", MultiNamespacesMappingTest) +var _ = Describe("[pv-backup][Opt-In] Backup resources should follow the specific order in schedule", OptInPVBackupTest) +var _ = Describe("[pv-backup][Opt-Out] Backup resources should follow the specific order in schedule", OptOutPVBackupTest) + func GetKubeconfigContext() error { var err error var tcDefault, tcStandby TestClient diff --git a/test/e2e/migration/migration.go b/test/e2e/migration/migration.go index f0f376f611..3e81d8dd15 100644 --- a/test/e2e/migration/migration.go +++ b/test/e2e/migration/migration.go @@ -68,8 +68,11 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) } }) AfterEach(func() { - if VeleroCfg.InstallVelero { - if !VeleroCfg.Debug { + if !VeleroCfg.Debug { + By("Clean backups after test", func() { + DeleteBackups(context.Background(), *VeleroCfg.DefaultClient) + }) + if VeleroCfg.InstallVelero { By(fmt.Sprintf("Uninstall Velero and delete sample workload namespace %s", migrationNamespace), func() { Expect(KubectlConfigUseContext(context.Background(), VeleroCfg.DefaultCluster)).To(Succeed()) Expect(VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, @@ -81,12 +84,13 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version 
VeleroCLI2Version) VeleroCfg.VeleroNamespace)).To(Succeed()) DeleteNamespace(context.Background(), *VeleroCfg.StandbyClient, migrationNamespace, true) }) - By(fmt.Sprintf("Switch to default kubeconfig context %s", VeleroCfg.DefaultClient), func() { - Expect(KubectlConfigUseContext(context.Background(), VeleroCfg.DefaultCluster)).To(Succeed()) - VeleroCfg.ClientToInstallVelero = VeleroCfg.DefaultClient - }) } + By(fmt.Sprintf("Switch to default kubeconfig context %s", VeleroCfg.DefaultClient), func() { + Expect(KubectlConfigUseContext(context.Background(), VeleroCfg.DefaultCluster)).To(Succeed()) + VeleroCfg.ClientToInstallVelero = VeleroCfg.DefaultClient + }) } + }) When("kibishii is the sample workload", func() { It("should be successfully backed up and restored to the default BackupStorageLocation", func() { @@ -109,11 +113,9 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) } }) } - + OriginVeleroCfg := VeleroCfg By(fmt.Sprintf("Install Velero in cluster-A (%s) to backup workload", VeleroCfg.DefaultCluster), func() { Expect(KubectlConfigUseContext(context.Background(), VeleroCfg.DefaultCluster)).To(Succeed()) - - OriginVeleroCfg := VeleroCfg OriginVeleroCfg.MigrateFromVeleroVersion = veleroCLI2Version.VeleroVersion OriginVeleroCfg.VeleroCLI = veleroCLI2Version.VeleroCLI OriginVeleroCfg.ClientToInstallVelero = OriginVeleroCfg.DefaultClient @@ -122,6 +124,8 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) OriginVeleroCfg.VeleroImage = "" OriginVeleroCfg.ResticHelperImage = "" OriginVeleroCfg.Plugins = "" + //TODO: Remove this once origin Velero version is 1.10 and upper + OriginVeleroCfg.UploaderType = "" } fmt.Println(OriginVeleroCfg) Expect(VeleroInstall(context.Background(), &OriginVeleroCfg, useVolumeSnapshots)).To(Succeed()) @@ -152,11 +156,16 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) BackupStorageClassCfg.BackupName = backupScName BackupStorageClassCfg.IncludeResources = "StorageClass" BackupStorageClassCfg.IncludeClusterResources = true - Expect(VeleroBackupNamespace(context.Background(), VeleroCfg.VeleroCLI, - VeleroCfg.VeleroNamespace, BackupStorageClassCfg)).ShouldNot(HaveOccurred(), func() string { - err = VeleroBackupLogs(context.Background(), VeleroCfg.VeleroCLI, - VeleroCfg.VeleroNamespace, backupName) - return "Get backup logs" + //TODO Remove UseRestic parameter once minor version is 1.10 or upper + BackupStorageClassCfg.UseRestic = true + if veleroCLI2Version.VeleroVersion == "self" { + BackupStorageClassCfg.UseRestic = false + } + + Expect(VeleroBackupNamespace(context.Background(), OriginVeleroCfg.VeleroCLI, + OriginVeleroCfg.VeleroNamespace, BackupStorageClassCfg)).To(Succeed(), func() string { + RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, BackupStorageClassCfg.BackupName, "") + return "Fail to backup workload" }) var BackupCfg BackupConfig @@ -165,14 +174,16 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) BackupCfg.UseVolumeSnapshots = useVolumeSnapshots BackupCfg.BackupLocation = "" BackupCfg.Selector = "" - //BackupCfg.ExcludeResources = "tierentitlementbindings,tierentitlements,tiers,capabilities,customresourcedefinitions" - Expect(VeleroBackupNamespace(context.Background(), VeleroCfg.VeleroCLI, - VeleroCfg.VeleroNamespace, BackupCfg)).ShouldNot(HaveOccurred(), func() string { - err = VeleroBackupLogs(context.Background(), VeleroCfg.VeleroCLI, - VeleroCfg.VeleroNamespace, backupName) - 
return "Get backup logs" + //TODO Remove UseRestic parameter once minor version is 1.10 or upper + BackupCfg.UseRestic = true + if veleroCLI2Version.VeleroVersion == "self" { + BackupCfg.UseRestic = false + } + Expect(VeleroBackupNamespace(context.Background(), OriginVeleroCfg.VeleroCLI, + OriginVeleroCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { + RunDebug(context.Background(), OriginVeleroCfg.VeleroCLI, OriginVeleroCfg.VeleroNamespace, BackupCfg.BackupName, "") + return "Fail to backup workload" }) - }) if useVolumeSnapshots { diff --git a/test/e2e/orderedresources/ordered_resources.go b/test/e2e/orderedresources/ordered_resources.go index af6074ee8f..44669cdc0c 100644 --- a/test/e2e/orderedresources/ordered_resources.go +++ b/test/e2e/orderedresources/ordered_resources.go @@ -112,10 +112,8 @@ func ScheduleOrderedResources() { func (o *OrderedResources) Init() error { rand.Seed(time.Now().UnixNano()) UUIDgen, _ = uuid.NewRandom() - client, err := NewTestClient(VeleroCfg.DefaultCluster) - if err != nil { - return fmt.Errorf("failed to init ordered resources test with err %v", err) - } + client := *VeleroCfg.ClientToInstallVelero + o.Client = client o.ScheduleName = "schedule-ordered-resources-" + UUIDgen.String() o.NSBaseName = "schedule-ordered-resources" @@ -127,7 +125,7 @@ func (o *OrderedResources) Init() error { } o.ScheduleArgs = []string{"--schedule", "@every 1m", - "--include-namespaces", o.Namespace, "--default-volumes-to-restic", "--ordered-resources"} + "--include-namespaces", o.Namespace, "--default-volumes-to-fs-backup", "--ordered-resources"} var orderStr string for kind, resource := range o.OrderMap { orderStr += fmt.Sprintf("%s=%s;", kind, resource) diff --git a/test/e2e/pv-backup/pv-backup-filter.go b/test/e2e/pv-backup/pv-backup-filter.go new file mode 100644 index 0000000000..3de103894b --- /dev/null +++ b/test/e2e/pv-backup/pv-backup-filter.go @@ -0,0 +1,210 @@ +package basic + +import ( + "context" + "fmt" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + + . "github.com/vmware-tanzu/velero/test/e2e" + . "github.com/vmware-tanzu/velero/test/e2e/test" + . 
"github.com/vmware-tanzu/velero/test/e2e/util/k8s" +) + +type PVBackupFiltering struct { + TestCase + annotation string + podsList [][]string + volumesList [][]string + id string +} + +const POD_COUNT, VOLUME_COUNT_PER_POD = 2, 3 +const OPT_IN_ANN, OPT_OUT_ANN = "backup.velero.io/backup-volumes", "backup.velero.io/backup-volumes-excludes" +const FILE_NAME = "test-data.txt" + +var OptInPVBackupTest func() = TestFunc(&PVBackupFiltering{annotation: OPT_IN_ANN, id: "opt-in"}) +var OptOutPVBackupTest func() = TestFunc(&PVBackupFiltering{annotation: OPT_OUT_ANN, id: "opt-out"}) + +func (p *PVBackupFiltering) Init() error { + p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + p.Client = TestClientInstance + p.NSBaseName = "ns" + p.NSIncluded = &[]string{fmt.Sprintf("%s-%s-%d", p.NSBaseName, p.id, 1), fmt.Sprintf("%s-%s-%d", p.NSBaseName, p.id, 2)} + + p.TestMsg = &TestMSG{ + Desc: "Backup PVs filtering by opt-in/opt-out annotation", + FailedMSG: "Failed to PVs filtering by opt-in/opt-out annotation", + Text: fmt.Sprintf("Should backup PVs in namespace %s according to annotation %s", *p.NSIncluded, p.annotation), + } + return nil +} + +func (p *PVBackupFiltering) StartRun() error { + err := installStorageClass(context.Background(), fmt.Sprintf("testdata/storage-class/%s.yaml", VeleroCfg.CloudProvider)) + if err != nil { + return err + } + p.BackupName = p.BackupName + "backup-opt-in-" + UUIDgen.String() + p.RestoreName = p.RestoreName + "restore-opt-in-" + UUIDgen.String() + p.BackupArgs = []string{ + "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", p.BackupName, + "--include-namespaces", strings.Join(*p.NSIncluded, ","), + "--snapshot-volumes=false", "--wait", + } + // "--default-volumes-to-fs-backup" is an overall switch, if it's set, then opt-in + // annotation will be ignored, so it's only set for opt-out test + if p.annotation == OPT_OUT_ANN { + p.BackupArgs = append(p.BackupArgs, "--default-volumes-to-fs-backup") + + } + p.RestoreArgs = []string{ + "create", "--namespace", VeleroCfg.VeleroNamespace, "restore", p.RestoreName, + "--from-backup", p.BackupName, "--wait", + } + return nil +} +func (p *PVBackupFiltering) CreateResources() error { + p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + for _, ns := range *p.NSIncluded { + By(fmt.Sprintf("Create namespaces %s for workload\n", ns), func() { + Expect(CreateNamespace(p.Ctx, p.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns)) + }) + var pods []string + By(fmt.Sprintf("Deploy a few pods with several PVs in namespace %s", ns), func() { + var volumesToAnnotation string + //Make sure PVC name is unique from other tests to avoid PVC creation error + for i := 0; i <= POD_COUNT-1; i++ { + var volumeToAnnotationList []string + var volumes []string + for j := 0; j <= VOLUME_COUNT_PER_POD-1; j++ { + volume := fmt.Sprintf("volume-%s-%d-%d", p.id, i, j) + volumes = append(volumes, volume) + //Volumes cherry pick policy for opt-in/out annotation to pods + if j%2 == 0 { + volumeToAnnotationList = append(volumeToAnnotationList, volume) + } + } + p.volumesList = append(p.volumesList, volumes) + volumesToAnnotation = strings.Join(volumeToAnnotationList, ",") + podName := fmt.Sprintf("pod-%d", i) + pods = append(pods, podName) + By(fmt.Sprintf("Create pod %s in namespace %s", podName, ns), func() { + pod, err := CreatePodWithPVC(p.Client, ns, podName, "e2e-storage-class", volumes) + Expect(err).To(Succeed()) + ann := map[string]string{ + p.annotation: volumesToAnnotation, + } + 
By(fmt.Sprintf("Add annotation to pod %s of namespace %s", pod.Name, ns), func() { + _, err := AddAnnotationToPod(p.Ctx, p.Client, ns, pod.Name, ann) + Expect(err).To(Succeed()) + }) + }) + } + }) + p.podsList = append(p.podsList, pods) + } + By(fmt.Sprintf("Waiting for all pods to start %s\n", p.podsList), func() { + for index, ns := range *p.NSIncluded { + By(fmt.Sprintf("Waiting for all pods to start %d in namespace %s", index, ns), func() { + WaitForPods(p.Ctx, p.Client, ns, p.podsList[index]) + }) + } + }) + By(fmt.Sprintf("Polulate all pods %s with file %s", p.podsList, FILE_NAME), func() { + for index, ns := range *p.NSIncluded { + By(fmt.Sprintf("Creating file in all pods to start %d in namespace %s", index, ns), func() { + WaitForPods(p.Ctx, p.Client, ns, p.podsList[index]) + for i, pod := range p.podsList[index] { + for j := range p.volumesList[i] { + Expect(CreateFileToPod(p.Ctx, ns, pod, p.volumesList[i][j], + FILE_NAME, fileContent(ns, pod, p.volumesList[i][j]))).To(Succeed()) + } + } + }) + } + }) + return nil +} + +func (p *PVBackupFiltering) Verify() error { + p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) + By(fmt.Sprintf("Waiting for all pods to start %s", p.podsList), func() { + for index, ns := range *p.NSIncluded { + By(fmt.Sprintf("Waiting for all pods to start %d in namespace %s", index, ns), func() { + WaitForPods(p.Ctx, p.Client, ns, p.podsList[index]) + }) + } + }) + + for k, ns := range *p.NSIncluded { + By("Verify PV backed up according to annotation", func() { + for i := 0; i <= POD_COUNT-1; i++ { + for j := 0; j <= VOLUME_COUNT_PER_POD-1; j++ { + // Same with volumes cherry pick policy to verify backup result + if j%2 == 0 { + if p.annotation == OPT_IN_ANN { + By(fmt.Sprintf("File should exists in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() { + Expect(fileExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect") + }) + } else { + By(fmt.Sprintf("File should not exist in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() { + Expect(fileNotExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect") + }) + } + } else { + if p.annotation == OPT_OUT_ANN { + By(fmt.Sprintf("File should exists in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() { + Expect(fileExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect") + }) + } else { + By(fmt.Sprintf("File should not exist in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() { + Expect(fileNotExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect") + }) + } + } + } + } + }) + } + + return nil +} +func fileContent(namespace, podName, volume string) string { + return fmt.Sprintf("ns-%s pod-%s volume-%s", namespace, podName, volume) +} + +func fileExist(ctx context.Context, namespace, podName, volume string) error { + c, err := ReadFileFromPodVolume(ctx, namespace, podName, volume, FILE_NAME) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s ", + FILE_NAME, volume, podName, namespace)) + } + c = strings.Replace(c, "\n", "", -1) + origin_content := strings.Replace(fileContent(namespace, podName, volume), "\n", "", -1) + if c == origin_content { + return nil + } else { + return errors.New(fmt.Sprintf("UNEXPECTED: File 
%s does not exist in volume %s of pod %s in namespace %s.", + FILE_NAME, volume, podName, namespace)) + } +} +func fileNotExist(ctx context.Context, namespace, podName, volume string) error { + _, err := ReadFileFromPodVolume(ctx, namespace, podName, volume, FILE_NAME) + if err != nil { + return nil + } else { + return errors.New(fmt.Sprintf("UNEXPECTED: File %s exist in volume %s of pod %s in namespace %s.", + FILE_NAME, volume, podName, namespace)) + } +} + +func installStorageClass(ctx context.Context, yaml string) error { + fmt.Printf("Install storage class with %s.\n", yaml) + err := KubectlApplyByFile(ctx, yaml) + return err +} diff --git a/test/e2e/resource-filtering/base.go b/test/e2e/resource-filtering/base.go index fcfd1fc37c..08def1a7bd 100644 --- a/test/e2e/resource-filtering/base.go +++ b/test/e2e/resource-filtering/base.go @@ -53,7 +53,7 @@ func (f *FilteringCase) Init() error { f.NamespacesTotal = 3 f.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", f.BackupName, - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } f.RestoreArgs = []string{ diff --git a/test/e2e/resource-filtering/exclude_label.go b/test/e2e/resource-filtering/exclude_label.go index 72307fe3fa..b5e46621bd 100644 --- a/test/e2e/resource-filtering/exclude_label.go +++ b/test/e2e/resource-filtering/exclude_label.go @@ -19,12 +19,14 @@ package filtering import ( "context" "fmt" - "strings" "time" - "github.com/pkg/errors" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" apierrors "k8s.io/apimachinery/pkg/api/errors" + "github.com/pkg/errors" + . "github.com/vmware-tanzu/velero/test/e2e" . "github.com/vmware-tanzu/velero/test/e2e/test" . "github.com/vmware-tanzu/velero/test/e2e/util/k8s" @@ -62,8 +64,8 @@ func (e *ExcludeFromBackup) Init() error { e.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", e.BackupName, - "--include-namespaces", strings.Join(*e.NSIncluded, ","), - "--default-volumes-to-restic", "--wait", + "--include-namespaces", e.NSBaseName, + "--default-volumes-to-fs-backup", "--wait", } e.RestoreArgs = []string{ @@ -75,63 +77,87 @@ func (e *ExcludeFromBackup) Init() error { func (e *ExcludeFromBackup) CreateResources() error { e.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute) - for nsNum := 0; nsNum < e.NamespacesTotal; nsNum++ { - namespace := fmt.Sprintf("%s-%00000d", e.NSBaseName, nsNum) - fmt.Printf("Creating resources in namespace ...%s\n", namespace) - labels := e.labels - if nsNum%2 == 0 { - labels = map[string]string{ - "velero.io/exclude-from-backup": "false", - } - } - if err := CreateNamespaceWithLabel(e.Ctx, e.Client, namespace, labels); err != nil { - return errors.Wrapf(err, "Failed to create namespace %s", namespace) - } - serviceAccountName := "default" - // wait until the service account is created before patch the image pull secret - if err := WaitUntilServiceAccountCreated(e.Ctx, e.Client, namespace, serviceAccountName, 10*time.Minute); err != nil { - return errors.Wrapf(err, "failed to wait the service account %q created under the namespace %q", serviceAccountName, namespace) - } - // add the image pull secret to avoid the image pull limit issue of Docker Hub - if err := PatchServiceAccountWithImagePullSecret(e.Ctx, e.Client, namespace, serviceAccountName, VeleroCfg.RegistryCredentialFile); err != nil { - return errors.Wrapf(err, "failed to patch the service account %q under the namespace %q", serviceAccountName, namespace) - } - //Create 
deployment - fmt.Printf("Creating deployment in namespaces ...%s\n", namespace) - - deployment := NewDeployment(e.NSBaseName, namespace, e.replica, labels) - deployment, err := CreateDeployment(e.Client.ClientGo, namespace, deployment) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace)) - } - err = WaitForReadyDeployment(e.Client.ClientGo, namespace, deployment.Name) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to ensure deployment completion in namespace: %q", namespace)) - } + namespace := e.NSBaseName + // These 2 labels for resources to be included + label1 := map[string]string{ + "meaningless-label-resource-to-include": "true", + } + label2 := map[string]string{ + "velero.io/exclude-from-backup": "false", + } + fmt.Printf("Creating resources in namespace ...%s\n", namespace) + if err := CreateNamespace(e.Ctx, e.Client, namespace); err != nil { + return errors.Wrapf(err, "Failed to create namespace %s", namespace) + } + serviceAccountName := "default" + // wait until the service account is created before patch the image pull secret + if err := WaitUntilServiceAccountCreated(e.Ctx, e.Client, namespace, serviceAccountName, 10*time.Minute); err != nil { + return errors.Wrapf(err, "failed to wait the service account %q created under the namespace %q", serviceAccountName, namespace) + } + // add the image pull secret to avoid the image pull limit issue of Docker Hub + if err := PatchServiceAccountWithImagePullSecret(e.Ctx, e.Client, namespace, serviceAccountName, VeleroCfg.RegistryCredentialFile); err != nil { + return errors.Wrapf(err, "failed to patch the service account %q under the namespace %q", serviceAccountName, namespace) + } + //Create deployment: to be included + fmt.Printf("Creating deployment in namespaces ...%s\n", namespace) + deployment := NewDeployment(e.NSBaseName, namespace, e.replica, label2) + deployment, err := CreateDeployment(e.Client.ClientGo, namespace, deployment) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace)) + } + err = WaitForReadyDeployment(e.Client.ClientGo, namespace, deployment.Name) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to ensure job completion in namespace: %q", namespace)) + } + //Create Secret + secretName := e.NSBaseName + fmt.Printf("Creating secret %s in namespaces ...%s\n", secretName, namespace) + _, err = CreateSecret(e.Client.ClientGo, namespace, secretName, e.labels) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to create secret in the namespace %q", namespace)) + } + err = WaitForSecretsComplete(e.Client.ClientGo, namespace, secretName) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to ensure secret completion in namespace: %q", namespace)) + } + By(fmt.Sprintf("Checking secret %s should exists in namespaces ...%s\n", secretName, namespace), func() { + _, err = GetSecret(e.Client.ClientGo, namespace, e.NSBaseName) + Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to list deployment in namespace: %q", namespace)) + }) + //Create Configmap: to be included + configmaptName := e.NSBaseName + fmt.Printf("Creating configmap %s in namespaces ...%s\n", configmaptName, namespace) + _, err = CreateConfigMap(e.Client.ClientGo, namespace, configmaptName, label1) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to create configmap in the namespace %q", namespace)) + } + err = WaitForConfigMapComplete(e.Client.ClientGo, namespace, configmaptName) 
+ if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to ensure secret completion in namespace: %q", namespace)) } return nil } func (e *ExcludeFromBackup) Verify() error { - for nsNum := 0; nsNum < e.NamespacesTotal; nsNum++ { - namespace := fmt.Sprintf("%s-%00000d", e.NSBaseName, nsNum) - fmt.Printf("Checking resources in namespaces ...%s\n", namespace) - //Check deployment - _, err := GetDeployment(e.Client.ClientGo, namespace, e.NSBaseName) - if nsNum%2 == 0 { //include - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to list deployment in namespace: %q", namespace)) - } - } else { //exclude - if err == nil { - return fmt.Errorf("failed to exclude deployment in namespaces %q", namespace) - } else { - if apierrors.IsNotFound(err) { //resource should be excluded - return nil - } - return errors.Wrap(err, fmt.Sprintf("failed to list deployment in namespace: %q", namespace)) - } - } - } + namespace := e.NSBaseName + By(fmt.Sprintf("Checking resources in namespaces ...%s\n", namespace), func() { + //Check namespace + checkNS, err := GetNamespace(e.Ctx, e.Client, namespace) + Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("Could not retrieve test namespace %s", namespace)) + Expect(checkNS.Name == namespace).To(Equal(true), fmt.Sprintf("Retrieved namespace for %s has name %s instead", namespace, checkNS.Name)) + + //Check deployment: should be included + _, err = GetDeployment(e.Client.ClientGo, namespace, e.NSBaseName) + Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to list deployment in namespace: %q", namespace)) + + //Check secrets: secrets should not be included + _, err = GetSecret(e.Client.ClientGo, namespace, e.NSBaseName) + Expect(err).Should(HaveOccurred(), fmt.Sprintf("failed to list deployment in namespace: %q", namespace)) + Expect(apierrors.IsNotFound(err)).To(Equal(true)) + + //Check configmap: should be included + _, err = GetConfigmap(e.Client.ClientGo, namespace, e.NSBaseName) + Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to list configmap in namespace: %q", namespace)) + }) return nil } diff --git a/test/e2e/resource-filtering/exclude_namespaces.go b/test/e2e/resource-filtering/exclude_namespaces.go index b76b15fb19..8d195c2476 100644 --- a/test/e2e/resource-filtering/exclude_namespaces.go +++ b/test/e2e/resource-filtering/exclude_namespaces.go @@ -83,7 +83,7 @@ func (e *ExcludeNamespaces) Init() error { "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", e.BackupName, "--exclude-namespaces", strings.Join(*e.nsExcluded, ","), "--include-namespaces", strings.Join(*e.NSIncluded, ","), - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } e.RestoreArgs = []string{ @@ -96,7 +96,7 @@ func (e *ExcludeNamespaces) Init() error { e.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", e.BackupName, "--include-namespaces", strings.Join(*e.NSIncluded, ","), - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } e.RestoreArgs = []string{ diff --git a/test/e2e/resource-filtering/exclude_resources.go b/test/e2e/resource-filtering/exclude_resources.go index 1080ee70f1..844a4243e6 100644 --- a/test/e2e/resource-filtering/exclude_resources.go +++ b/test/e2e/resource-filtering/exclude_resources.go @@ -66,7 +66,7 @@ func (e *ExcludeResources) Init() error { "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", e.BackupName, "--include-namespaces", strings.Join(*e.NSIncluded, ","), "--exclude-resources", "secrets", - 
"--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } e.RestoreArgs = []string{ @@ -86,7 +86,7 @@ func (e *ExcludeResources) Init() error { e.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", e.BackupName, "--include-namespaces", strings.Join(*e.NSIncluded, ","), - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } e.RestoreArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "restore", e.RestoreName, diff --git a/test/e2e/resource-filtering/include_namespaces.go b/test/e2e/resource-filtering/include_namespaces.go index 1d6fdf49d6..5783586fad 100644 --- a/test/e2e/resource-filtering/include_namespaces.go +++ b/test/e2e/resource-filtering/include_namespaces.go @@ -73,7 +73,7 @@ func (i *IncludeNamespaces) Init() error { i.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", i.BackupName, "--include-namespaces", strings.Join(*i.NSIncluded, ","), - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } i.RestoreArgs = []string{ @@ -92,7 +92,7 @@ func (i *IncludeNamespaces) Init() error { i.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", i.BackupName, "--include-namespaces", strings.Join(*i.allTestNamespaces, ","), - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } i.RestoreArgs = []string{ diff --git a/test/e2e/resource-filtering/include_resources.go b/test/e2e/resource-filtering/include_resources.go index 102991bc66..268ee6fb85 100644 --- a/test/e2e/resource-filtering/include_resources.go +++ b/test/e2e/resource-filtering/include_resources.go @@ -63,7 +63,7 @@ func (i *IncludeResources) Init() error { i.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", i.BackupName, "--include-resources", "deployments,configmaps", - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } i.RestoreArgs = []string{ @@ -81,7 +81,7 @@ func (i *IncludeResources) Init() error { i.BackupArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", i.BackupName, "--include-namespaces", strings.Join(*i.NSIncluded, ","), - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } i.RestoreArgs = []string{ "create", "--namespace", VeleroCfg.VeleroNamespace, "restore", i.RestoreName, diff --git a/test/e2e/resource-filtering/label_selector.go b/test/e2e/resource-filtering/label_selector.go index 3b0edfd3d4..fff56b1b56 100644 --- a/test/e2e/resource-filtering/label_selector.go +++ b/test/e2e/resource-filtering/label_selector.go @@ -64,7 +64,7 @@ func (l *LabelSelector) Init() error { "create", "--namespace", VeleroCfg.VeleroNamespace, "backup", l.BackupName, "--selector", "resourcefiltering=true", "--include-namespaces", strings.Join(*l.NSIncluded, ","), - "--default-volumes-to-restic", "--wait", + "--default-volumes-to-fs-backup", "--wait", } l.RestoreArgs = []string{ diff --git a/test/e2e/test/test.go b/test/e2e/test/test.go index fe0ad7e9d9..ee9218a0c1 100644 --- a/test/e2e/test/test.go +++ b/test/e2e/test/test.go @@ -83,8 +83,8 @@ func TestFunc(test VeleroBackupRestoreTest) func() { } }) AfterEach(func() { - if VeleroCfg.InstallVelero { - if !VeleroCfg.Debug { + if !VeleroCfg.Debug { + if VeleroCfg.InstallVelero { Expect(VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).To((Succeed())) } } @@ -117,9 
+117,11 @@ func TestFuncWithMultiIt(tests []VeleroBackupRestoreTest) func() { }) AfterEach(func() { - if VeleroCfg.InstallVelero { - if countIt == len(tests) && !VeleroCfg.Debug { - Expect(VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).To((Succeed())) + if !VeleroCfg.Debug { + if VeleroCfg.InstallVelero { + if countIt == len(tests) && !VeleroCfg.Debug { + Expect(VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).To((Succeed())) + } } } }) @@ -162,10 +164,12 @@ func (t *TestCase) Destroy() error { } func (t *TestCase) Restore() error { - if err := VeleroCmdExec(t.Ctx, VeleroCfg.VeleroCLI, t.RestoreArgs); err != nil { - RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, t.BackupName, "") - return errors.Wrapf(err, "Failed to restore resources") - } + By("Start to restore ......", func() { + Expect(VeleroCmdExec(t.Ctx, VeleroCfg.VeleroCLI, t.RestoreArgs)).To(Succeed(), func() string { + RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, t.BackupName, "") + return "Fail to restore workload" + }) + }) return nil } @@ -174,7 +178,15 @@ func (t *TestCase) Verify() error { } func (t *TestCase) Clean() error { - return CleanupNamespacesWithPoll(t.Ctx, t.Client, t.NSBaseName) + if !VeleroCfg.Debug { + By(fmt.Sprintf("Clean namespace with prefix %s after test", t.NSBaseName), func() { + CleanupNamespaces(t.Ctx, t.Client, t.NSBaseName) + }) + By("Clean backups after test", func() { + DeleteBackups(t.Ctx, t.Client) + }) + } + return nil } func (t *TestCase) GetTestMsg() *TestMSG { diff --git a/test/e2e/testdata/enable_api_group_versions/case-a-source-v1beta1.yaml b/test/e2e/testdata/enable_api_group_versions/case-a-source-v1beta1.yaml new file mode 100644 index 0000000000..c72bb52004 --- /dev/null +++ b/test/e2e/testdata/enable_api_group_versions/case-a-source-v1beta1.yaml @@ -0,0 +1,90 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: music-system/music-serving-cert + controller-gen.kubebuilder.io/version: v0.2.5 + name: rocknrollbands.music.example.io +spec: + group: music.example.io + names: + kind: RocknrollBand + listKind: RocknrollBandList + plural: rocknrollbands + singular: rocknrollband + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: RocknrollBand is the Schema for the rocknrollbands API + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: RocknrollBandSpec defines the desired state of RocknrollBand + properties: + genre: + type: string + leadSinger: + type: string + numberComponents: + format: int32 + type: integer + type: object + status: + description: RocknrollBandStatus defines the observed state of RocknrollBand + properties: + lastPlayed: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + - name: v1alpha1 + schema: + openAPIV3Schema: + description: RocknrollBand is the Schema for the rocknrollbands API + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: RocknrollBandSpec defines the desired state of RocknrollBand + properties: + genre: + type: string + numberComponents: + format: int32 + type: integer + type: object + status: + description: RocknrollBandStatus defines the observed state of RocknrollBand + properties: + lastPlayed: + type: string + required: + - lastPlayed + type: object + type: object + served: true + storage: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/test/e2e/testdata/storage-class/aws.yaml b/test/e2e/testdata/storage-class/aws.yaml new file mode 100644 index 0000000000..29e79c8eac --- /dev/null +++ b/test/e2e/testdata/storage-class/aws.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: e2e-storage-class +provisioner: kubernetes.io/aws-ebs +parameters: + type: gp2 +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer diff --git a/test/e2e/testdata/storage-class/azure.yaml b/test/e2e/testdata/storage-class/azure.yaml new file mode 100644 index 0000000000..aa9451bf67 --- /dev/null +++ b/test/e2e/testdata/storage-class/azure.yaml @@ -0,0 +1,11 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: e2e-storage-class +provisioner: kubernetes.io/azure-disk +parameters: + cachingmode: ReadOnly + kind: Managed + storageaccounttype: StandardSSD_LRS +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer diff --git a/test/e2e/testdata/storage-class/gcp.yaml b/test/e2e/testdata/storage-class/gcp.yaml new file mode 100644 index 0000000000..30ee8fc1fa --- /dev/null +++ b/test/e2e/testdata/storage-class/gcp.yaml @@ -0,0 +1,13 @@ +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + labels: + addonmanager.kubernetes.io/mode: EnsureExists + name: e2e-storage-class +parameters: + type: pd-standard +provisioner: kubernetes.io/gce-pd +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer + diff --git a/test/e2e/testdata/storage-class/vsphere.yaml b/test/e2e/testdata/storage-class/vsphere.yaml new file mode 100644 index
0000000000..3d06ffdf0b --- /dev/null +++ b/test/e2e/testdata/storage-class/vsphere.yaml @@ -0,0 +1,11 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: e2e-storage-class + annotations: + storageclass.kubernetes.io/is-default-class: "false" +parameters: + StoragePolicyName: "vSAN Default Storage Policy" +provisioner: csi.vsphere.vmware.com +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer \ No newline at end of file diff --git a/test/e2e/types.go b/test/e2e/types.go index bb54562e19..068d957ebb 100644 --- a/test/e2e/types.go +++ b/test/e2e/types.go @@ -63,6 +63,7 @@ type VerleroConfig struct { ClientToInstallVelero *TestClient DefaultClient *TestClient StandbyClient *TestClient + UploaderType string } type SnapshotCheckPoint struct { @@ -86,6 +87,7 @@ type BackupConfig struct { ExcludeResources string IncludeClusterResources bool OrderedResources string + UseRestic bool } type VeleroCLI2Version struct { diff --git a/test/e2e/upgrade/upgrade.go b/test/e2e/upgrade/upgrade.go index 488e25961b..a46231b464 100644 --- a/test/e2e/upgrade/upgrade.go +++ b/test/e2e/upgrade/upgrade.go @@ -70,11 +70,14 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC } }) AfterEach(func() { - if VeleroCfg.InstallVelero { - if !VeleroCfg.Debug { - By(fmt.Sprintf("Delete sample workload namespace %s", upgradeNamespace), func() { - DeleteNamespace(context.Background(), *VeleroCfg.ClientToInstallVelero, upgradeNamespace, true) - }) + if !VeleroCfg.Debug { + By("Clean backups after test", func() { + DeleteBackups(context.Background(), *VeleroCfg.ClientToInstallVelero) + }) + By(fmt.Sprintf("Delete sample workload namespace %s", upgradeNamespace), func() { + DeleteNamespace(context.Background(), *VeleroCfg.ClientToInstallVelero, upgradeNamespace, true) + }) + if VeleroCfg.InstallVelero { By("Uninstall Velero", func() { Expect(VeleroUninstall(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace)).To(Succeed()) @@ -109,6 +112,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC tmpCfgForOldVeleroInstall.VeleroImage = "" tmpCfgForOldVeleroInstall.ResticHelperImage = "" tmpCfgForOldVeleroInstall.Plugins = "" + tmpCfgForOldVeleroInstall.UploaderType = "" Expect(VeleroInstall(context.Background(), &tmpCfgForOldVeleroInstall, useVolumeSnapshots)).To(Succeed()) @@ -140,11 +144,13 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC BackupCfg.BackupLocation = "" BackupCfg.UseVolumeSnapshots = useVolumeSnapshots BackupCfg.Selector = "" + //TODO: pay attention to this param + BackupCfg.UseRestic = true Expect(VeleroBackupNamespace(oneHourTimeout, tmpCfg.UpgradeFromVeleroCLI, - tmpCfg.VeleroNamespace, BackupCfg)).ShouldNot(HaveOccurred(), func() string { - err = VeleroBackupLogs(context.Background(), tmpCfg.UpgradeFromVeleroCLI, - tmpCfg.VeleroNamespace, backupName) - return "Get backup logs" + tmpCfg.VeleroNamespace, BackupCfg)).To(Succeed(), func() string { + RunDebug(context.Background(), tmpCfg.UpgradeFromVeleroCLI, tmpCfg.VeleroNamespace, + BackupCfg.BackupName, "") + return "Fail to backup workload" }) }) diff --git a/test/e2e/util/common/common.go b/test/e2e/util/common/common.go index 17e942cab5..f7a43fc71b 100644 --- a/test/e2e/util/common/common.go +++ b/test/e2e/util/common/common.go @@ -38,7 +38,7 @@ func GetListBy2Pipes(ctx context.Context, cmdline1, cmdline2, cmdline3 OsCommand _ = c2.Wait() _ = c3.Wait() - fmt.Println(&b2) + //fmt.Println(&b2) scanner := 
bufio.NewScanner(&b2) var ret []string for scanner.Scan() { diff --git a/test/e2e/util/k8s/common.go b/test/e2e/util/k8s/common.go index 6a378268cb..55c3721604 100644 --- a/test/e2e/util/k8s/common.go +++ b/test/e2e/util/k8s/common.go @@ -24,6 +24,7 @@ import ( "github.com/pkg/errors" "golang.org/x/net/context" + corev1 "k8s.io/api/core/v1" corev1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -49,7 +50,6 @@ func CreateSecretFromFiles(ctx context.Context, client TestClient, namespace str data[key] = contents } - secret := builder.ForSecret(namespace, name).Data(data).Result() _, err := client.ClientGo.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) return err @@ -64,7 +64,7 @@ func WaitForPods(ctx context.Context, client TestClient, namespace string, pods checkPod, err := client.ClientGo.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { //Should ignore "etcdserver: request timed out" kind of errors, try to get pod status again before timeout. - fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...", namespace, podName, corev1api.PodRunning))) + fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...\n", namespace, podName, corev1api.PodRunning))) return false, nil } // If any pod is still waiting we don't need to check any more so return and wait for next poll interval @@ -124,6 +124,45 @@ func GetPvByPvc(ctx context.Context, namespace, pvc string) ([]string, error) { return common.GetListBy2Pipes(ctx, *CmdLine1, *CmdLine2, *CmdLine3) } +func CRDShouldExist(ctx context.Context, name string) error { + return CRDCountShouldBe(ctx, name, 1) +} + +func CRDShouldNotExist(ctx context.Context, name string) error { + return CRDCountShouldBe(ctx, name, 0) +} + +func CRDCountShouldBe(ctx context.Context, name string, count int) error { + crdList, err := GetCRD(ctx, name) + if err != nil { + return errors.Wrap(err, "Fail to get CRDs") + } + len := len(crdList) + if len != count { + return errors.New(fmt.Sprintf("CRD count is expected as %d instead of %d", count, len)) + } + return nil +} + +func GetCRD(ctx context.Context, name string) ([]string, error) { + CmdLine1 := &common.OsCommandLine{ + Cmd: "kubectl", + Args: []string{"get", "crd"}, + } + + CmdLine2 := &common.OsCommandLine{ + Cmd: "grep", + Args: []string{name}, + } + + CmdLine3 := &common.OsCommandLine{ + Cmd: "awk", + Args: []string{"{print $1}"}, + } + + return common.GetListBy2Pipes(ctx, *CmdLine1, *CmdLine2, *CmdLine3) +} + func AddLabelToPv(ctx context.Context, pv, label string) error { return exec.CommandContext(ctx, "kubectl", "label", "pv", pv, label).Run() } @@ -140,6 +179,12 @@ func AddLabelToPod(ctx context.Context, podName, namespace, label string) error return exec.CommandContext(ctx, "kubectl", args...).Run() } +func AddLabelToCRD(ctx context.Context, crd, label string) error { + args := []string{"label", "crd", crd, label} + fmt.Println(args) + return exec.CommandContext(ctx, "kubectl", args...).Run() +} + func KubectlApplyByFile(ctx context.Context, file string) error { args := []string{"apply", "-f", file, "--force=true"} return exec.CommandContext(ctx, "kubectl", args...).Run() @@ -154,3 +199,86 @@ func KubectlConfigUseContext(ctx context.Context, kubectlContext string) error { fmt.Print(stderr) return err } + +func GetAPIVersions(client *TestClient, name string) ([]string, error) { + var version []string + APIGroup, err := 
client.ClientGo.Discovery().ServerGroups() + if err != nil { + return nil, errors.Wrap(err, "Fail to get server API groups") + } + for _, group := range APIGroup.Groups { + fmt.Println(group.Name) + if group.Name == name { + for _, v := range group.Versions { + fmt.Println(v.Version) + version = append(version, v.Version) + } + return version, nil + } + } + return nil, errors.New("Server API groups is empty") +} + +func GetPVByPodName(client TestClient, namespace, podName string) (string, error) { + pvcList, err := GetPvcByPodName(context.Background(), namespace, podName) + if err != nil { + return "", err + } + if len(pvcList) != 1 { + return "", errors.New(fmt.Sprintf("Only 1 PVC of pod %s should be found under namespace %s", podName, namespace)) + } + pvList, err := GetPvByPvc(context.Background(), namespace, pvcList[0]) + if err != nil { + return "", err + } + if len(pvList) != 1 { + return "", errors.New(fmt.Sprintf("Only 1 PV of PVC %s pod %s should be found under namespace %s", pvcList[0], podName, namespace)) + } + pv_value, err := GetPersistentVolume(context.Background(), client, "", pvList[0]) + fmt.Println(pv_value.Annotations["pv.kubernetes.io/provisioned-by"]) + if err != nil { + return "", err + } + return pv_value.Name, nil +} +func CreatePodWithPVC(client TestClient, ns, podName, sc string, volumeNameList []string) (*corev1.Pod, error) { + volumes := []corev1.Volume{} + for _, volume := range volumeNameList { + pvc, err := CreatePVC(client, ns, fmt.Sprintf("pvc-%s", volume), sc) + if err != nil { + return nil, err + } + volumes = append(volumes, corev1.Volume{ + Name: volume, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + ReadOnly: false, + }, + }, + }) + } + pod, err := CreatePod(client, ns, podName, volumes) + if err != nil { + return nil, err + } + return pod, nil +} + +func CreateFileToPod(ctx context.Context, namespace, podName, volume, filename, content string) error { + arg := []string{"exec", "-n", namespace, "-c", podName, podName, + "--", "/bin/sh", "-c", fmt.Sprintf("echo ns-%s pod-%s volume-%s > /%s/%s", namespace, podName, volume, volume, filename)} + cmd := exec.CommandContext(ctx, "kubectl", arg...) + fmt.Printf("Kubectl exec cmd =%v\n", cmd) + return cmd.Run() +} +func ReadFileFromPodVolume(ctx context.Context, namespace, podName, volume, filename string) (string, error) { + arg := []string{"exec", "-n", namespace, "-c", podName, podName, + "--", "cat", fmt.Sprintf("/%s/%s", volume, filename)} + cmd := exec.CommandContext(ctx, "kubectl", arg...) 
+ fmt.Printf("Kubectl exec cmd =%v\n", cmd) + stdout, stderr, err := veleroexec.RunCommand(cmd) + fmt.Print(stdout) + fmt.Print(stderr) + return stdout, err +} diff --git a/test/e2e/util/k8s/namespace.go b/test/e2e/util/k8s/namespace.go index cbdc644eb5..767aba7ba5 100644 --- a/test/e2e/util/k8s/namespace.go +++ b/test/e2e/util/k8s/namespace.go @@ -100,7 +100,7 @@ func CleanupNamespacesWithPoll(ctx context.Context, client TestClient, nsBaseNam if err != nil { return errors.Wrapf(err, "Could not delete namespace %s", checkNamespace.Name) } - fmt.Printf("Delete namespace %s", checkNamespace.Name) + fmt.Printf("Delete namespace %s\n", checkNamespace.Name) } } return nil diff --git a/test/e2e/util/k8s/persistentvolumes.go b/test/e2e/util/k8s/persistentvolumes.go index 2f414cda44..cc7962cbe8 100644 --- a/test/e2e/util/k8s/persistentvolumes.go +++ b/test/e2e/util/k8s/persistentvolumes.go @@ -18,11 +18,49 @@ package k8s import ( "context" + "fmt" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +func CreatePersistentVolume(client TestClient, name string) (*corev1.PersistentVolume, error) { + + p := &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "manual", + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Capacity: corev1.ResourceList{corev1.ResourceName(corev1.ResourceStorage): resource.MustParse("2Gi")}, + + PersistentVolumeSource: corev1.PersistentVolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/demo", + }, + }, + }, + } + + return client.ClientGo.CoreV1().PersistentVolumes().Create(context.TODO(), p, metav1.CreateOptions{}) +} + func GetPersistentVolume(ctx context.Context, client TestClient, namespace string, persistentVolume string) (*corev1.PersistentVolume, error) { return client.ClientGo.CoreV1().PersistentVolumes().Get(ctx, persistentVolume, metav1.GetOptions{}) } + +func AddAnnotationToPersistentVolume(ctx context.Context, client TestClient, namespace string, persistentVolume, key string) (*corev1.PersistentVolume, error) { + newPV, err := GetPersistentVolume(ctx, client, "", persistentVolume) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("Fail to ge PV %s", persistentVolume)) + } + ann := newPV.ObjectMeta.Annotations + ann[key] = persistentVolume + newPV.Annotations = ann + + return client.ClientGo.CoreV1().PersistentVolumes().Update(ctx, newPV, metav1.UpdateOptions{}) +} diff --git a/test/e2e/util/k8s/pod.go b/test/e2e/util/k8s/pod.go new file mode 100644 index 0000000000..4caf299a33 --- /dev/null +++ b/test/e2e/util/k8s/pod.go @@ -0,0 +1,79 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package k8s + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*corev1.Pod, error) { + vmList := []corev1.VolumeMount{} + for _, v := range volumes { + vmList = append(vmList, corev1.VolumeMount{ + Name: v.Name, + MountPath: "/" + v.Name, + }) + } + p := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: name, + Image: "gcr.io/velero-gcp/busybox", + Command: []string{"sleep", "3600"}, + VolumeMounts: vmList, + }, + }, + Volumes: volumes, + }, + } + + return client.ClientGo.CoreV1().Pods(ns).Create(context.TODO(), p, metav1.CreateOptions{}) +} + +func GetPod(ctx context.Context, client TestClient, namespace string, pod string) (*corev1.Pod, error) { + return client.ClientGo.CoreV1().Pods(namespace).Get(ctx, pod, metav1.GetOptions{}) +} + +func AddAnnotationToPod(ctx context.Context, client TestClient, namespace, podName string, ann map[string]string) (*corev1.Pod, error) { + + newPod, err := GetPod(ctx, client, namespace, podName) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("Fail to ge pod %s in namespace %s", podName, namespace)) + } + newAnn := newPod.ObjectMeta.Annotations + if newAnn == nil { + newAnn = make(map[string]string) + } + for k, v := range ann { + fmt.Println(k, v) + newAnn[k] = v + } + newPod.Annotations = newAnn + fmt.Println(newPod.Annotations) + + return client.ClientGo.CoreV1().Pods(namespace).Update(ctx, newPod, metav1.UpdateOptions{}) +} diff --git a/test/e2e/util/k8s/pvc.go b/test/e2e/util/k8s/pvc.go new file mode 100644 index 0000000000..f6c30a0d67 --- /dev/null +++ b/test/e2e/util/k8s/pvc.go @@ -0,0 +1,51 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package k8s
+
+import (
+	"context"
+
+	corev1 "k8s.io/api/core/v1"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func CreatePVC(client TestClient, ns, name, sc string) (*corev1.PersistentVolumeClaim, error) {
+	pvc := &corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Spec: corev1.PersistentVolumeClaimSpec{
+			AccessModes: []corev1.PersistentVolumeAccessMode{
+				corev1.ReadWriteOnce,
+			},
+			Resources: corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{
+					corev1.ResourceStorage: resource.MustParse("1Gi"),
+				},
+			},
+			StorageClassName: &sc,
+		},
+	}
+
+	return client.ClientGo.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{})
+}
+
+func GetPVC(ctx context.Context, client TestClient, namespace string, persistentVolume string) (*corev1.PersistentVolume, error) {
+	return client.ClientGo.CoreV1().PersistentVolumes().Get(ctx, persistentVolume, metav1.GetOptions{})
+}
diff --git a/test/e2e/util/velero/install.go b/test/e2e/util/velero/install.go
index 0b3e5962ba..b40ec17216 100644
--- a/test/e2e/util/velero/install.go
+++ b/test/e2e/util/velero/install.go
@@ -90,6 +90,7 @@ func VeleroInstall(ctx context.Context, veleroCfg *VerleroConfig, useVolumeSnaps
 	veleroInstallOptions.UseRestic = !useVolumeSnapshots
 	veleroInstallOptions.Image = veleroCfg.VeleroImage
 	veleroInstallOptions.Namespace = veleroCfg.VeleroNamespace
+	veleroInstallOptions.UploaderType = veleroCfg.UploaderType
 	GCFrequency, _ := time.ParseDuration(veleroCfg.GCFrequency)
 	veleroInstallOptions.GarbageCollectionFrequency = GCFrequency

@@ -213,6 +214,10 @@ func installVeleroServer(ctx context.Context, cli string, options *installOption
 		args = append(args, fmt.Sprintf("--garbage-collection-frequency=%v", options.GarbageCollectionFrequency))
 	}

+	if len(options.UploaderType) > 0 {
+		args = append(args, fmt.Sprintf("--uploader-type=%v", options.UploaderType))
+	}
+
 	if err := createVelereResources(ctx, cli, namespace, args, options.RegistryCredentialFile, options.ResticHelperImage); err != nil {
 		return err
 	}
diff --git a/test/e2e/util/velero/velero_utils.go b/test/e2e/util/velero/velero_utils.go
index aa54a0cfff..f8495865c1 100644
--- a/test/e2e/util/velero/velero_utils.go
+++ b/test/e2e/util/velero/velero_utils.go
@@ -37,6 +37,8 @@ import (
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/util/wait"

+	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
+
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	cliinstall "github.com/vmware-tanzu/velero/pkg/cmd/cli/install"
 	"github.com/vmware-tanzu/velero/pkg/cmd/util/flag"
@@ -321,10 +323,15 @@ func VeleroBackupNamespace(ctx context.Context, veleroCLI, veleroNamespace strin
 	if backupCfg.UseVolumeSnapshots {
 		args = append(args, "--snapshot-volumes")
 	} else {
-		args = append(args, "--default-volumes-to-restic")
+		if backupCfg.UseRestic {
+			args = append(args, "--default-volumes-to-restic")
+		} else {
+			args = append(args, "--default-volumes-to-fs-backup")
+		}
+
 		// To workaround https://github.com/vmware-tanzu/velero-plugin-for-vsphere/issues/347 for vsphere plugin v1.1.1
 		// if the "--snapshot-volumes=false" isn't specified explicitly, the vSphere plugin will always take snapshots
-		// for the volumes even though the "--default-volumes-to-restic" is specified
+		// for the volumes even though the "--default-volumes-to-fs-backup" is specified
 		// TODO This can be removed if the logic of vSphere plugin bump up to 1.3
 		args = append(args, "--snapshot-volumes=false")
 	}
@@ -360,7 +367,7 @@ func VeleroBackupExcludeNamespaces(ctx context.Context, veleroCLI string, velero
 	args := []string{
 		"--namespace", veleroNamespace, "create", "backup", backupName,
 		"--exclude-namespaces", namespaces,
-		"--default-volumes-to-restic", "--wait",
+		"--default-volumes-to-fs-backup", "--wait",
 	}
 	return VeleroBackupExec(ctx, veleroCLI, veleroNamespace, backupName, args)
 }
@@ -371,7 +378,7 @@ func VeleroBackupIncludeNamespaces(ctx context.Context, veleroCLI string, velero
 	args := []string{
 		"--namespace", veleroNamespace, "create", "backup", backupName,
 		"--include-namespaces", namespaces,
-		"--default-volumes-to-restic", "--wait",
+		"--default-volumes-to-fs-backup", "--wait",
 	}
 	return VeleroBackupExec(ctx, veleroCLI, veleroNamespace, backupName, args)
 }
@@ -429,14 +436,19 @@ func VeleroScheduleCreate(ctx context.Context, veleroCLI string, veleroNamespace

 func VeleroCmdExec(ctx context.Context, veleroCLI string, args []string) error {
 	cmd := exec.CommandContext(ctx, veleroCLI, args...)
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
+	var errBuf, outBuf bytes.Buffer
+	cmd.Stderr = io.MultiWriter(os.Stderr, &errBuf)
+	cmd.Stdout = io.MultiWriter(os.Stdout, &outBuf)
 	fmt.Printf("velero cmd =%v\n", cmd)
 	err := cmd.Run()
+	retAll := outBuf.String() + " " + errBuf.String()
+	if strings.Contains(strings.ToLower(retAll), "failed") {
+		return errors.Wrap(err, fmt.Sprintf("velero cmd =%v returned with failure\n", cmd))
+	}
 	if err != nil {
 		return err
 	}
-	return err
+	return nil
 }

 func VeleroBackupLogs(ctx context.Context, veleroCLI string, veleroNamespace string, backupName string) error {
@@ -866,6 +878,43 @@ func GetBackupsFromBsl(ctx context.Context, veleroCLI, bslName string) ([]string
 	return common.GetListBy2Pipes(ctx, *CmdLine1, *CmdLine2, *CmdLine3)
 }

+func GetScheduledBackupsCreationTime(ctx context.Context, veleroCLI, bslName, scheduleName string) ([]string, error) {
+	var creationTimes []string
+	backups, err := GetBackupsCreationTime(ctx, veleroCLI, bslName)
+	if err != nil {
+		return nil, err
+	}
+	for _, b := range backups {
+		if strings.Contains(b, scheduleName) {
+			creationTimes = append(creationTimes, b)
+		}
+	}
+	return creationTimes, nil
+}
+func GetBackupsCreationTime(ctx context.Context, veleroCLI, bslName string) ([]string, error) {
+	args1 := []string{"get", "backups"}
+	createdTime := "$1,\",\" $5,$6,$7,$8"
+	if strings.TrimSpace(bslName) != "" {
+		args1 = append(args1, "-l", "velero.io/storage-location="+bslName)
+	}
+	CmdLine1 := &common.OsCommandLine{
+		Cmd:  veleroCLI,
+		Args: args1,
+	}
+
+	CmdLine2 := &common.OsCommandLine{
+		Cmd:  "awk",
+		Args: []string{"{print " + createdTime + "}"},
+	}
+
+	CmdLine3 := &common.OsCommandLine{
+		Cmd:  "tail",
+		Args: []string{"-n", "+2"},
+	}
+
+	return common.GetListBy2Pipes(ctx, *CmdLine1, *CmdLine2, *CmdLine3)
+}
+
 func GetAllBackups(ctx context.Context, veleroCLI string) ([]string, error) {
 	return GetBackupsFromBsl(ctx, veleroCLI, "")
 }
@@ -971,7 +1020,6 @@ func GetSnapshotCheckPoint(client TestClient, VeleroCfg VerleroConfig, expectCou
 }

 func GetBackupTTL(ctx context.Context, veleroNamespace, backupName string) (string, error) {
-
 	checkSnapshotCmd := exec.CommandContext(ctx, "kubectl",
 		"get", "backup", "-n", veleroNamespace, backupName, "-o=jsonpath='{.spec.ttl}'")
 	fmt.Printf("checkSnapshotCmd cmd =%v\n", checkSnapshotCmd)
@@ -979,15 +1027,8 @@ func GetBackupTTL(ctx context.Context, veleroNamespace, backupName string) (stri
 	if err != nil {
 		fmt.Print(stdout)
 		fmt.Print(stderr)
-		return "", errors.Wrap(err, "failed to verify")
+		return "", errors.Wrap(err, fmt.Sprintf("failed to run command %s", checkSnapshotCmd))
 	}
-	// lines := strings.Split(stdout, "\n")
-	// complete := true
-	// for _, curLine := range lines {
-	// 	fmt.Println(curLine)
-
-	// }
-	// return complete, nil
 	return stdout, err
 }

@@ -1008,3 +1049,29 @@ func GetVersionList(veleroCli, veleroVersion string) []VeleroCLI2Version {
 	}
 	return veleroCLI2VersionList
 }
+func DeleteBackups(ctx context.Context, client TestClient) error {
+	backupList := new(velerov1api.BackupList)
+	if err := client.Kubebuilder.List(ctx, backupList, &kbclient.ListOptions{Namespace: VeleroCfg.VeleroNamespace}); err != nil {
+		return fmt.Errorf("failed to list backup object in %s namespace with err %v", VeleroCfg.VeleroNamespace, err)
+	}
+	for _, backup := range backupList.Items {
+		fmt.Printf("Backup %s is going to be deleted...\n", backup.Name)
+		if err := VeleroBackupDelete(ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, backup.Name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func GetSchedule(ctx context.Context, veleroNamespace, scheduleName string) (string, error) {
+	checkScheduleCmd := exec.CommandContext(ctx, "kubectl",
+		"get", "schedule", "-n", veleroNamespace, scheduleName, "-o=jsonpath='{.metadata.creationTimestamp}'")
+	fmt.Printf("Cmd =%v\n", checkScheduleCmd)
+	stdout, stderr, err := veleroexec.RunCommand(checkScheduleCmd)
+	if err != nil {
+		fmt.Print(stdout)
+		fmt.Print(stderr)
+		return "", errors.Wrap(err, fmt.Sprintf("failed to run command %s", checkScheduleCmd))
+	}
+	return stdout, err
+}
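For reference, a minimal sketch of how the e2e helpers introduced above (CreatePVC, CreatePod, AddAnnotationToPod, DeleteBackups) might be combined in a test. This is illustrative only and not part of the patch: the namespace, pod, claim, and storage-class names are made up, and the import paths and aliases assume the test/e2e/util packages touched in this diff.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"

	"github.com/vmware-tanzu/velero/test/e2e/util/k8s"
	veleroutil "github.com/vmware-tanzu/velero/test/e2e/util/velero"
)

// demoFSBackupSetup provisions a PVC and a pod that mounts it, opts the volume
// into pod volume (file-system) backup via the backup-volumes annotation, and
// finally cleans up any backups left in the configured Velero namespace.
// Sketch only; all names here are assumptions for illustration.
func demoFSBackupSetup(ctx context.Context, client k8s.TestClient) error {
	// Create a 1Gi claim using an assumed "default" storage class.
	pvc, err := k8s.CreatePVC(client, "demo-ns", "demo-pvc", "default")
	if err != nil {
		return err
	}

	// Run a busybox pod that mounts the claim (CreatePod mounts each volume at "/<name>").
	vol := corev1.Volume{
		Name: "demo-vol",
		VolumeSource: corev1.VolumeSource{
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name},
		},
	}
	if _, err := k8s.CreatePod(client, "demo-ns", "demo-pod", []corev1.Volume{vol}); err != nil {
		return err
	}

	// Opt the volume into file-system backup for backups taken without --snapshot-volumes.
	ann := map[string]string{"backup.velero.io/backup-volumes": "demo-vol"}
	if _, err := k8s.AddAnnotationToPod(ctx, client, "demo-ns", "demo-pod", ann); err != nil {
		return err
	}

	// Remove any backups left behind in the Velero namespace once the test is done.
	return veleroutil.DeleteBackups(ctx, client)
}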