diff --git a/.github/workflows/crds-verify-kind.yaml b/.github/workflows/crds-verify-kind.yaml index 161b25a135..8aebd88a9e 100644 --- a/.github/workflows/crds-verify-kind.yaml +++ b/.github/workflows/crds-verify-kind.yaml @@ -49,7 +49,7 @@ jobs: run: | make local - # Check the common CLI against all kubernetes versions + # Check the common CLI against all Kubernetes versions crd-check: needs: build-cli runs-on: ubuntu-latest diff --git a/.github/workflows/e2e-test-kind.yaml b/.github/workflows/e2e-test-kind.yaml index 63b04d33d2..20a1fc4cd9 100644 --- a/.github/workflows/e2e-test-kind.yaml +++ b/.github/workflows/e2e-test-kind.yaml @@ -53,7 +53,7 @@ jobs: run: | IMAGE=velero VERSION=pr-test make container docker save velero:pr-test -o ./velero.tar - # Run E2E test against all kubernetes versions on kind + # Run E2E test against all Kubernetes versions on kind run-e2e-test: needs: build runs-on: ubuntu-latest diff --git a/.github/workflows/pr-codespell.yml b/.github/workflows/pr-codespell.yml index ddc2acd4e9..fd4b88c4f1 100644 --- a/.github/workflows/pr-codespell.yml +++ b/.github/workflows/pr-codespell.yml @@ -14,7 +14,44 @@ jobs: uses: codespell-project/actions-codespell@master with: # ignore the config/.../crd.go file as it's generated binary data that is edited elswhere. - skip: .git,*.png,*.jpg,*.woff,*.ttf,*.gif,*.ico,./config/crd/v1beta1/crds/crds.go,./config/crd/v1/crds/crds.go,,./config/crd/v2alpha1/crds/crds.go,./go.sum,./LICENSE + skip: .git,*.png,*.jpg,*.woff,*.ttf,*.gif,*.ico,./config/crd/v1beta1/crds/crds.go,./config/crd/v1/crds/crds.go,./config/crd/v2alpha1/crds/crds.go,./go.sum,./LICENSE ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast check_filenames: true check_hidden: true + + - name: Velero.io word list check + shell: bash {0} + run: | + IGNORE_COMMENT="Velero.io word list : ignore" + FILES_TO_CHECK=$(find . -type f \ + ! -path "./.git/*" \ + ! -path "./site/content/docs/v*" \ + ! -path "./changelogs/CHANGELOG-*" \ + ! -path "./.github/workflows/pr-codespell.yml" \ + ! -path "./site/static/fonts/Metropolis/Open Font License.md" \ + ! -regex '.*\.\(png\|jpg\|woff\|ttf\|gif\|ico\|svg\)' + ) + function check_word_in_files() { + local word=$1 + + xargs grep -Iinr "$word" <<< "$FILES_TO_CHECK" | \ + grep -v "$IGNORE_COMMENT" | \ + grep -i --color=always "$word" && \ + EXIT_STATUS=1 + } + function check_word_case_sensitive_in_files() { + local word=$1 + + xargs grep -Inr "$word" <<< "$FILES_TO_CHECK" | \ + grep -v "$IGNORE_COMMENT" | \ + grep --color=always "$word" && \ + EXIT_STATUS=1 + } + EXIT_STATUS=0 + check_word_case_sensitive_in_files ' kubernetes ' + check_word_in_files 'on-premise\b' + check_word_in_files 'back-up' + check_word_in_files 'plug-in' + check_word_in_files 'whitelist' + check_word_in_files 'blacklist' + exit $EXIT_STATUS diff --git a/ADOPTERS.md b/ADOPTERS.md index ac5b1761c9..ef0da35610 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -22,10 +22,10 @@ Below is a list of adopters of Velero in **production environments** that have publicly shared the details of how they use it. **[BitGo][20]** -BitGo uses Velero backup and restore capabilities to seamlessly provision and scale fullnode statefulsets on the fly as well as having it serve an integral piece for our kubernetes disaster-recovery story. +BitGo uses Velero backup and restore capabilities to seamlessly provision and scale fullnode statefulsets on the fly as well as having it serve an integral piece for our Kubernetes disaster-recovery story. 
**[Bugsnag][30]** -We use Velero for managing backups of an internal instance of our on-premise clustered solution. We also recommend our users of [on-premise Bugsnag installations][31] use Velero for [managing their own backups][32]. +We use Velero for managing backups of an internal instance of our on-premise clustered solution. We also recommend our users of [on-premise Bugsnag installations](https://www.bugsnag.com/on-premise) use Velero for [managing their own backups](https://docs.bugsnag.com/on-premise/clustered/backup-restore/). **[Banzai Cloud][60]** [Banzai Cloud Pipeline][61] is a Kubernetes-based microservices platform that integrates services needed for Day-1 and Day-2 operations along with first-class support both for on-prem and hybrid multi-cloud deployments. We use Velero to periodically [backup and restore these clusters in case of disasters][62]. @@ -83,8 +83,6 @@ If you would like to add your logo to a future `Adopters of Velero` section on [ [20]: https://bitgo.com [30]: https://bugsnag.com -[31]: https://www.bugsnag.com/on-premise -[32]: https://docs.bugsnag.com/on-premise/clustered/backup-restore/ [40]: https://kyma-project.io [41]: https://kyma-project.io/docs/components/backup/#overview-overview diff --git a/CHANGELOG.md b/CHANGELOG.md index f8da3fd3cb..eeeeb08101 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,9 @@ ## Current release: - * [CHANGELOG-1.9.md][19] + * [CHANGELOG-1.11.md][21] ## Older releases: + * [CHANGELOG-1.10.md][20] + * [CHANGELOG-1.9.md][19] * [CHANGELOG-1.8.md][18] * [CHANGELOG-1.7.md][17] * [CHANGELOG-1.6.md][16] @@ -22,6 +24,8 @@ * [CHANGELOG-0.3.md][1] +[21]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.11.md +[20]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.10.md [19]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.9.md [18]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.8.md [17]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.7.md diff --git a/assets/README.md b/assets/README.md index 0e32fa6512..dd3ff65fe1 100644 --- a/assets/README.md +++ b/assets/README.md @@ -1,6 +1,6 @@ # Velero Assets -This folder contains logo images for Velero in gray (for light backgrounds) and white (for dark backgrounds like black tshirts or dark mode!) – horizontal and stacked… in .eps and .svg. +This folder contains logo images for Velero in gray (for light backgrounds) and white (for dark backgrounds like black t-shirts or dark mode!) – horizontal and stacked… in .eps and .svg. 
## Some general guidelines for usage diff --git a/changelogs/unreleased/6397-Nutrymaco b/changelogs/unreleased/6397-Nutrymaco new file mode 100644 index 0000000000..318884c736 --- /dev/null +++ b/changelogs/unreleased/6397-Nutrymaco @@ -0,0 +1 @@ +Add missing CompletionTimestamp and metrics when restore moved into terminal phase in restoreOperationsReconciler \ No newline at end of file diff --git a/changelogs/unreleased/6415-mateusoliveira43 b/changelogs/unreleased/6415-mateusoliveira43 new file mode 100644 index 0000000000..b4351f6ba2 --- /dev/null +++ b/changelogs/unreleased/6415-mateusoliveira43 @@ -0,0 +1 @@ +fix: Typos and add more spell checking rules to CI \ No newline at end of file diff --git a/changelogs/unreleased/6476-reasonerjt b/changelogs/unreleased/6476-reasonerjt new file mode 100644 index 0000000000..f2b3d88867 --- /dev/null +++ b/changelogs/unreleased/6476-reasonerjt @@ -0,0 +1 @@ +Delete the expired deletebackuprequests that are stuck in "InProgress" \ No newline at end of file diff --git a/changelogs/unreleased/6481-blackpiglet b/changelogs/unreleased/6481-blackpiglet new file mode 100644 index 0000000000..2b54c14161 --- /dev/null +++ b/changelogs/unreleased/6481-blackpiglet @@ -0,0 +1 @@ +Remove PVC's selector in backup's PVC action. \ No newline at end of file diff --git a/changelogs/unreleased/6533-Lyndon-Li b/changelogs/unreleased/6533-Lyndon-Li new file mode 100644 index 0000000000..ec1b719578 --- /dev/null +++ b/changelogs/unreleased/6533-Lyndon-Li @@ -0,0 +1 @@ +Fix issue #6534, reset PVB CR's StorageLocation to the latest one during backup sync as same as the backup CR. Also fix similar problem with DataUploadResult for data mover restore. \ No newline at end of file diff --git a/changelogs/unreleased/6544-allenxu404 b/changelogs/unreleased/6544-allenxu404 new file mode 100644 index 0000000000..88b7f28a58 --- /dev/null +++ b/changelogs/unreleased/6544-allenxu404 @@ -0,0 +1 @@ +check if restore crd exist before operating restores \ No newline at end of file diff --git a/changelogs/unreleased/6547-reasonerjt b/changelogs/unreleased/6547-reasonerjt new file mode 100644 index 0000000000..e5ca5d5c0f --- /dev/null +++ b/changelogs/unreleased/6547-reasonerjt @@ -0,0 +1 @@ +Delete moved snapshots when the backup is deleted \ No newline at end of file diff --git a/config/crd/v1/bases/velero.io_downloadrequests.yaml b/config/crd/v1/bases/velero.io_downloadrequests.yaml index f4daaec53b..2d9e2a9819 100644 --- a/config/crd/v1/bases/velero.io_downloadrequests.yaml +++ b/config/crd/v1/bases/velero.io_downloadrequests.yaml @@ -55,7 +55,7 @@ spec: - CSIBackupVolumeSnapshotContents type: string name: - description: Name is the name of the kubernetes resource with + description: Name is the name of the Kubernetes resource with which the file is associated. 
type: string required: diff --git a/config/crd/v1/bases/velero.io_restores.yaml b/config/crd/v1/bases/velero.io_restores.yaml index 8816a3d541..16c14526ed 100644 --- a/config/crd/v1/bases/velero.io_restores.yaml +++ b/config/crd/v1/bases/velero.io_restores.yaml @@ -55,7 +55,7 @@ spec: type: array existingResourcePolicy: description: ExistingResourcePolicy specifies the restore behavior - for the kubernetes resource to be restored + for the Kubernetes resource to be restored nullable: true type: string hooks: diff --git a/design/CLI/PoC/base/CRDs.yaml b/design/CLI/PoC/base/CRDs.yaml index ae2f5f1936..d911c40d5e 100644 --- a/design/CLI/PoC/base/CRDs.yaml +++ b/design/CLI/PoC/base/CRDs.yaml @@ -509,7 +509,7 @@ spec: - CSIBackupVolumeSnapshotContents type: string name: - description: Name is the name of the kubernetes resource with + description: Name is the name of the Kubernetes resource with which the file is associated. type: string required: diff --git a/design/Implemented/backup-resources-order.md b/design/Implemented/backup-resources-order.md index 92d6a6bebb..48131ccdb6 100644 --- a/design/Implemented/backup-resources-order.md +++ b/design/Implemented/backup-resources-order.md @@ -6,7 +6,7 @@ During backup process, user may need to back up resources of specific type in so (Ex: primary-secondary database pods in a cluster). ## Goals -- Enable user to specify an order of back up resources belong to specific resource type +- Enable user to specify an order of backup resources belong to specific resource type ## Alternatives Considered - Use a plugin to backup an resources and all the sub resources. For example use a plugin for StatefulSet and backup pods belong to the StatefulSet in specific order. This plugin solution is not generic and requires plugin for each resource type. diff --git a/design/Implemented/existing-resource-policy_design.md b/design/Implemented/existing-resource-policy_design.md index 84b3fb086f..7d119b8e73 100644 --- a/design/Implemented/existing-resource-policy_design.md +++ b/design/Implemented/existing-resource-policy_design.md @@ -1,6 +1,6 @@ # Add support for `ExistingResourcePolicy` to restore API ## Abstract -Velero currently does not support any restore policy on kubernetes resources that are already present in-cluster. Velero skips over the restore of the resource if it already exists in the namespace/cluster irrespective of whether the resource present in the restore is the same or different from the one present on the cluster. It is desired that Velero gives the option to the user to decide whether or not the resource in backup should overwrite the one present in the cluster. +Velero currently does not support any restore policy on Kubernetes resources that are already present in-cluster. Velero skips over the restore of the resource if it already exists in the namespace/cluster irrespective of whether the resource present in the restore is the same or different from the one present on the cluster. It is desired that Velero gives the option to the user to decide whether or not the resource in backup should overwrite the one present in the cluster. ## Background As of Today, Velero will skip over the restoration of resources that already exist in the cluster. The current workflow followed by Velero is (Using a `service` that is backed up for example): @@ -145,7 +145,7 @@ type RestoreSpec struct { . . . 
-// ExistingResourcePolicy specifies the restore behaviour for the kubernetes resource to be restored +// ExistingResourcePolicy specifies the restore behaviour for the Kubernetes resource to be restored // +optional ExistingResourcePolicy PolicyType @@ -167,7 +167,7 @@ type RestoreSpec struct { . . . -// ExistingResourcePolicyConfig specifies the restore behaviour for a particular/list of kubernetes resource(s) to be restored +// ExistingResourcePolicyConfig specifies the restore behaviour for a particular/list of Kubernetes resource(s) to be restored // +optional ExistingResourcePolicyConfig []PolicyConfiguration @@ -205,11 +205,11 @@ type RestoreSpec struct { . . . -// ExistingResourceDefaultPolicy specifies the default restore behaviour for the kubernetes resource to be restored +// ExistingResourceDefaultPolicy specifies the default restore behaviour for the Kubernetes resource to be restored // +optional existingResourceDefaultPolicy PolicyType -// ExistingResourcePolicyOverrides specifies the restore behaviour for a particular/list of kubernetes resource(s) to be restored +// ExistingResourcePolicyOverrides specifies the restore behaviour for a particular/list of Kubernetes resource(s) to be restored // +optional existingResourcePolicyOverrides []PolicyConfiguration diff --git a/design/Implemented/general-progress-monitoring.md b/design/Implemented/general-progress-monitoring.md index 831f186e83..35c2ed2ae7 100644 --- a/design/Implemented/general-progress-monitoring.md +++ b/design/Implemented/general-progress-monitoring.md @@ -5,9 +5,9 @@ This is intended as a replacement for the previously-approved Upload Progress Mo snapshot uploads to include what was previously called Async Backup/Restore Item Actions. This updated design should handle the combined set of use cases for those previously separate designs. -Volume snapshotter plug-in are used by Velero to take snapshots of persistent volume contents. +Volume snapshotter plugin are used by Velero to take snapshots of persistent volume contents. Depending on the underlying storage system, those snapshots may be available to use immediately, -they may be uploaded to stable storage internally by the plug-in or they may need to be uploaded after +they may be uploaded to stable storage internally by the plugin or they may need to be uploaded after the snapshot has been taken. We would like for Velero to continue on to the next part of the backup as quickly as possible but we would also like the backup to not be marked as complete until it is a usable backup. We'd also eventually like to bring the control of upload under the control of Velero and allow the user to make decisions @@ -36,7 +36,7 @@ backup and restore *could* make use of this framework if their actions are refac Backup/RestoreItemActions. - Data Movers - - Data movers are asynchronous processes executed inside backup/restore item actions that applies to a specific kubernetes resources. A common use case for data mover is to backup/restore PVCs whose data we want to move to some form of backup storage outside of using velero kopia/restic implementations. + - Data movers are asynchronous processes executed inside backup/restore item actions that applies to a specific Kubernetes resources. A common use case for data mover is to backup/restore PVCs whose data we want to move to some form of backup storage outside of using velero kopia/restic implementations. 
- Workflow - User takes velero backup of PVC A - BIA plugin applies to PVCs with compatible storage driver @@ -91,7 +91,7 @@ ID). ### Internal configuration and management In this model, movement of the snapshot to stable storage is under the control of the snapshot -plug-in. Decisions about where and when the snapshot gets moved to stable storage are not +plugin. Decisions about where and when the snapshot gets moved to stable storage are not directly controlled by Velero. This is the model for the current VolumeSnapshot plugins. ### Velero controlled management @@ -120,7 +120,7 @@ will remain in the "WaitingForPluginOperations" phase until all BIA/RIA operatio (for example, for a volume snapshotter, until all data has been successfully moved to persistent storage). The backup/restore will not fail once it reaches this phase, although an error return from a plugin could cause a backup or restore to move to "PartiallyFailed". If the backup is -deleted (cancelled), the plug-ins will attempt to delete the snapshots and stop the data movement - +deleted (cancelled), the plugins will attempt to delete the snapshots and stop the data movement - this may not be possible with all storage systems. In addition, for backups (but not restores), there will also be two additional phases, "Finalizing" @@ -145,7 +145,7 @@ terminates When work on the backup/restore begins, it moves to the "InProgress" phase. It remains in the "InProgress" phase until all pre/post execution hooks have been executed, all snapshots have been taken and the Kubernetes metadata and backup/restore info is safely written to the object store -plug-in. +plugin. In the current implementation, Restic backups will move data during the "InProgress" phase. In the future, it may be possible to combine a snapshot with a Restic (or equivalent) backup which would @@ -263,7 +263,7 @@ InProgress backups will not have a `velero-backup.json` present in the object st reconciliation, backups which do not have a `velero-backup.json` object in the object store will be ignored. -## Plug-in API changes +## Plugin API changes ### OperationProgress struct @@ -289,15 +289,15 @@ Two new methods will be added to the VolumeSnapshotter interface: Cancel(snapshotID string) (error) Progress will report the current status of a snapshot upload. This should be callable at -any time after the snapshot has been taken. In the event a plug-in is restarted, if the operationID +any time after the snapshot has been taken. In the event a plugin is restarted, if the operationID (snapshot ID) continues to be valid it should be possible to retrieve the progress. `error` is set if there is an issue retrieving progress. If the snapshot is has encountered an error during the upload, the error should be returned in OperationProgress and error should be nil. -### BackupItemAction and RestoreItemAction plug-in changes +### BackupItemAction and RestoreItemAction plugin changes -Currently CSI snapshots and the Velero Plug-in for vSphere are implemented as BackupItemAction +Currently CSI snapshots and the Velero Plugin for vSphere are implemented as BackupItemAction plugins. 
While the majority of BackupItemAction plugins do not take snapshots or upload data, this functionality is useful for any longstanding plugin operation managed by an external process/controller so we will modify BackupItemAction and RestoreItemAction to optionally return an @@ -333,7 +333,7 @@ will be modified: // initiate asynchronous actions, and a second slice of ResourceIdentifiers specifying related items // which should be backed up after all asynchronous operations have completed. This last field will be // ignored if operationID is empty, and should not be filled in unless the resource must be updated in the - // backup after async operations complete (i.e. some of the item's kubernetes metadata will be updated + // backup after async operations complete (i.e. some of the item's Kubernetes metadata will be updated // during the asynch operation which will be required during restore) Execute(item runtime.Unstructured, backup *api.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) @@ -464,10 +464,10 @@ snapshot to stable storage. CSI snapshots expose the _readyToUse_ state that, i indicates that the snapshot has been transferred to durable storage and is ready to be used. The CSI BackupItemAction.Progress method will poll that field and when completed, return completion. -## vSphere plug-in +## vSphere plugin -The vSphere Plug-in for Velero uploads snapshots to S3 in the background. This is also a -BackupItemAction plug-in, it will check the status of the Upload records for the snapshot and return +The vSphere Plugin for Velero uploads snapshots to S3 in the background. This is also a +BackupItemAction plugin, it will check the status of the Upload records for the snapshot and return progress. ## Backup workflow changes @@ -553,14 +553,14 @@ RestoreItemAction new plugin APIs New backup phases New restore phases Defer uploading `velero-backup.json` -AWS EBS plug-in Progress implementation +AWS EBS plugin Progress implementation Operation monitoring Implementation of `-itemoperations.json.gz` file Implementation of `-itemoperations.json.gz` file Restart logic Change in reconciliation logic to ignore backups/restores that have not completed -CSI plug-in BackupItemAction Progress implementation -vSphere plug-in BackupItemAction Progress implementation (vSphere plug-in team) +CSI plugin BackupItemAction Progress implementation +vSphere plugin BackupItemAction Progress implementation (vSphere plugin team) # Open Questions diff --git a/design/Implemented/multiple-label-selectors_design.md b/design/Implemented/multiple-label-selectors_design.md index bb698cb480..9c301f3b51 100644 --- a/design/Implemented/multiple-label-selectors_design.md +++ b/design/Implemented/multiple-label-selectors_design.md @@ -3,7 +3,7 @@ As of today Velero supports filtering of resources based on single label selector per backup. It is desired that Velero support backing up of resources based on multiple labels (OR logic). -**Note:** This solution is required because kubernetes label selectors only allow AND logic of labels. +**Note:** This solution is required because Kubernetes label selectors only allow AND logic of labels. 
## Background Currently, Velero's Backup/Restore API has a spec field `LabelSelector` which helps in filtering of resources based on diff --git a/design/Implemented/plugin-backup-and-restore-progress-design.md b/design/Implemented/plugin-backup-and-restore-progress-design.md index dbbc67b40b..46544f01bb 100644 --- a/design/Implemented/plugin-backup-and-restore-progress-design.md +++ b/design/Implemented/plugin-backup-and-restore-progress-design.md @@ -393,7 +393,7 @@ Deletion of `VolumePluginBackup` CR can be delegated to plugin. Plugin can perfo ### 'core' Velero client/server required changes - Creation of the VolumePluginBackup/VolumePluginRestore CRDs at installation time -- Persistence of VolumePluginBackup CRs towards the end of the back up operation +- Persistence of VolumePluginBackup CRs towards the end of the backup operation - As part of backup synchronization, VolumePluginBackup CRs related to the backup will be synced. - Deletion of VolumePluginBackup when volumeshapshotter's DeleteSnapshot is called - Deletion of VolumePluginRestore as part of handling deletion of Restore CR diff --git a/design/Implemented/velero-debug.md b/design/Implemented/velero-debug.md index 2a6fe7128c..7da56e48f2 100644 --- a/design/Implemented/velero-debug.md +++ b/design/Implemented/velero-debug.md @@ -102,7 +102,7 @@ The code will consolidate the input parameters and execution context of the `vel https://github.com/vmware-tanzu/crash-diagnostics/blob/v0.3.4/exec/executor.go#L17 ## Alternatives Considered -The collection could be done via the kubernetes client-go API, but such integration is not necessarily trivial to implement, therefore, `crashd` is preferred approach +The collection could be done via the Kubernetes client-go API, but such integration is not necessarily trivial to implement, therefore, `crashd` is preferred approach ## Security Considerations - The starlark script will be embedded into the velero binary, and the byte slice will be passed to the `exec.Execute` func directly, so there’s little risk that the script will be modified before being executed. diff --git a/design/secrets.md b/design/secrets.md index 1090b22e08..b1999c037f 100644 --- a/design/secrets.md +++ b/design/secrets.md @@ -160,10 +160,10 @@ while the cloud credential will always be used for the VolumeSnapshotter. ## Velero Plugin for vSphere compatibility -The vSphere plugin is implemented as a BackupItemAction and shares the credentials of the AWS plug-in for S3 access. +The vSphere plugin is implemented as a BackupItemAction and shares the credentials of the AWS plugin for S3 access. The backup storage location is passed in _Backup.Spec.StorageLocation_. Currently the plugin retrieves the S3 bucket and server from the BSL and creates a BackupRespositoryClaim with that and the credentials retrieved from the cloud credential. -The plug-in will need to be modified to retrieve the credentials field from the BSL and use that credential in the +The plugin will need to be modified to retrieve the credentials field from the BSL and use that credential in the BackupRepositoryClaim. ## Backwards compatibility @@ -185,7 +185,7 @@ In order to support parallelism, Velero will need to be able to use multiple cre ObjectStore. Currently backups are single threaded and a single BSL will be used throughout the entire backup. The only existing points of parallelism are when a user downloads logs for a backup or the BackupStorageLocationReconciler reconciles while a backup or restore is running. 
In the current code, `download_request_controller.go` and -`backup_storage_location_controller.go` create a new plug-in manager and hence another ObjectStore plugin in +`backup_storage_location_controller.go` create a new plugin manager and hence another ObjectStore plugin in parallel with the ObjectStore plugin servicing a backup or restore (if one is running). ## Alternatives Considered diff --git a/design/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md b/design/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md index 9790052f79..f1cc7f61ee 100644 --- a/design/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md +++ b/design/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md @@ -3,7 +3,7 @@ ## Glossary & Abbreviation **BR**: Backup & Restore -**Backup Storage**: The storage that meets BR requirements, for example, scalable, durable, cost-effective, etc., therefore, Backup Storage is usually implemented as Object storage or File System storage, it may be on-premise or in cloud. Backup Storage is not BR specific necessarily, so it usually doesn’t provide most of the BR related features. On the other hand, storage vendors may provide BR specific storages that include some BR features like deduplication, compression, encryption, etc. For a standalone BR solution (i.e. Velero), the Backup Storage is not part of the solution, it is provided by users, so the BR solution should not assume the BR related features are always available from the Backup Storage. +**Backup Storage**: The storage that meets BR requirements, for example, scalable, durable, cost-effective, etc., therefore, Backup Storage is usually implemented as Object storage or File System storage, it may be on-premises or in cloud. Backup Storage is not BR specific necessarily, so it usually doesn’t provide most of the BR related features. On the other hand, storage vendors may provide BR specific storages that include some BR features like deduplication, compression, encryption, etc. For a standalone BR solution (i.e. Velero), the Backup Storage is not part of the solution, it is provided by users, so the BR solution should not assume the BR related features are always available from the Backup Storage. **Backup Repository**: Backup repository is layered between BR data movers and Backup Storage to provide BR related features. Backup Repository is a part of BR solution, so generally, BR solution by default leverages the Backup Repository to provide the features because Backup Repository is always available; when Backup Storage provides duplicated features, and the latter is more beneficial (i.e., performance is better), BR solution should have the ability to opt to use the Backup Storage’s implementation. **Data Mover**: The BR module to read/write data from/to workloads, the aim is to eliminate the differences of workloads. **TCO**: Total Cost of Ownership. This is a general criteria for products/solutions, but also means a lot for BR solutions. For example, this means what kind of backup storage (and its cost) it requires, the retention policy of backup copies, the ways to remove backup data redundancy, etc. diff --git a/design/upload-progress.md b/design/upload-progress.md index 22481a74ca..556d39cb1e 100644 --- a/design/upload-progress.md +++ b/design/upload-progress.md @@ -1,8 +1,8 @@ # Upload Progress Monitoring -Volume snapshotter plug-in are used by Velero to take snapshots of persistent volume contents. 
+Volume snapshotter plugin are used by Velero to take snapshots of persistent volume contents. Depending on the underlying storage system, those snapshots may be available to use immediately, -they may be uploaded to stable storage internally by the plug-in or they may need to be uploaded after +they may be uploaded to stable storage internally by the plugin or they may need to be uploaded after the snapshot has been taken. We would like for Velero to continue on to the next part of the backup as quickly as possible but we would also like the backup to not be marked as complete until it is a usable backup. We'd also eventually like to bring the control of upload under the control of Velero and allow the user to make decisions @@ -23,7 +23,7 @@ Restic - Does not go through the volume snapshot path. Restic backups will bloc - Enable monitoring of operations that continue after snapshotting operations have completed - Keep non-usable backups (upload/persistence has not finished) from appearing as completed -- Minimize change to volume snapshot and BackupItemAction plug-ins +- Minimize change to volume snapshot and BackupItemAction plugins ## Non-goals - Unification of BackupItemActions and VolumeSnapshotters @@ -32,7 +32,7 @@ Restic - Does not go through the volume snapshot path. Restic backups will bloc ### Internal configuration and management In this model, movement of the snapshot to stable storage is under the control of the snapshot -plug-in. Decisions about where and when the snapshot gets moved to stable storage are not +plugin. Decisions about where and when the snapshot gets moved to stable storage are not directly controlled by Velero. This is the model for the current VolumeSnapshot plugins. ### Velero controlled management @@ -56,7 +56,7 @@ slow the progress of the system without adding any actual benefit to the user. A new backup phase, "Uploading" will be introduced. When a backup has entered this phase, Velero is free to start another backup. The backup will remain in the "Uploading" phase until all data has been successfully moved to persistent storage. The backup will not fail once it reaches -this phase, it will continuously retry moving the data. If the backup is deleted (cancelled), the plug-ins will +this phase, it will continuously retry moving the data. If the backup is deleted (cancelled), the plugins will attempt to delete the snapshots and stop the data movement - this may not be possible with all storage systems. @@ -74,7 +74,7 @@ If the backup request is incorrectly formed, it goes to the "FailedValidation" p ### InProgress When work on the backup begins, it moves to the "InProgress" phase. It remains in the "InProgress" phase until all pre/post execution hooks have been executed, all snapshots have been taken and the -Kubernetes metadata and backup info is safely written to the object store plug-in. +Kubernetes metadata and backup info is safely written to the object store plugin. In the current implementation, Restic backups will move data during the "InProgress" phase. In the future, it may be possible to combine a snapshot with a Restic (or equivalent) backup which @@ -146,7 +146,7 @@ Completed, Failed or PartialFailure InProgress backups will not have a `velero-backup.json` present in the object store. During reconciliation, backups which do not have a `velero-backup.json` object in the object store will be ignored. 
-## Plug-in API changes +## Plugin API changes ### UploadProgress struct @@ -166,23 +166,23 @@ do not have a `velero-backup.json` object in the object store will be ignored. ### VolumeSnapshotter changes -A new method will be added to the VolumeSnapshotter interface (details depending on plug-in versioning spec) +A new method will be added to the VolumeSnapshotter interface (details depending on plugin versioning spec) UploadProgress(snapshotID string) (UploadProgress, error) UploadProgress will report the current status of a snapshot upload. This should be callable at any time after the snapshot -has been taken. In the event a plug-in is restarted, if the snapshotID continues to be valid it should be possible to +has been taken. In the event a plugin is restarted, if the snapshotID continues to be valid it should be possible to retrieve the progress. `error` is set if there is an issue retrieving progress. If the snapshot is has encountered an error during the upload, the error should be return in UploadProgress and error should be nil. -### SnapshotItemAction plug-in +### SnapshotItemAction plugin -Currently CSI snapshots and the Velero Plug-in for vSphere are implemented as BackupItemAction plugins. The majority of +Currently CSI snapshots and the Velero Plugin for vSphere are implemented as BackupItemAction plugins. The majority of BackupItemAction plugins do not take snapshots or upload data so rather than modify BackupItemAction we introduce a new -plug-ins, SnapshotItemAction. SnapshotItemAction will be used in place of BackupItemAction for -the CSI snapshots and the Velero Plug-in for vSphere and will return a snapshot ID in addition to the item itself. +plugins, SnapshotItemAction. SnapshotItemAction will be used in place of BackupItemAction for +the CSI snapshots and the Velero Plugin for vSphere and will return a snapshot ID in addition to the item itself. The SnapshotItemAction plugin identifier as well as the Item and Snapshot ID will be stored in the `-itemsnapshots.json.gz`. When checking for progress, this info will be used to select the appropriate @@ -248,9 +248,9 @@ stable storage. CSI snapshots expose the _readyToUse_ state that, in the case o has been transferred to durable storage and is ready to be used. The CSI BackupItemProgress.Progress method will poll that field and when completed, return completion. -## vSphere plug-in +## vSphere plugin -The vSphere Plug-in for Velero uploads snapshots to S3 in the background. This is also a BackupItemAction plug-in, +The vSphere Plugin for Velero uploads snapshots to S3 in the background. This is also a BackupItemAction plugin, it will check the status of the Upload records for the snapshot and return progress. ## Backup workflow changes @@ -281,13 +281,13 @@ VolumeSnapshotter new plugin APIs BackupItemProgress new plugin interface New backup phases Defer uploading `velero-backup.json` -AWS EBS plug-in UploadProgress implementation +AWS EBS plugin UploadProgress implementation Upload monitoring Implementation of `-itemsnapshots.json.gz` file Restart logic Change in reconciliation logic to ignore backups that have not completed -CSI plug-in BackupItemProgress implementation -vSphere plug-in BackupItemProgress implementation (vSphere plug-in team) +CSI plugin BackupItemProgress implementation +vSphere plugin BackupItemProgress implementation (vSphere plugin team) # Future Fragile/Durable snapshot tracking Futures are here for reference, they may change radically when actually implemented. 
@@ -296,11 +296,11 @@ Some storage systems have the ability to provide different levels of protection and "Durable". Currently, Velero expects snapshots to be Durable (they should be able to survive the destruction of the cluster and the storage it is using). In the future we would like the ability to take advantage of snapshots that are Fragile. For example, vSphere snapshots are Fragile (they reside in the same datastore as the virtual disk). The Velero -Plug-in for vSphere uses a vSphere local/fragile snapshot to get a consistent snapshot, then uploads the data to S3 to +Plugin for vSphere uses a vSphere local/fragile snapshot to get a consistent snapshot, then uploads the data to S3 to make it Durable. In the current design, upload progress will not be complete until the snapshot is ready to use and Durable. It is possible, however, to restore data from a vSphere snapshot before it has been made Durable, and this is a capability we'd like to expose in the future. Other storage systems implement this functionality as well. We will be moving -the control of the data movement from the vSphere plug-in into Velero. +the control of the data movement from the vSphere plugin into Velero. Some storage system, such as EBS, are only capable of creating Durable snapshots. There is no usable intermediate Fragile stage. diff --git a/golangci.yaml b/golangci.yaml index 8eaec0facb..bdd7b5d068 100644 --- a/golangci.yaml +++ b/golangci.yaml @@ -201,12 +201,12 @@ linters-settings: - shadow disable-all: false depguard: - list-type: blacklist + list-type: blacklist # Velero.io word list : ignore include-go-root: false packages: - github.com/sirupsen/logrus packages-with-error-message: - # specify an error message to output when a blacklisted package is used + # specify an error message to output when a denylisted package is used - github.com/sirupsen/logrus: "logging is allowed only by logutils.Log" lll: # max line length, lines longer will be reported. Default is 120. diff --git a/pkg/apis/velero/v1/download_request_types.go b/pkg/apis/velero/v1/download_request_types.go index a5ce777939..7c7be53373 100644 --- a/pkg/apis/velero/v1/download_request_types.go +++ b/pkg/apis/velero/v1/download_request_types.go @@ -49,7 +49,7 @@ type DownloadTarget struct { // Kind is the type of file to download. Kind DownloadTargetKind `json:"kind"` - // Name is the name of the kubernetes resource with which the file is associated. + // Name is the name of the Kubernetes resource with which the file is associated. 
Name string `json:"name"` } diff --git a/pkg/apis/velero/v1/labels_annotations.go b/pkg/apis/velero/v1/labels_annotations.go index b35cd6c6fb..e16d947efb 100644 --- a/pkg/apis/velero/v1/labels_annotations.go +++ b/pkg/apis/velero/v1/labels_annotations.go @@ -57,6 +57,10 @@ const ( // DataUploadLabel is the label key used to identify the dataupload for snapshot backup pod DataUploadLabel = "velero.io/data-upload" + // DataUploadSnapshotInfoLabel is used to identify the configmap that contains the snapshot info of a data upload + // normally the value of the label should the "true" or "false" + DataUploadSnapshotInfoLabel = "velero.io/data-upload-snapshot-info" + // DataDownloadLabel is the label key used to identify the datadownload for snapshot restore pod DataDownloadLabel = "velero.io/data-download" diff --git a/pkg/apis/velero/v1/restore_types.go b/pkg/apis/velero/v1/restore_types.go index c1dafa7406..a961d5c176 100644 --- a/pkg/apis/velero/v1/restore_types.go +++ b/pkg/apis/velero/v1/restore_types.go @@ -109,7 +109,7 @@ type RestoreSpec struct { // +optional Hooks RestoreHooks `json:"hooks,omitempty"` - // ExistingResourcePolicy specifies the restore behavior for the kubernetes resource to be restored + // ExistingResourcePolicy specifies the restore behavior for the Kubernetes resource to be restored // +optional // +nullable ExistingResourcePolicy PolicyType `json:"existingResourcePolicy,omitempty"` diff --git a/pkg/backup/backup_pv_action.go b/pkg/backup/backup_pv_action.go index 56ff1f4b94..a636f83d89 100644 --- a/pkg/backup/backup_pv_action.go +++ b/pkg/backup/backup_pv_action.go @@ -72,6 +72,14 @@ func (a *PVCAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runti pvc.Spec.DataSourceRef = nil } + // When StorageClassName is set to "", it means no StorageClass is specified, + // even the default StorageClass is not used. Only keep the Selector for this case. + // https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reserving-a-persistentvolume + if pvc.Spec.StorageClassName == nil || *pvc.Spec.StorageClassName != "" { + // Clean the selector to make the PVC to dynamically allocate PV. + pvc.Spec.Selector = nil + } + // remove label selectors with "velero.io/" prefixing in the key which is left by Velero restore if pvc.Spec.Selector != nil && pvc.Spec.Selector.MatchLabels != nil { for k := range pvc.Spec.Selector.MatchLabels { diff --git a/pkg/backup/backup_pv_action_test.go b/pkg/backup/backup_pv_action_test.go index 8cb04e2210..d9e0a5b333 100644 --- a/pkg/backup/backup_pv_action_test.go +++ b/pkg/backup/backup_pv_action_test.go @@ -21,10 +21,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" velerotest "github.com/vmware-tanzu/velero/pkg/test" @@ -55,6 +59,61 @@ func TestBackupPVAction(t *testing.T) { assert.NoError(t, err) assert.Len(t, additional, 0) + // Action should clean the spec.Selector when the StorageClassName is not set. 
+ input := builder.ForPersistentVolumeClaim("abc", "abc").VolumeName("pv").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"abc": "abc"}}).Phase(corev1.ClaimBound).Result() + inputUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(input) + require.NoError(t, err) + item, additional, err := a.Execute(&unstructured.Unstructured{Object: inputUnstructured}, backup) + require.NoError(t, err) + require.Len(t, additional, 1) + modifiedPVC := new(corev1.PersistentVolumeClaim) + require.NoError(t, runtime.DefaultUnstructuredConverter.FromUnstructured(item.UnstructuredContent(), modifiedPVC)) + require.Nil(t, modifiedPVC.Spec.Selector) + + // Action should clean the spec.Selector when the StorageClassName is set to specific StorageClass + input2 := builder.ForPersistentVolumeClaim("abc", "abc").VolumeName("pv").StorageClass("sc1").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"abc": "abc"}}).Phase(corev1.ClaimBound).Result() + inputUnstructured2, err2 := runtime.DefaultUnstructuredConverter.ToUnstructured(input2) + require.NoError(t, err2) + item2, additional2, err2 := a.Execute(&unstructured.Unstructured{Object: inputUnstructured2}, backup) + require.NoError(t, err2) + require.Len(t, additional2, 1) + modifiedPVC2 := new(corev1.PersistentVolumeClaim) + require.NoError(t, runtime.DefaultUnstructuredConverter.FromUnstructured(item2.UnstructuredContent(), modifiedPVC2)) + require.Nil(t, modifiedPVC2.Spec.Selector) + + // Action should keep the spec.Selector when the StorageClassName is set to "" + input3 := builder.ForPersistentVolumeClaim("abc", "abc").StorageClass("").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"abc": "abc"}}).VolumeName("pv").Phase(corev1.ClaimBound).Result() + inputUnstructured3, err3 := runtime.DefaultUnstructuredConverter.ToUnstructured(input3) + require.NoError(t, err3) + item3, additional3, err3 := a.Execute(&unstructured.Unstructured{Object: inputUnstructured3}, backup) + require.NoError(t, err3) + require.Len(t, additional3, 1) + modifiedPVC3 := new(corev1.PersistentVolumeClaim) + require.NoError(t, runtime.DefaultUnstructuredConverter.FromUnstructured(item3.UnstructuredContent(), modifiedPVC3)) + require.Equal(t, input3.Spec.Selector, modifiedPVC3.Spec.Selector) + + // Action should delete label started with"velero.io/" from the spec.Selector when the StorageClassName is set to "" + input4 := builder.ForPersistentVolumeClaim("abc", "abc").StorageClass("").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"velero.io/abc": "abc", "abc": "abc"}}).VolumeName("pv").Phase(corev1.ClaimBound).Result() + inputUnstructured4, err4 := runtime.DefaultUnstructuredConverter.ToUnstructured(input4) + require.NoError(t, err4) + item4, additional4, err4 := a.Execute(&unstructured.Unstructured{Object: inputUnstructured4}, backup) + require.NoError(t, err4) + require.Len(t, additional4, 1) + modifiedPVC4 := new(corev1.PersistentVolumeClaim) + require.NoError(t, runtime.DefaultUnstructuredConverter.FromUnstructured(item4.UnstructuredContent(), modifiedPVC4)) + require.Equal(t, &metav1.LabelSelector{MatchLabels: map[string]string{"abc": "abc"}}, modifiedPVC4.Spec.Selector) + + // Action should clean the spec.Selector when the StorageClassName has value + input5 := builder.ForPersistentVolumeClaim("abc", "abc").StorageClass("sc1").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"velero.io/abc": "abc", "abc": "abc"}}).VolumeName("pv").Phase(corev1.ClaimBound).Result() + inputUnstructured5, err5 := 
runtime.DefaultUnstructuredConverter.ToUnstructured(input5) + require.NoError(t, err5) + item5, additional5, err5 := a.Execute(&unstructured.Unstructured{Object: inputUnstructured5}, backup) + require.NoError(t, err5) + require.Len(t, additional5, 1) + modifiedPVC5 := new(corev1.PersistentVolumeClaim) + require.NoError(t, runtime.DefaultUnstructuredConverter.FromUnstructured(item5.UnstructuredContent(), modifiedPVC5)) + require.Nil(t, modifiedPVC5.Spec.Selector) + // non-empty spec.volumeName when status.phase is empty // should result in no error and no additional items pvc.Object["spec"].(map[string]interface{})["volumeName"] = "myVolume" diff --git a/pkg/backup/service_account_action_test.go b/pkg/backup/service_account_action_test.go index cd42c2e122..b17b9748cb 100644 --- a/pkg/backup/service_account_action_test.go +++ b/pkg/backup/service_account_action_test.go @@ -77,7 +77,7 @@ func (f FakeV1beta1ClusterRoleBindingLister) List() ([]ClusterRoleBinding, error func TestServiceAccountActionAppliesTo(t *testing.T) { // Instantiating the struct directly since using - // NewServiceAccountAction requires a full kubernetes clientset + // NewServiceAccountAction requires a full Kubernetes clientset a := &ServiceAccountAction{} actual, err := a.AppliesTo() diff --git a/pkg/cmd/cli/uninstall/uninstall.go b/pkg/cmd/cli/uninstall/uninstall.go index 79ed048eb3..ec2de648fc 100644 --- a/pkg/cmd/cli/uninstall/uninstall.go +++ b/pkg/cmd/cli/uninstall/uninstall.go @@ -212,7 +212,18 @@ func deleteNamespace(ctx context.Context, kbClient kbclient.Client, namespace st } func deleteResourcesWithFinalizer(ctx context.Context, kbClient kbclient.Client, namespace string) error { - // delete restores + //check if restore crd exists + v1crd := &apiextv1.CustomResourceDefinition{} + key := kbclient.ObjectKey{Name: "restores.velero.io"} + if err := kbClient.Get(ctx, key, v1crd); err != nil { + if apierrors.IsNotFound(err) { + return nil + } else { + return err + } + } + + // delete all the restores restoreList := &velerov1api.RestoreList{} if err := kbClient.List(ctx, restoreList, &kbclient.ListOptions{Namespace: namespace}); err != nil { return err diff --git a/pkg/cmd/server/plugin/plugin.go b/pkg/cmd/server/plugin/plugin.go index 45e45389ac..3a4f97107e 100644 --- a/pkg/cmd/server/plugin/plugin.go +++ b/pkg/cmd/server/plugin/plugin.go @@ -21,6 +21,8 @@ import ( "github.com/spf13/cobra" apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "github.com/vmware-tanzu/velero/pkg/datamover" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/backup" "github.com/vmware-tanzu/velero/pkg/client" @@ -59,7 +61,9 @@ func NewCommand(f client.Factory) *cobra.Command { RegisterRestoreItemAction("velero.io/apiservice", newAPIServiceRestoreItemAction). RegisterRestoreItemAction("velero.io/admission-webhook-configuration", newAdmissionWebhookConfigurationAction). RegisterRestoreItemAction("velero.io/secret", newSecretRestoreItemAction(f)). - RegisterRestoreItemAction("velero.io/dataupload", newDataUploadRetrieveAction(f)) + RegisterRestoreItemAction("velero.io/dataupload", newDataUploadRetrieveAction(f)). 
+ RegisterDeleteItemAction("velero.io/dataupload-delete", newDateUploadDeleteItemAction(f)) + if !features.IsEnabled(velerov1api.APIGroupVersionsFeatureFlag) { // Do not register crd-remap-version BIA if the API Group feature flag is enabled, so that the v1 CRD can be backed up pluginServer = pluginServer.RegisterBackupItemAction("velero.io/crd-remap-version", newRemapCRDVersionAction(f)) @@ -248,10 +252,21 @@ func newSecretRestoreItemAction(f client.Factory) plugincommon.HandlerInitialize func newDataUploadRetrieveAction(f client.Factory) plugincommon.HandlerInitializer { return func(logger logrus.FieldLogger) (interface{}, error) { - client, err := f.KubeClient() + client, err := f.KubebuilderClient() + if err != nil { + return nil, err + } + + return restore.NewDataUploadRetrieveAction(logger, client), nil + } +} + +func newDateUploadDeleteItemAction(f client.Factory) plugincommon.HandlerInitializer { + return func(logger logrus.FieldLogger) (interface{}, error) { + client, err := f.KubebuilderClient() if err != nil { return nil, err } - return restore.NewDataUploadRetrieveAction(logger, client.CoreV1().ConfigMaps(f.Namespace())), nil + return datamover.NewDataUploadDeleteAction(logger, client), nil } } diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index e0c774373e..e30d2b30a5 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -436,7 +436,7 @@ func (s *server) run() error { } // namespaceExists returns nil if namespace can be successfully -// gotten from the kubernetes API, or an error otherwise. +// gotten from the Kubernetes API, or an error otherwise. func (s *server) namespaceExists(namespace string) error { s.logger.WithField("namespace", namespace).Info("Checking existence of namespace.") @@ -879,7 +879,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string restoreOpsMap, ) if err := r.SetupWithManager(s.mgr); err != nil { - s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupOperations) + s.logger.Fatal(err, "unable to create controller", "controller", controller.RestoreOperations) } } diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go index 4511a50a0c..a654958be0 100644 --- a/pkg/controller/backup_deletion_controller.go +++ b/pkg/controller/backup_deletion_controller.go @@ -22,6 +22,11 @@ import ( "fmt" "time" + corev1 "k8s.io/api/core/v1" + + velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" + "github.com/vmware-tanzu/velero/pkg/util/boolptr" + jsonpatch "github.com/evanphx/json-patch" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -126,15 +131,16 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Since we use the reconciler along with the PeriodicalEnqueueSource, there may be reconciliation triggered by // stale requests. 
- if dbr.Status.Phase == velerov1api.DeleteBackupRequestPhaseProcessed { + if dbr.Status.Phase == velerov1api.DeleteBackupRequestPhaseProcessed || + dbr.Status.Phase == velerov1api.DeleteBackupRequestPhaseInProgress { age := r.clock.Now().Sub(dbr.CreationTimestamp.Time) if age >= deleteBackupRequestMaxAge { // delete the expired request - log.Debug("The request is expired, deleting it.") + log.Debugf("The request is expired, status: %s, deleting it.", dbr.Status.Phase) if err := r.Delete(ctx, dbr); err != nil { log.WithError(err).Error("Error deleting DeleteBackupRequest") } } else { - log.Info("The request has been processed, skip.") + log.Infof("The request has status '%s', skip.", dbr.Status.Phase) } return ctrl.Result{}, nil } @@ -314,6 +320,33 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque } } + if boolptr.IsSetToTrue(backup.Spec.SnapshotMoveData) { + log.Info("Removing snapshot data by data mover") + if deleteErrs := r.deleteMovedSnapshots(ctx, backup); len(deleteErrs) > 0 { + for _, err := range deleteErrs { + errs = append(errs, err.Error()) + } + } + duList := &velerov2alpha1.DataUploadList{} + log.Info("Removing local datauploads") + if err := r.Client.List(ctx, duList, &client.ListOptions{ + Namespace: backup.Namespace, + LabelSelector: labels.SelectorFromSet(map[string]string{ + velerov1api.BackupNameLabel: label.GetValidName(backup.Name), + }), + }); err != nil { + log.WithError(err).Error("Error listing datauploads") + errs = append(errs, err.Error()) + } else { + for i := range duList.Items { + du := duList.Items[i] + if err := r.Delete(ctx, &du); err != nil { + errs = append(errs, err.Error()) + } + } + } + } + if backupStore != nil { log.Info("Removing backup from backup storage") if err := backupStore.DeleteBackup(backup.Name); err != nil { @@ -469,6 +502,41 @@ func (r *backupDeletionReconciler) deletePodVolumeSnapshots(ctx context.Context, return errs } +func (r *backupDeletionReconciler) deleteMovedSnapshots(ctx context.Context, backup *velerov1api.Backup) []error { + if r.repoMgr == nil { + return nil + } + list := &corev1.ConfigMapList{} + if err := r.Client.List(ctx, list, &client.ListOptions{ + Namespace: backup.Namespace, + LabelSelector: labels.SelectorFromSet( + map[string]string{ + velerov1api.BackupNameLabel: label.GetValidName(backup.Name), + velerov1api.DataUploadSnapshotInfoLabel: "true", + }), + }); err != nil { + return []error{errors.Wrapf(err, "failed to retrieve config for snapshot info")} + } + var errs []error + for i := range list.Items { + cm := list.Items[i] + snapshot := repository.SnapshotIdentifier{} + b, _ := json.Marshal(cm.Data) + if err := json.Unmarshal(b, &snapshot); err != nil { + errs = append(errs, errors.Wrapf(err, "failed to unmarshal snapshot info")) + continue + } + if err := r.repoMgr.Forget(ctx, snapshot); err != nil { + errs = append(errs, errors.Wrapf(err, "failed to delete snapshot %s, namespace: %s", snapshot.SnapshotID, snapshot.VolumeNamespace)) + } + r.logger.Infof("Deleted snapshot %s, namespace: %s, repo type: %s", snapshot.SnapshotID, snapshot.VolumeNamespace, snapshot.RepositoryType) + if err := r.Client.Delete(ctx, &cm); err != nil { + r.logger.Warnf("Failed to delete snapshot info configmap %s/%s: %v", cm.Namespace, cm.Name, err) + } + } + return errs +} + func (r *backupDeletionReconciler) patchDeleteBackupRequest(ctx context.Context, req *velerov1api.DeleteBackupRequest, mutate func(*velerov1api.DeleteBackupRequest)) (*velerov1api.DeleteBackupRequest, error) { original := 
req.DeepCopy() mutate(req) diff --git a/pkg/controller/backup_finalizer_controller_test.go b/pkg/controller/backup_finalizer_controller_test.go index 1e0f88511d..011a6561bd 100644 --- a/pkg/controller/backup_finalizer_controller_test.go +++ b/pkg/controller/backup_finalizer_controller_test.go @@ -19,7 +19,7 @@ package controller import ( "bytes" "context" - "io/ioutil" + "io" "testing" "time" @@ -163,7 +163,7 @@ func TestBackupFinalizerReconcile(t *testing.T) { reconciler, backupper := mockBackupFinalizerReconciler(fakeClient, fakeClock) pluginManager.On("CleanupClients").Return(nil) backupStore.On("GetBackupItemOperations", test.backup.Name).Return(test.backupOperations, nil) - backupStore.On("GetBackupContents", mock.Anything).Return(ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), nil) + backupStore.On("GetBackupContents", mock.Anything).Return(io.NopCloser(bytes.NewReader([]byte("hello world"))), nil) backupStore.On("PutBackupContents", mock.Anything, mock.Anything).Return(nil) backupStore.On("PutBackupMetadata", mock.Anything, mock.Anything).Return(nil) pluginManager.On("GetBackupItemActionsV2").Return(nil, nil) diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go index 0ffc0f9777..b3d996d2f4 100644 --- a/pkg/controller/backup_sync_controller.go +++ b/pkg/controller/backup_sync_controller.go @@ -208,6 +208,7 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) podVolumeBackup.Namespace = backup.Namespace podVolumeBackup.ResourceVersion = "" + podVolumeBackup.Spec.BackupStorageLocation = location.Name err = b.client.Create(ctx, podVolumeBackup, &client.CreateOptions{}) switch { diff --git a/pkg/controller/data_download_controller.go b/pkg/controller/data_download_controller.go index d41a262863..a5947444e9 100644 --- a/pkg/controller/data_download_controller.go +++ b/pkg/controller/data_download_controller.go @@ -109,7 +109,7 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, err } - if dd.Spec.DataMover != "" && dd.Spec.DataMover != dataMoverType { + if !datamover.IsBuiltInUploader(dd.Spec.DataMover) { log.WithField("data mover", dd.Spec.DataMover).Info("it is not one built-in data mover which is not supported by Velero") return ctrl.Result{}, nil } diff --git a/pkg/controller/data_upload_controller.go b/pkg/controller/data_upload_controller.go index de54afd0dd..f52de9f9bb 100644 --- a/pkg/controller/data_upload_controller.go +++ b/pkg/controller/data_upload_controller.go @@ -53,10 +53,10 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/kube" ) -const dataMoverType string = "velero" -const dataUploadDownloadRequestor string = "snapshot-data-upload-download" - -const preparingMonitorFrequency time.Duration = time.Minute +const ( + dataUploadDownloadRequestor string = "snapshot-data-upload-download" + preparingMonitorFrequency time.Duration = time.Minute +) // DataUploadReconciler reconciles a DataUpload object type DataUploadReconciler struct { @@ -116,7 +116,7 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, errors.Wrap(err, "getting DataUpload") } - if du.Spec.DataMover != "" && du.Spec.DataMover != dataMoverType { + if !datamover.IsBuiltInUploader(du.Spec.DataMover) { log.WithField("Data mover", du.Spec.DataMover).Debug("it is not one built-in data mover which is not supported by Velero") return ctrl.Result{}, nil } diff --git a/pkg/controller/restore_operations_controller.go 
b/pkg/controller/restore_operations_controller.go index 869c5acb1b..de7b96a760 100644 --- a/pkg/controller/restore_operations_controller.go +++ b/pkg/controller/restore_operations_controller.go @@ -130,6 +130,8 @@ func (r *restoreOperationsReconciler) Reconcile(ctx context.Context, req ctrl.Re if err != nil { log.Warnf("Cannot check progress on Restore operations because backup info is unavailable %s; marking restore PartiallyFailed", err.Error()) restore.Status.Phase = velerov1api.RestorePhasePartiallyFailed + restore.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now()} + r.metrics.RegisterRestorePartialFailure(restore.Spec.ScheduleName) err2 := r.updateRestoreAndOperationsJSON(ctx, original, restore, nil, &itemoperationmap.OperationsForRestore{ErrsSinceUpdate: []string{err.Error()}}, false, false) if err2 != nil { log.WithError(err2).Error("error updating Restore") @@ -140,7 +142,8 @@ func (r *restoreOperationsReconciler) Reconcile(ctx context.Context, req ctrl.Re if info.location.Spec.AccessMode == velerov1api.BackupStorageLocationAccessModeReadOnly { log.Infof("Cannot check progress on Restore operations because backup storage location %s is currently in read-only mode; marking restore PartiallyFailed", info.location.Name) restore.Status.Phase = velerov1api.RestorePhasePartiallyFailed - + restore.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now()} + r.metrics.RegisterRestorePartialFailure(restore.Spec.ScheduleName) err := r.updateRestoreAndOperationsJSON(ctx, original, restore, nil, &itemoperationmap.OperationsForRestore{ErrsSinceUpdate: []string{"BSL is read-only"}}, false, false) if err != nil { log.WithError(err).Error("error updating Restore") @@ -189,10 +192,12 @@ func (r *restoreOperationsReconciler) Reconcile(ctx context.Context, req ctrl.Re if restore.Status.Phase == velerov1api.RestorePhaseWaitingForPluginOperations { log.Infof("Marking restore %s completed", restore.Name) restore.Status.Phase = velerov1api.RestorePhaseCompleted + restore.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now()} r.metrics.RegisterRestoreSuccess(restore.Spec.ScheduleName) } else { log.Infof("Marking restore %s FinalizingPartiallyFailed", restore.Name) restore.Status.Phase = velerov1api.RestorePhasePartiallyFailed + restore.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now()} r.metrics.RegisterRestorePartialFailure(restore.Spec.ScheduleName) } } diff --git a/pkg/controller/schedule_controller.go b/pkg/controller/schedule_controller.go index d3268399c8..86386752b5 100644 --- a/pkg/controller/schedule_controller.go +++ b/pkg/controller/schedule_controller.go @@ -199,11 +199,10 @@ func (c *scheduleReconciler) checkIfBackupInNewOrProgress(schedule *velerov1.Sch for _, backup := range backupList.Items { if backup.Status.Phase == velerov1.BackupPhaseNew || backup.Status.Phase == velerov1.BackupPhaseInProgress { + log.Debugf("%s/%s still has backups that are in InProgress or New...", schedule.Namespace, schedule.Name) return true } } - - log.Debugf("Schedule %s/%s still has backups are in InProgress or New state, skip submitting backup to avoid overlap.", schedule.Namespace, schedule.Name) return false } diff --git a/pkg/datamover/dataupload_delete_action.go b/pkg/datamover/dataupload_delete_action.go new file mode 100644 index 0000000000..7810979290 --- /dev/null +++ b/pkg/datamover/dataupload_delete_action.go @@ -0,0 +1,86 @@ +package datamover + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + corev1api 
"k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" + "github.com/vmware-tanzu/velero/pkg/plugin/velero" + "github.com/vmware-tanzu/velero/pkg/repository" +) + +type DataUploadDeleteAction struct { + logger logrus.FieldLogger + client client.Client +} + +func (d *DataUploadDeleteAction) AppliesTo() (velero.ResourceSelector, error) { + return velero.ResourceSelector{ + IncludedResources: []string{"datauploads.velero.io"}, + }, nil +} + +func (d *DataUploadDeleteAction) Execute(input *velero.DeleteItemActionExecuteInput) error { + d.logger.Infof("Executing DataUploadDeleteAction") + du := &velerov2alpha1.DataUpload{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), &du); err != nil { + return errors.WithStack(errors.Wrapf(err, "failed to convert input.Item from unstructured")) + } + cm := genConfigmap(input.Backup, *du) + if cm == nil { + // will not fail the backup deletion + return nil + } + err := d.client.Create(context.Background(), cm) + if err != nil { + return errors.WithStack(errors.Wrapf(err, "failed to create the configmap for DataUpload %s/%s", du.Namespace, du.Name)) + } + return nil +} + +// generate the configmap which is to be created and used as a way to communicate the snapshot info to the backup deletion controller +func genConfigmap(bak *velerov1.Backup, du velerov2alpha1.DataUpload) *corev1api.ConfigMap { + if !IsBuiltInUploader(du.Spec.DataMover) || du.Status.SnapshotID == "" { + return nil + } + snapshot := repository.SnapshotIdentifier{ + VolumeNamespace: du.Spec.SourceNamespace, + BackupStorageLocation: bak.Spec.StorageLocation, + SnapshotID: du.Status.SnapshotID, + RepositoryType: GetUploaderType(du.Spec.DataMover), + } + b, _ := json.Marshal(snapshot) + data := make(map[string]string) + _ = json.Unmarshal(b, &data) + return &corev1api.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1api.SchemeGroupVersion.String(), + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: bak.Namespace, + Name: fmt.Sprintf("%s-info", du.Name), + Labels: map[string]string{ + velerov1.BackupNameLabel: bak.Name, + velerov1.DataUploadSnapshotInfoLabel: "true", + }, + }, + Data: data, + } +} + +func NewDataUploadDeleteAction(logger logrus.FieldLogger, client client.Client) *DataUploadDeleteAction { + return &DataUploadDeleteAction{ + logger: logger, + client: client, + } +} diff --git a/pkg/datamover/util.go b/pkg/datamover/util.go index 757deb0d2a..f39f49cfbd 100644 --- a/pkg/datamover/util.go +++ b/pkg/datamover/util.go @@ -23,3 +23,7 @@ func GetUploaderType(dataMover string) string { return dataMover } } + +func IsBuiltInUploader(dataMover string) bool { + return dataMover == "" || dataMover == "velero" +} diff --git a/pkg/datamover/util_test.go b/pkg/datamover/util_test.go new file mode 100644 index 0000000000..d4e3e6efe0 --- /dev/null +++ b/pkg/datamover/util_test.go @@ -0,0 +1,70 @@ +package datamover + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsBuiltInUploader(t *testing.T) { + testcases := []struct { + name string + dataMover string + want bool + }{ + { + name: "empty dataMover is builtin", + dataMover: "", + want: true, + }, + { + name: "velero dataMover is builtin", + dataMover: "velero", + want: true, + }, + { + name: "kopia 
dataMover is not builtin", + dataMover: "kopia", + want: false, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(tt *testing.T) { + assert.Equal(tt, tc.want, IsBuiltInUploader(tc.dataMover)) + }) + } +} + +func TestGetUploaderType(t *testing.T) { + testcases := []struct { + name string + input string + want string + }{ + { + name: "empty dataMover is kopia", + input: "", + want: "kopia", + }, + { + name: "velero dataMover is kopia", + input: "velero", + want: "kopia", + }, + { + name: "kopia dataMover is kopia", + input: "kopia", + want: "kopia", + }, + { + name: "restic dataMover is restic", + input: "restic", + want: "restic", + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(tt *testing.T) { + assert.Equal(tt, tc.want, GetUploaderType(tc.input)) + }) + } +} diff --git a/pkg/install/install.go b/pkg/install/install.go index 6eebb9c73a..c76ecab6e2 100644 --- a/pkg/install/install.go +++ b/pkg/install/install.go @@ -55,7 +55,7 @@ var kindToResource = map[string]string{ "VolumeSnapshotLocation": "volumesnapshotlocations", } -// ResourceGroup represents a collection of kubernetes objects with a common ready condition +// ResourceGroup represents a collection of Kubernetes objects with a common ready condition type ResourceGroup struct { CRDResources []*unstructured.Unstructured OtherResources []*unstructured.Unstructured @@ -164,7 +164,7 @@ func isAvailable(c appsv1.DeploymentCondition) bool { return false } -// DeploymentIsReady will poll the kubernetes API server to see if the velero deployment is ready to service user requests. +// DeploymentIsReady will poll the Kubernetes API server to see if the velero deployment is ready to service user requests. func DeploymentIsReady(factory client.DynamicFactory, namespace string) (bool, error) { gvk := schema.FromAPIVersionAndKind(appsv1.SchemeGroupVersion.String(), "Deployment") apiResource := metav1.APIResource{ @@ -206,7 +206,7 @@ func DeploymentIsReady(factory client.DynamicFactory, namespace string) (bool, e return isReady, err } -// DaemonSetIsReady will poll the kubernetes API server to ensure the node-agent daemonset is ready, i.e. that +// DaemonSetIsReady will poll the Kubernetes API server to ensure the node-agent daemonset is ready, i.e. that // pods are scheduled and available on all of the desired nodes. 
func DaemonSetIsReady(factory client.DynamicFactory, namespace string) (bool, error) { gvk := schema.FromAPIVersionAndKind(appsv1.SchemeGroupVersion.String(), "DaemonSet") @@ -252,7 +252,7 @@ func DaemonSetIsReady(factory client.DynamicFactory, namespace string) (bool, er return isReady, err } -// GroupResources groups resources based on whether the resources are CustomResourceDefinitions or other types of kubernetes objects +// GroupResources groups resources based on whether the resources are CustomResourceDefinitions or other types of Kubernetes objects // This is useful to wait for readiness before creating CRD objects func GroupResources(resources *unstructured.UnstructuredList) *ResourceGroup { rg := new(ResourceGroup) diff --git a/pkg/label/label.go b/pkg/label/label.go index 4b7445d1a5..445e4af469 100644 --- a/pkg/label/label.go +++ b/pkg/label/label.go @@ -27,7 +27,7 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" ) -// GetValidName converts an input string to valid kubernetes label string in accordance to rfc1035 DNS Label spec +// GetValidName converts an input string to valid Kubernetes label string in accordance to rfc1035 DNS Label spec // (https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md) // Length of the label is adjusted basis the DNS1035LabelMaxLength (defined at k8s.io/apimachinery/pkg/util/validation) // If length exceeds, we trim the label name to contain only max allowed characters diff --git a/pkg/plugin/framework/server.go b/pkg/plugin/framework/server.go index 3c3871fac8..d1d87aecb3 100644 --- a/pkg/plugin/framework/server.go +++ b/pkg/plugin/framework/server.go @@ -128,7 +128,7 @@ func NewServer() Server { func (s *server) BindFlags(flags *pflag.FlagSet) Server { flags.Var(s.logLevelFlag, "log-level", fmt.Sprintf("The level at which to log. Valid values are %s.", strings.Join(s.logLevelFlag.AllowedValues(), ", "))) s.flagSet = flags - s.flagSet.ParseErrorsWhitelist.UnknownFlags = true + s.flagSet.ParseErrorsWhitelist.UnknownFlags = true // Velero.io word list : ignore return s } diff --git a/pkg/plugin/velero/backupitemaction/v2/backup_item_action.go b/pkg/plugin/velero/backupitemaction/v2/backup_item_action.go index 70c1429ee7..3c23802f22 100644 --- a/pkg/plugin/velero/backupitemaction/v2/backup_item_action.go +++ b/pkg/plugin/velero/backupitemaction/v2/backup_item_action.go @@ -47,7 +47,7 @@ type BackupItemAction interface { // initiate (asynchronous) operations, and a second slice of ResourceIdentifiers specifying related items // which should be backed up after all operations have completed. This last field will be // ignored if operationID is empty, and should not be filled in unless the resource must be updated in the - // backup after operations complete (i.e. some of the item's kubernetes metadata will be updated + // backup after operations complete (i.e. some of the item's Kubernetes metadata will be updated // during the operation which will be required during restore) // Note that (async) operations are not supported for items being backed up during Finalize phases, // so a plugin should not return an OperationID if the backup phase is "Finalizing" diff --git a/pkg/repository/manager.go b/pkg/repository/manager.go index aeaf0ddf58..3e412a73a4 100644 --- a/pkg/repository/manager.go +++ b/pkg/repository/manager.go @@ -36,18 +36,18 @@ import ( type SnapshotIdentifier struct { // VolumeNamespace is the namespace of the pod/volume that // the snapshot is for. 
- VolumeNamespace string + VolumeNamespace string `json:"volumeNamespace"` // BackupStorageLocation is the backup's storage location // name. - BackupStorageLocation string + BackupStorageLocation string `json:"backupStorageLocation"` // SnapshotID is the short ID of the snapshot. - SnapshotID string + SnapshotID string `json:"snapshotID"` // RepositoryType is the type of the repository where the // snapshot is stored - RepositoryType string + RepositoryType string `json:"repositoryType"` } // Manager manages backup repositories. diff --git a/pkg/restore/dataupload_retrieve_action.go b/pkg/restore/dataupload_retrieve_action.go index a691d653c5..c345e908ff 100644 --- a/pkg/restore/dataupload_retrieve_action.go +++ b/pkg/restore/dataupload_retrieve_action.go @@ -25,7 +25,8 @@ import ( corev1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" @@ -34,14 +35,14 @@ import ( ) type DataUploadRetrieveAction struct { - logger logrus.FieldLogger - configMapClient corev1client.ConfigMapInterface + logger logrus.FieldLogger + client client.Client } -func NewDataUploadRetrieveAction(logger logrus.FieldLogger, configMapClient corev1client.ConfigMapInterface) *DataUploadRetrieveAction { +func NewDataUploadRetrieveAction(logger logrus.FieldLogger, client client.Client) *DataUploadRetrieveAction { return &DataUploadRetrieveAction{ - logger: logger, - configMapClient: configMapClient, + logger: logger, + client: client, } } @@ -60,8 +61,18 @@ func (d *DataUploadRetrieveAction) Execute(input *velero.RestoreItemActionExecut return nil, errors.Wrap(err, "unable to convert unstructured item to DataUpload.") } + backup := &velerov1api.Backup{} + err := d.client.Get(context.Background(), types.NamespacedName{ + Namespace: input.Restore.Namespace, + Name: input.Restore.Spec.BackupName, + }, backup) + if err != nil { + d.logger.WithError(err).Errorf("Fail to get backup for restore %s.", input.Restore.Name) + return nil, errors.Wrapf(err, "error to get backup for restore %s", input.Restore.Name) + } + dataUploadResult := velerov2alpha1.DataUploadResult{ - BackupStorageLocation: dataUpload.Spec.BackupStorageLocation, + BackupStorageLocation: backup.Spec.StorageLocation, DataMover: dataUpload.Spec.DataMover, SnapshotID: dataUpload.Status.SnapshotID, SourceNamespace: dataUpload.Spec.SourceNamespace, @@ -93,7 +104,7 @@ func (d *DataUploadRetrieveAction) Execute(input *velero.RestoreItemActionExecut }, } - _, err = d.configMapClient.Create(context.Background(), &cm, metav1.CreateOptions{}) + err = d.client.Create(context.Background(), &cm, &client.CreateOptions{}) if err != nil { d.logger.Errorf("fail to create DataUploadResult ConfigMap %s/%s: %s", cm.Namespace, cm.Name, err.Error()) return nil, errors.Wrap(err, "fail to create DataUploadResult ConfigMap") diff --git a/pkg/restore/dataupload_retrieve_action_test.go b/pkg/restore/dataupload_retrieve_action_test.go index be8c653681..2624f0886b 100644 --- a/pkg/restore/dataupload_retrieve_action_test.go +++ b/pkg/restore/dataupload_retrieve_action_test.go @@ -23,10 +23,11 @@ import ( "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + 
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes/fake" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" @@ -37,31 +38,58 @@ import ( ) func TestDataUploadRetrieveActionExectue(t *testing.T) { + scheme := runtime.NewScheme() + velerov1.AddToScheme(scheme) + corev1.AddToScheme(scheme) + tests := []struct { name string dataUpload *velerov2alpha1.DataUpload restore *velerov1.Restore expectedDataUploadResult *corev1.ConfigMap expectedErr string + runtimeScheme *runtime.Scheme + veleroObjs []runtime.Object }{ { - name: "DataUploadRetrieve Action test", - dataUpload: builder.ForDataUpload("velero", "testDU").SourceNamespace("testNamespace").SourcePVC("testPVC").Result(), - restore: builder.ForRestore("velero", "testRestore").ObjectMeta(builder.WithUID("testingUID")).Result(), - expectedDataUploadResult: builder.ForConfigMap("velero", "").ObjectMeta(builder.WithGenerateName("testDU-"), builder.WithLabels(velerov1.PVCNamespaceNameLabel, "testNamespace.testPVC", velerov1.RestoreUIDLabel, "testingUID", velerov1.ResourceUsageLabel, string(velerov1.VeleroResourceUsageDataUploadResult))).Data("testingUID", `{"backupStorageLocation":"","sourceNamespace":"testNamespace"}`).Result(), + name: "error to find backup", + dataUpload: builder.ForDataUpload("velero", "testDU").SourceNamespace("testNamespace").SourcePVC("testPVC").Result(), + restore: builder.ForRestore("velero", "testRestore").ObjectMeta(builder.WithUID("testingUID")).Backup("testBackup").Result(), + runtimeScheme: scheme, + expectedErr: "error to get backup for restore testRestore: backups.velero.io \"testBackup\" not found", + }, + { + name: "DataUploadRetrieve Action test", + dataUpload: builder.ForDataUpload("velero", "testDU").SourceNamespace("testNamespace").SourcePVC("testPVC").Result(), + restore: builder.ForRestore("velero", "testRestore").ObjectMeta(builder.WithUID("testingUID")).Backup("testBackup").Result(), + runtimeScheme: scheme, + veleroObjs: []runtime.Object{ + builder.ForBackup("velero", "testBackup").StorageLocation("testLocation").Result(), + }, + expectedDataUploadResult: builder.ForConfigMap("velero", "").ObjectMeta(builder.WithGenerateName("testDU-"), builder.WithLabels(velerov1.PVCNamespaceNameLabel, "testNamespace.testPVC", velerov1.RestoreUIDLabel, "testingUID", velerov1.ResourceUsageLabel, string(velerov1.VeleroResourceUsageDataUploadResult))).Data("testingUID", `{"backupStorageLocation":"testLocation","sourceNamespace":"testNamespace"}`).Result(), }, { - name: "Long source namespace and PVC name should also work", - dataUpload: builder.ForDataUpload("velero", "testDU").SourceNamespace("migre209d0da-49c7-45ba-8d5a-3e59fd591ec1").SourcePVC("kibishii-data-kibishii-deployment-0").Result(), - restore: builder.ForRestore("velero", "testRestore").ObjectMeta(builder.WithUID("testingUID")).Result(), - expectedDataUploadResult: builder.ForConfigMap("velero", "").ObjectMeta(builder.WithGenerateName("testDU-"), builder.WithLabels(velerov1.PVCNamespaceNameLabel, "migre209d0da-49c7-45ba-8d5a-3e59fd591ec1.kibishii-data-ki152333", velerov1.RestoreUIDLabel, "testingUID", velerov1.ResourceUsageLabel, string(velerov1.VeleroResourceUsageDataUploadResult))).Data("testingUID", `{"backupStorageLocation":"","sourceNamespace":"migre209d0da-49c7-45ba-8d5a-3e59fd591ec1"}`).Result(), + name: "Long source namespace and 
PVC name should also work", + dataUpload: builder.ForDataUpload("velero", "testDU").SourceNamespace("migre209d0da-49c7-45ba-8d5a-3e59fd591ec1").SourcePVC("kibishii-data-kibishii-deployment-0").Result(), + restore: builder.ForRestore("velero", "testRestore").ObjectMeta(builder.WithUID("testingUID")).Backup("testBackup").Result(), + runtimeScheme: scheme, + veleroObjs: []runtime.Object{ + builder.ForBackup("velero", "testBackup").StorageLocation("testLocation").Result(), + }, + expectedDataUploadResult: builder.ForConfigMap("velero", "").ObjectMeta(builder.WithGenerateName("testDU-"), builder.WithLabels(velerov1.PVCNamespaceNameLabel, "migre209d0da-49c7-45ba-8d5a-3e59fd591ec1.kibishii-data-ki152333", velerov1.RestoreUIDLabel, "testingUID", velerov1.ResourceUsageLabel, string(velerov1.VeleroResourceUsageDataUploadResult))).Data("testingUID", `{"backupStorageLocation":"testLocation","sourceNamespace":"migre209d0da-49c7-45ba-8d5a-3e59fd591ec1"}`).Result(), }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { logger := velerotest.NewLogger() - cmClient := fake.NewSimpleClientset() + + fakeClientBuilder := fake.NewClientBuilder() + if tc.runtimeScheme != nil { + fakeClientBuilder = fakeClientBuilder.WithScheme(tc.runtimeScheme) + } + + fakeClient := fakeClientBuilder.WithRuntimeObjects(tc.veleroObjs...).Build() var unstructuredDataUpload map[string]interface{} if tc.dataUpload != nil { @@ -74,21 +102,28 @@ func TestDataUploadRetrieveActionExectue(t *testing.T) { ItemFromBackup: &unstructured.Unstructured{Object: unstructuredDataUpload}, } - action := NewDataUploadRetrieveAction(logger, cmClient.CoreV1().ConfigMaps("velero")) + action := NewDataUploadRetrieveAction(logger, fakeClient) _, err := action.Execute(&input) if tc.expectedErr != "" { require.Equal(t, tc.expectedErr, err.Error()) + } else { + require.NoError(t, err) } - require.NoError(t, err) if tc.expectedDataUploadResult != nil { - cmList, err := cmClient.CoreV1().ConfigMaps("velero").List(context.Background(), metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s,%s=%s", velerov1.RestoreUIDLabel, "testingUID", velerov1.PVCNamespaceNameLabel, label.GetValidName(tc.dataUpload.Spec.SourceNamespace+"."+tc.dataUpload.Spec.SourcePVC)), + var cmList corev1.ConfigMapList + err := fakeClient.List(context.Background(), &cmList, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + velerov1.RestoreUIDLabel: "testingUID", + velerov1.PVCNamespaceNameLabel: label.GetValidName(tc.dataUpload.Spec.SourceNamespace + "." 
+ tc.dataUpload.Spec.SourcePVC), + }), }) + require.NoError(t, err) // debug fmt.Printf("CM: %s\n", &cmList.Items[0]) - require.Equal(t, *tc.expectedDataUploadResult, cmList.Items[0]) + require.Equal(t, tc.expectedDataUploadResult.Labels, cmList.Items[0].Labels) + require.Equal(t, tc.expectedDataUploadResult.Data, cmList.Items[0].Data) } }) } diff --git a/pkg/uploader/provider/restic_test.go b/pkg/uploader/provider/restic_test.go index 379d4f2655..eaf0273ff5 100644 --- a/pkg/uploader/provider/restic_test.go +++ b/pkg/uploader/provider/restic_test.go @@ -19,7 +19,6 @@ package provider import ( "context" "errors" - "io/ioutil" "os" "strings" "testing" @@ -211,13 +210,13 @@ func TestResticRunRestore(t *testing.T) { func TestClose(t *testing.T) { t.Run("Delete existing credentials file", func(t *testing.T) { // Create temporary files for the credentials and caCert - credentialsFile, err := ioutil.TempFile("", "credentialsFile") + credentialsFile, err := os.CreateTemp("", "credentialsFile") if err != nil { t.Fatalf("failed to create temp file: %v", err) } defer os.Remove(credentialsFile.Name()) - caCertFile, err := ioutil.TempFile("", "caCertFile") + caCertFile, err := os.CreateTemp("", "caCertFile") if err != nil { t.Fatalf("failed to create temp file: %v", err) } @@ -240,7 +239,7 @@ func TestClose(t *testing.T) { t.Run("Delete existing caCert file", func(t *testing.T) { // Create temporary files for the credentials and caCert - caCertFile, err := ioutil.TempFile("", "caCertFile") + caCertFile, err := os.CreateTemp("", "caCertFile") if err != nil { t.Fatalf("failed to create temp file: %v", err) } diff --git a/restore-hooks_product-requirements.md b/restore-hooks_product-requirements.md index 85234c0d87..1b943fe82b 100644 --- a/restore-hooks_product-requirements.md +++ b/restore-hooks_product-requirements.md @@ -99,7 +99,7 @@ The following use cases must be included as part of the Velero restore hooks MVP **Title: **Allow restore hook to run on non-kubernetes databases - **Description: **As a user, I would like to run restore hook operations even on databases that are external to kubernetes (such as postgres, elastic, etc…). + **Description: **As a user, I would like to run restore hook operations even on databases that are external to Kubernetes (such as postgres, elastic, etc…). **______________________________________________________________** @@ -291,7 +291,7 @@ The following requirements are out of scope for the Velero Restore Hooks MVP: 1. Verifying the integrity of a backup, resource, or other artifact will not be included in the scope of this effort. -2. Verifying the integrity of a snapshot using kubernetes hash checks. +2. Verifying the integrity of a snapshot using Kubernetes hash checks. 3. Running concurrent restore operations (for the MVP) a secondary epic will be opened to align better with the concurrent workload operations currently set on the Velero roadmap for Q4 timeframe. **Questions** diff --git a/site/content/_index.md b/site/content/_index.md index b4802d4636..fc131c1ce0 100644 --- a/site/content/_index.md +++ b/site/content/_index.md @@ -29,7 +29,7 @@ hero: secondary_ctas: cta1: title: Introduction to Velero - url: /blog/Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters/ + url: /blog/Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters/ # Velero.io word list : ignore content: Learn about Velero and how to protect your Kubernetes resources and volumes. cta2: title: How Do You Use Velero? 
diff --git a/site/content/docs/main/api-types/restore.md b/site/content/docs/main/api-types/restore.md index e475955ee0..453ab5cf4c 100644 --- a/site/content/docs/main/api-types/restore.md +++ b/site/content/docs/main/api-types/restore.md @@ -103,7 +103,7 @@ spec: # so that the exposed port numbers on the node will remain the same after restore. Optional preserveNodePorts: true # existingResourcePolicy specifies the restore behaviour - # for the kubernetes resource to be restored. Optional + # for the Kubernetes resource to be restored. Optional existingResourcePolicy: none # Actions to perform during or post restore. The only hooks currently supported are # adding an init container to a pod before it can be restored and executing a command in a diff --git a/site/content/docs/main/backup-reference.md b/site/content/docs/main/backup-reference.md index b0eddab293..3d54cfb57d 100644 --- a/site/content/docs/main/backup-reference.md +++ b/site/content/docs/main/backup-reference.md @@ -63,6 +63,8 @@ This command will immediately trigger a new backup based on your template for `e ### Limitation + +#### Backup's OwnerReference with Schedule Backups created from schedule can have owner reference to the schedule. This can be achieved by command: ``` @@ -74,7 +76,23 @@ Please do notice there is also side effect that may not be expected. Because sch If there is possibility the schedule will be disable to not create backup anymore, and the created backups are still useful. Please do not enable this option. For detail, please reference to [Backups created by a schedule with useOwnerReferenceInBackup set do not get synced properly](https://github.com/vmware-tanzu/velero/issues/4093). +#### Cannot support backup data immutability +Starting from 1.11, Velero's backups may not work as expected when the target object storage has some kind of an "immutability" option configured. These options are known by different names (see links below for some examples). The main reason is that Velero first saves the state of a backup as Finalizing and then checks whether there are any async operations in progress. If there are, it needs to wait for all of them to be finished before moving the backup state to Complete. If there are no async operations, the state is moved to Complete right away. In either case, Velero needs to modify the metadata in object storage and that will not be possible if some kind of immutability is configured on the object storage. + +Even with versions prior to 1.11, there was no explicit support in Velero to work with object storage that has "immutability" configuration. As a result, you may see some problems even though backups seem to work (e.g. versions objects not being deleted when backup is deleted). + +Note that backups may still work in some cases depending on specific providers and configurations. + +* For AWS S3 service, backups work because S3's object lock only applies to versioned buckets, and the object data can still be updated as the new version. But when backups are deleted, old versions of the objects will not be deleted. +* Azure Storage Blob supports both versioned-level immutability and container-level immutability. For the versioned-level scenario, data immutability can still work in Velero, but the container-level cannot. +* GCP Cloud storage policy only supports bucket-level immutability, so there is no way to make it work in the GCP environment. 
+ +The following are the links to cloud providers' documentation in this regard: +* [AWS S3 Using S3 Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) +* [Azure Storage Blob Containers - Lock Immutability Policy](https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-policy-configure-version-scope?tabs=azure-portal) +* [GCP cloud storage Retention policies and retention policy locks](https://cloud.google.com/storage/docs/bucket-lock) + ## Kubernetes API Pagination By default, Velero will paginate the LIST API call for each resource type in the Kubernetes API when collecting items into a backup. The `--client-page-size` flag for the Velero server configures the size of each page. diff --git a/site/content/docs/main/contributions/tencent-config.md b/site/content/docs/main/contributions/tencent-config.md index 592808c2dd..5651ae24e7 100644 --- a/site/content/docs/main/contributions/tencent-config.md +++ b/site/content/docs/main/contributions/tencent-config.md @@ -52,9 +52,9 @@ region=ap-guangzhou,s3ForcePathStyle="true",s3Url=https://cos.ap-guangzhou.myqcl Description of the parameters: -- `--provider`: Declares the type of plug-in provided by "aws". +- `--provider`: Declares the type of plugin provided by "aws". -- `--plugins`: Use the AWS S3 compatible API plug-in "velero-plugin-for-aws". +- `--plugins`: Use the AWS S3 compatible API plugin "velero-plugin-for-aws". - `--bucket`: The bucket name created at Tencent Cloud COS. diff --git a/site/content/docs/main/customize-installation.md b/site/content/docs/main/customize-installation.md index 786d74dd68..0d8621685d 100644 --- a/site/content/docs/main/customize-installation.md +++ b/site/content/docs/main/customize-installation.md @@ -122,7 +122,7 @@ velero install \ ### Update resource requests and limits after install -After installation you can adjust the resource requests and limits in the Velero Deployment spec or node-agent DeamonSet spec, if you are using the File System Backup. +After installation you can adjust the resource requests and limits in the Velero Deployment spec or node-agent DaemonSet spec, if you are using the File System Backup. **Velero pod** @@ -135,7 +135,7 @@ kubectl patch deployment velero -n velero --patch \ **node-agent pod** -Update the `spec.template.spec.containers.resources.limits` and `spec.template.spec.containers.resources.requests` values in the node-agent DeamonSet spec. +Update the `spec.template.spec.containers.resources.limits` and `spec.template.spec.containers.resources.requests` values in the node-agent DaemonSet spec. ```bash kubectl patch daemonset node-agent -n velero --patch \ diff --git a/site/content/docs/main/file-system-backup.md b/site/content/docs/main/file-system-backup.md index 2521f3f34b..1d443ee86f 100644 --- a/site/content/docs/main/file-system-backup.md +++ b/site/content/docs/main/file-system-backup.md @@ -125,7 +125,7 @@ To mount the correct hostpath to pods volumes, run the node-agent pod in `privil If node-agent is not running in a privileged mode, it will not be able to access pods volumes within the mounted hostpath directory because of the default enforced SELinux mode configured in the host system level. 
You can [create a custom SCC](https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html) to relax the -security in your cluster so that node-agent pods are allowed to use the hostPath volume plug-in without granting +security in your cluster so that node-agent pods are allowed to use the hostPath volume plugin without granting them access to the `privileged` SCC. By default a userland openshift namespace will not schedule pods on all nodes in the cluster. diff --git a/site/content/docs/main/performance-guidance.md b/site/content/docs/main/performance-guidance.md index dc8284ecf6..8596b4a52d 100644 --- a/site/content/docs/main/performance-guidance.md +++ b/site/content/docs/main/performance-guidance.md @@ -142,7 +142,7 @@ Compression is either disabled or not unavailable for both uploader. | Kopia | 4c4g |1m35s | 75% |248 MB |10 GB | | Restic | 4c4g |3m17s | 171% |126 MB |10 GB | #### conclusion: -- This case involves a relatively large backup size, there is no significant time reduction by increasing resources from 1c2g to 4c4g for Kopia uploader, but for Restic upoader when increasing CPU from 1 core to 4, backup time-consuming was shortened by one-third, which means in this scenario should allocate more CPU resources for Restic uploader. +- This case involves a relatively large backup size, there is no significant time reduction by increasing resources from 1c2g to 4c4g for Kopia uploader, but for Restic uploader when increasing CPU from 1 core to 4, backup time-consuming was shortened by one-third, which means in this scenario should allocate more CPU resources for Restic uploader. - For the large backup size case, Restic uploader's repository size comes to normal ### Case 4: 900 files, 1 directory, 1.000GB per file total 900.000GB content diff --git a/site/content/docs/main/self-signed-certificates.md b/site/content/docs/main/self-signed-certificates.md index 9724606de4..b85924c734 100644 --- a/site/content/docs/main/self-signed-certificates.md +++ b/site/content/docs/main/self-signed-certificates.md @@ -50,7 +50,7 @@ You will need to change this setting on the server to make it work. ## Skipping TLS verification -**Note:** The `--insecure-skip-tls-verify` flag is insecure and susceptible to man-in-the-middle attacks and meant to help your testing and developing scenarios in an on-premise environment. Using this flag in production is not recommended. +**Note:** The `--insecure-skip-tls-verify` flag is insecure and susceptible to man-in-the-middle attacks and meant to help your testing and developing scenarios in an on-premises environment. Using this flag in production is not recommended. Velero provides a way for you to skip TLS verification on the object store when using the [AWS provider plugin](https://github.com/vmware-tanzu/velero-plugin-for-aws) or [File System Backup](file-system-backup.md) by passing the `--insecure-skip-tls-verify` flag with the following Velero commands, diff --git a/site/content/docs/main/style-guide.md b/site/content/docs/main/style-guide.md index 1b98e59dde..5fb5e55ddb 100644 --- a/site/content/docs/main/style-guide.md +++ b/site/content/docs/main/style-guide.md @@ -261,17 +261,18 @@ nginx 1/1 Running 0 13s 10.200.0.4 worker0 A list of Velero-specific terms and words to be used consistently across the site. 
{{< table caption="Velero.io word list" >}} -|Trem|Usage| +|Term|Usage| |--- |--- | |Kubernetes|Kubernetes should always be capitalized.| |Docker|Docker should always be capitalized.| |Velero|Velero should always be capitalized.| |VMware|VMware should always be correctly capitalized.| -|On-premises|On-premises or on-prem rather than on-premise or other variations.| -|Backup|Backup rather than back up, back-up or other variations.| -|Plugin|Plugin rather than plug-in or other variations.| -|Allowlist|Use allowlist instead of whitelist.| -|Denylist|Use denylist instead of blacklist.| +|On-premises|On-premises or on-prem rather than on-premise or other variations.| +|Backup|Backup for noun or adjective, rather than back-up, back up or other variations.| +|Back up|Back up for verb, rather than other variations.| +|Plugin|Plugin rather than plug-in or other variations.| +|Allowlist|Use allowlist instead of whitelist.| +|Denylist|Use denylist instead of blacklist.| {{< /table >}} ## Markdown elements diff --git a/site/content/docs/main/upgrade-to-1.11.md b/site/content/docs/main/upgrade-to-1.11.md index f6ec1ae186..ee412a0fa6 100644 --- a/site/content/docs/main/upgrade-to-1.11.md +++ b/site/content/docs/main/upgrade-to-1.11.md @@ -44,7 +44,7 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw velero install --crds-only --dry-run -o yaml | kubectl apply -f - ``` - **NOTE:** Since velero v1.10.0 only v1 CRD will be supported during installation, therefore, the v1.10.0 will only work on kubernetes version >= v1.16 + **NOTE:** Since velero v1.10.0 only v1 CRD will be supported during installation, therefore, the v1.10.0 will only work on Kubernetes version >= v1.16 3. Update the container image and objects fields used by the Velero deployment and, optionally, the restic daemon set: diff --git a/site/content/docs/main/velero-install.md b/site/content/docs/main/velero-install.md index 7fdd0f9361..1ac1395d52 100644 --- a/site/content/docs/main/velero-install.md +++ b/site/content/docs/main/velero-install.md @@ -3,7 +3,7 @@ title: "Velero Install CLI" layout: docs --- -This document serves as a guide to using the `velero install` CLI command to install `velero` server components into your kubernetes cluster. +This document serves as a guide to using the `velero install` CLI command to install `velero` server components into your Kubernetes cluster. _NOTE_: `velero install` will, by default, use the CLI's version information to determine the version of the server components to deploy. This behavior may be overridden by using the `--image` flag. Refer to [Building Server Component Container Images][1]. diff --git a/site/content/docs/v1.11/api-types/restore.md b/site/content/docs/v1.11/api-types/restore.md index e475955ee0..453ab5cf4c 100644 --- a/site/content/docs/v1.11/api-types/restore.md +++ b/site/content/docs/v1.11/api-types/restore.md @@ -103,7 +103,7 @@ spec: # so that the exposed port numbers on the node will remain the same after restore. Optional preserveNodePorts: true # existingResourcePolicy specifies the restore behaviour - # for the kubernetes resource to be restored. Optional + # for the Kubernetes resource to be restored. Optional existingResourcePolicy: none # Actions to perform during or post restore. 
The only hooks currently supported are # adding an init container to a pod before it can be restored and executing a command in a diff --git a/site/content/docs/v1.11/backup-reference.md b/site/content/docs/v1.11/backup-reference.md index b0eddab293..00a1daf206 100644 --- a/site/content/docs/v1.11/backup-reference.md +++ b/site/content/docs/v1.11/backup-reference.md @@ -63,6 +63,7 @@ This command will immediately trigger a new backup based on your template for `e ### Limitation +#### Backup's OwnerReference with Schedule Backups created from schedule can have owner reference to the schedule. This can be achieved by command: ``` @@ -74,6 +75,23 @@ Please do notice there is also side effect that may not be expected. Because sch If there is possibility the schedule will be disable to not create backup anymore, and the created backups are still useful. Please do not enable this option. For detail, please reference to [Backups created by a schedule with useOwnerReferenceInBackup set do not get synced properly](https://github.com/vmware-tanzu/velero/issues/4093). +#### Cannot support backup data immutability +Starting from 1.11, Velero's backups may not work as expected when the target object storage has some kind of an "immutability" option configured. These options are known by different names (see links below for some examples). The main reason is that Velero first saves the state of a backup as Finalizing and then checks whether there are any async operations in progress. If there are, it needs to wait for all of them to be finished before moving the backup state to Complete. If there are no async operations, the state is moved to Complete right away. In either case, Velero needs to modify the metadata in object storage and that will not be possible if some kind of immutability is configured on the object storage. + +Even with versions prior to 1.11, there was no explicit support in Velero to work with object storage that has "immutability" configuration. As a result, you may see some problems even though backups seem to work (e.g. versions objects not being deleted when backup is deleted). + +Note that backups may still work in some cases depending on specific providers and configurations. + +* For AWS S3 service, backups work because S3's object lock only applies to versioned buckets, and the object data can still be updated as the new version. But when backups are deleted, old versions of the objects will not be deleted. +* Azure Storage Blob supports both versioned-level immutability and container-level immutability. For the versioned-level scenario, data immutability can still work in Velero, but the container-level cannot. +* GCP Cloud storage policy only supports bucket-level immutability, so there is no way to make it work in the GCP environment. 
+ +The following are the links to cloud providers' documentation in this regard: + +* [AWS S3 Using S3 Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) +* [Azure Storage Blob Containers - Lock Immutability Policy](https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-policy-configure-version-scope?tabs=azure-portal) +* [GCP cloud storage Retention policies and retention policy locks](https://cloud.google.com/storage/docs/bucket-lock) + ## Kubernetes API Pagination diff --git a/site/content/docs/v1.11/contributions/tencent-config.md b/site/content/docs/v1.11/contributions/tencent-config.md index 592808c2dd..5651ae24e7 100644 --- a/site/content/docs/v1.11/contributions/tencent-config.md +++ b/site/content/docs/v1.11/contributions/tencent-config.md @@ -52,9 +52,9 @@ region=ap-guangzhou,s3ForcePathStyle="true",s3Url=https://cos.ap-guangzhou.myqcl Description of the parameters: -- `--provider`: Declares the type of plug-in provided by "aws". +- `--provider`: Declares the type of plugin provided by "aws". -- `--plugins`: Use the AWS S3 compatible API plug-in "velero-plugin-for-aws". +- `--plugins`: Use the AWS S3 compatible API plugin "velero-plugin-for-aws". - `--bucket`: The bucket name created at Tencent Cloud COS. diff --git a/site/content/docs/v1.11/customize-installation.md b/site/content/docs/v1.11/customize-installation.md index 4d19bf635c..e3a8340037 100644 --- a/site/content/docs/v1.11/customize-installation.md +++ b/site/content/docs/v1.11/customize-installation.md @@ -122,7 +122,7 @@ velero install \ ### Update resource requests and limits after install -After installation you can adjust the resource requests and limits in the Velero Deployment spec or node-agent DeamonSet spec, if you are using the File System Backup. +After installation you can adjust the resource requests and limits in the Velero Deployment spec or node-agent DaemonSet spec, if you are using the File System Backup. **Velero pod** @@ -135,7 +135,7 @@ kubectl patch deployment velero -n velero --patch \ **node-agent pod** -Update the `spec.template.spec.containers.resources.limits` and `spec.template.spec.containers.resources.requests` values in the node-agent DeamonSet spec. +Update the `spec.template.spec.containers.resources.limits` and `spec.template.spec.containers.resources.requests` values in the node-agent DaemonSet spec. ```bash kubectl patch daemonset node-agent -n velero --patch \ diff --git a/site/content/docs/v1.11/file-system-backup.md b/site/content/docs/v1.11/file-system-backup.md index 431816f18b..2502ae5693 100644 --- a/site/content/docs/v1.11/file-system-backup.md +++ b/site/content/docs/v1.11/file-system-backup.md @@ -125,7 +125,7 @@ To mount the correct hostpath to pods volumes, run the node-agent pod in `privil If node-agent is not running in a privileged mode, it will not be able to access pods volumes within the mounted hostpath directory because of the default enforced SELinux mode configured in the host system level. You can [create a custom SCC](https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html) to relax the -security in your cluster so that node-agent pods are allowed to use the hostPath volume plug-in without granting +security in your cluster so that node-agent pods are allowed to use the hostPath volume plugin without granting them access to the `privileged` SCC. By default a userland openshift namespace will not schedule pods on all nodes in the cluster. 
diff --git a/site/content/docs/v1.11/performance-guidance.md b/site/content/docs/v1.11/performance-guidance.md index dc8284ecf6..8596b4a52d 100644 --- a/site/content/docs/v1.11/performance-guidance.md +++ b/site/content/docs/v1.11/performance-guidance.md @@ -142,7 +142,7 @@ Compression is either disabled or not unavailable for both uploader. | Kopia | 4c4g |1m35s | 75% |248 MB |10 GB | | Restic | 4c4g |3m17s | 171% |126 MB |10 GB | #### conclusion: -- This case involves a relatively large backup size, there is no significant time reduction by increasing resources from 1c2g to 4c4g for Kopia uploader, but for Restic upoader when increasing CPU from 1 core to 4, backup time-consuming was shortened by one-third, which means in this scenario should allocate more CPU resources for Restic uploader. +- This case involves a relatively large backup size, there is no significant time reduction by increasing resources from 1c2g to 4c4g for Kopia uploader, but for Restic uploader when increasing CPU from 1 core to 4, backup time-consuming was shortened by one-third, which means in this scenario should allocate more CPU resources for Restic uploader. - For the large backup size case, Restic uploader's repository size comes to normal ### Case 4: 900 files, 1 directory, 1.000GB per file total 900.000GB content diff --git a/site/content/docs/v1.11/self-signed-certificates.md b/site/content/docs/v1.11/self-signed-certificates.md index 9724606de4..b85924c734 100644 --- a/site/content/docs/v1.11/self-signed-certificates.md +++ b/site/content/docs/v1.11/self-signed-certificates.md @@ -50,7 +50,7 @@ You will need to change this setting on the server to make it work. ## Skipping TLS verification -**Note:** The `--insecure-skip-tls-verify` flag is insecure and susceptible to man-in-the-middle attacks and meant to help your testing and developing scenarios in an on-premise environment. Using this flag in production is not recommended. +**Note:** The `--insecure-skip-tls-verify` flag is insecure and susceptible to man-in-the-middle attacks and meant to help your testing and developing scenarios in an on-premises environment. Using this flag in production is not recommended. Velero provides a way for you to skip TLS verification on the object store when using the [AWS provider plugin](https://github.com/vmware-tanzu/velero-plugin-for-aws) or [File System Backup](file-system-backup.md) by passing the `--insecure-skip-tls-verify` flag with the following Velero commands, diff --git a/site/content/docs/v1.11/style-guide.md b/site/content/docs/v1.11/style-guide.md index 1b98e59dde..4976367668 100644 --- a/site/content/docs/v1.11/style-guide.md +++ b/site/content/docs/v1.11/style-guide.md @@ -261,17 +261,17 @@ nginx 1/1 Running 0 13s 10.200.0.4 worker0 A list of Velero-specific terms and words to be used consistently across the site. 
{{< table caption="Velero.io word list" >}} -|Trem|Usage| +|Term|Usage| |--- |--- | |Kubernetes|Kubernetes should always be capitalized.| |Docker|Docker should always be capitalized.| |Velero|Velero should always be capitalized.| |VMware|VMware should always be correctly capitalized.| -|On-premises|On-premises or on-prem rather than on-premise or other variations.| -|Backup|Backup rather than back up, back-up or other variations.| -|Plugin|Plugin rather than plug-in or other variations.| -|Allowlist|Use allowlist instead of whitelist.| -|Denylist|Use denylist instead of blacklist.| +|On-premises|On-premises or on-prem rather than on-premise or other variations.| +|Backup|Backup rather than back up, back-up or other variations.| +|Plugin|Plugin rather than plug-in or other variations.| +|Allowlist|Use allowlist instead of whitelist.| +|Denylist|Use denylist instead of blacklist.| {{< /table >}} ## Markdown elements diff --git a/site/content/docs/v1.11/upgrade-to-1.11.md b/site/content/docs/v1.11/upgrade-to-1.11.md index f6ec1ae186..ee412a0fa6 100644 --- a/site/content/docs/v1.11/upgrade-to-1.11.md +++ b/site/content/docs/v1.11/upgrade-to-1.11.md @@ -44,7 +44,7 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw velero install --crds-only --dry-run -o yaml | kubectl apply -f - ``` - **NOTE:** Since velero v1.10.0 only v1 CRD will be supported during installation, therefore, the v1.10.0 will only work on kubernetes version >= v1.16 + **NOTE:** Since velero v1.10.0 only v1 CRD will be supported during installation, therefore, the v1.10.0 will only work on Kubernetes version >= v1.16 3. Update the container image and objects fields used by the Velero deployment and, optionally, the restic daemon set: diff --git a/site/content/docs/v1.11/velero-install.md b/site/content/docs/v1.11/velero-install.md index 7fdd0f9361..1ac1395d52 100644 --- a/site/content/docs/v1.11/velero-install.md +++ b/site/content/docs/v1.11/velero-install.md @@ -3,7 +3,7 @@ title: "Velero Install CLI" layout: docs --- -This document serves as a guide to using the `velero install` CLI command to install `velero` server components into your kubernetes cluster. +This document serves as a guide to using the `velero install` CLI command to install `velero` server components into your Kubernetes cluster. _NOTE_: `velero install` will, by default, use the CLI's version information to determine the version of the server components to deploy. This behavior may be overridden by using the `--image` flag. Refer to [Building Server Component Container Images][1]. diff --git a/site/content/posts/2019-04-09-Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters.md b/site/content/posts/2019-04-09-Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters.md index 141c69a550..35df9c5180 100644 --- a/site/content/posts/2019-04-09-Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters.md +++ b/site/content/posts/2019-04-09-Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters.md @@ -1,6 +1,6 @@ --- title: Velero is an Open Source Tool to Back up and Migrate Kubernetes Clusters -slug: Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters +slug: Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters # Velero.io word list : ignore # image: https://placehold.it/200x200 excerpt: Velero is an open source tool to safely back up, recover, and migrate Kubernetes clusters and persistent volumes. 
It works both on premises and in a public cloud. author_name: Velero Team @@ -31,4 +31,4 @@ Since Velero was initially released in August 2017, we’ve had nearly 70 contri We are continuing to work towards Velero 1.0 and would love your help working on the items in our roadmap. If you’re interested in contributing, we have a number of GitHub issues labeled as [Good First Issue](https://github.com/vmware-tanzu/velero/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+first+issue%22) and [Help Wanted](https://github.com/vmware-tanzu/velero/issues?q=is%3Aopen+is%3Aissue+label%3A%22Help+wanted%22), including items related to Prometheus metrics, the CLI UX, improved documentation, and more. We are more than happy to work with new and existing contributors alike. -_Previously posted at: _ +_Previously posted at: _ diff --git a/site/content/posts/2019-08-22-announcing-velero-1.1.md b/site/content/posts/2019-08-22-announcing-velero-1.1.md index 8a7300af40..3db6156af6 100644 --- a/site/content/posts/2019-08-22-announcing-velero-1.1.md +++ b/site/content/posts/2019-08-22-announcing-velero-1.1.md @@ -28,7 +28,7 @@ A big focus of our work this cycle was continuing to improve support for restic. Along with our bug fixes, we’ve provided an easier way to move restic backups between storage providers. Different providers often have different StorageClasses, requiring user intervention to make restores successfully complete. -To make cross-provider moves simpler, we’ve introduced a StorageClass remapping plug-in. It allows you to automatically translate one StorageClass on PersistentVolumeClaims and PersistentVolumes to another. You can read more about it in our [documentation](https://velero.io/docs/v1.1.0/restore-reference/#changing-pv-pvc-storage-classes). +To make cross-provider moves simpler, we’ve introduced a StorageClass remapping plugin. It allows you to automatically translate one StorageClass on PersistentVolumeClaims and PersistentVolumes to another. You can read more about it in our [documentation](https://velero.io/docs/v1.1.0/restore-reference/#changing-pv-pvc-storage-classes). ## Quality-of-Life Improvements @@ -42,7 +42,7 @@ In order to help you better understand what resources have been backed up, we’ In the same vein, we’ve added the ability to put custom tags on cloud-provider snapshots. This approach should provide a better way to keep track of the resources being created in your cloud account. To add a label to a snapshot at backup time, use the `--labels` argument in the `velero backup create` command. -Our final change for increasing visibility into your Velero installation is the `velero plugin get` command. This command will report all the plug-ins within the Velero deployment.. +Our final change for increasing visibility into your Velero installation is the `velero plugin get` command. This command will report all the plugins within the Velero deployment.. Velero has previously used a restore-only flag on the server to control whether a cluster could write backups to object storage. With Velero 1.1, we’ve now moved the restore-only behavior into read-only BackupStorageLocations. This move means that the Velero server can use a BackupStorageLocation as a source to restore from, but not for backups, while still retaining the ability to back up to other configured locations. In the future, the `--restore-only` flag will be removed in favor of configuring read-only BackupStorageLocations. 
diff --git a/site/content/posts/2019-10-10-Velero-v1-1-Stateful-Backup-vSphere.md b/site/content/posts/2019-10-10-Velero-v1-1-Stateful-Backup-vSphere.md index fd7d19b783..de95973249 100644 --- a/site/content/posts/2019-10-10-Velero-v1-1-Stateful-Backup-vSphere.md +++ b/site/content/posts/2019-10-10-Velero-v1-1-Stateful-Backup-vSphere.md @@ -274,7 +274,7 @@ No resources found. $ kubectl get pods -n cassandra No resources found. -$ kubestl get pvc -n cassandra +$ kubectl get pvc -n cassandra No resources found. ``` diff --git a/site/content/posts/2020-05-26-Velero-1.4-Community-Wave.md b/site/content/posts/2020-05-26-Velero-1.4-Community-Wave.md index 9ff7cdb8d0..c0c6c8190e 100644 --- a/site/content/posts/2020-05-26-Velero-1.4-Community-Wave.md +++ b/site/content/posts/2020-05-26-Velero-1.4-Community-Wave.md @@ -86,7 +86,7 @@ When deploying Velero on-premises, users have often asked for supporting a custo Some users may have noticed that when restoring a backup containing CustomResourceDefinitions, the corresponding custom resources were not always restored. However, when running another restore, everything ran successfully. -With Velero v1.4.0, we’ve revisited our Kuberneters API server group discovery code and allowed the restore code to detect CustomResourceDefinition groups as they get restored, rather simply relying on time-based refreshes. +With Velero v1.4.0, we’ve revisited our Kubernetes API server group discovery code and allowed the restore code to detect CustomResourceDefinition groups as they get restored, rather simply relying on time-based refreshes. ## Refactored CRD backup code diff --git a/site/content/posts/2020-09-16-Velero-1.5-For-And-By-Community.md b/site/content/posts/2020-09-16-Velero-1.5-For-And-By-Community.md index 8c65455aca..0d6938ebff 100644 --- a/site/content/posts/2020-09-16-Velero-1.5-For-And-By-Community.md +++ b/site/content/posts/2020-09-16-Velero-1.5-For-And-By-Community.md @@ -44,7 +44,7 @@ In this release, we introduce a new plugin type, DeleteItemAction plugin, that o The [velero-plugin-for-csi](https://github.com/vmware-tanzu/velero-plugin-for-csi) introduced a new pattern for backing up and restoring volume snapshots using BackupItemAction and RestoreItemAction plugins. To allow the community to adopt a similar pattern for their custom resources, Velero had to provide an extension point to clean up both in-cluster and external resources, created by their BackupItemAction plugins. This is now possible with DeleteItemAction plugins. The interface for this new plugin type is similar to that of BackupItemAction and RestoreItemAction plugins. You can read more about the design for this plugin in the [design documents of our repository on github](https://github.com/vmware-tanzu/velero/blob/main/design/delete-item-action.md). -### Code Mordernization +### Code Modernization Velero has been helping its users with disaster recovery for their Kubernetes clusters since its first release in August 2017. Over the past three years, there have been major improvements in the ecosystem, including new frameworks that make it easier to develop solutions for Kubernetes. This release marks the first steps in our journey to modernize the Velero codebase and take advantage of newer frameworks as we begin the adoption of [kubebuilder](https://book.kubebuilder.io/), the most popular framework to build custom Kubernetes APIs and their respective controllers. As this effort continues, we would like to invite more folks to be a part of our growing contributor base. 
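The DeleteItemAction plugin type described in the 1.5 post above is the same extension point the new `DataUploadDeleteAction` in this diff plugs into. The following is a minimal sketch of the two-method shape it implements (`AppliesTo` and `Execute`), with hypothetical names; plugin registration with the Velero plugin server is omitted, and the resource selector shown is only an example.

```go
package exampleplugin

import (
	"github.com/sirupsen/logrus"

	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
)

// ExternalCleanupAction is a hypothetical DeleteItemAction mirroring the two
// methods DataUploadDeleteAction implements in this change.
type ExternalCleanupAction struct {
	logger logrus.FieldLogger
}

// AppliesTo restricts the action to the resource types it should be invoked for.
func (a *ExternalCleanupAction) AppliesTo() (velero.ResourceSelector, error) {
	return velero.ResourceSelector{
		// Illustrative selector; DataUploadDeleteAction uses "datauploads.velero.io".
		IncludedResources: []string{"persistentvolumeclaims"},
	}, nil
}

// Execute is called for each matching item when the backup containing it is deleted.
func (a *ExternalCleanupAction) Execute(input *velero.DeleteItemActionExecuteInput) error {
	// input.Item is the item as stored in the backup; input.Backup is the Backup
	// being deleted. A real plugin would clean up external state here.
	a.logger.Infof("cleaning up external state for an item from backup %s", input.Backup.Name)
	_ = input.Item.UnstructuredContent()
	return nil
}
```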
diff --git a/test/e2e/Makefile b/test/e2e/Makefile index 0fbf46e3ee..596585c0fc 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -115,7 +115,7 @@ run: ginkgo @[ "${BSL_BUCKET}" ] && echo "Using bucket ${BSL_BUCKET} to store backups from E2E tests" || \ (echo "Bucket to store the backups from E2E tests is required, please re-run with BSL_BUCKET="; exit 1 ) @[ "${CLOUD_PROVIDER}" ] && echo "Using cloud provider ${CLOUD_PROVIDER}" || \ - (echo "Cloud provider for target cloud/plug-in provider is required, please rerun with CLOUD_PROVIDER="; exit 1) + (echo "Cloud provider for target cloud/plugin provider is required, please rerun with CLOUD_PROVIDER="; exit 1) @$(GINKGO) -v $(FOCUS_STR) $(SKIP_STR) . -- -velerocli=$(VELERO_CLI) \ -velero-image=$(VELERO_IMAGE) \ -plugins=$(PLUGINS) \ diff --git a/test/e2e/README.md b/test/e2e/README.md index 151f0ebb8d..f845752fe8 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -9,7 +9,7 @@ If you previously ran unit tests using the `go test ./...` command or any of its ## Prerequisites Running the E2E tests expects: -1. A running kubernetes cluster: +1. A running Kubernetes cluster: 1. With DNS and CNI installed. 1. Compatible with Velero- running Kubernetes v1.10 or later. 1. With necessary storage drivers/provisioners installed. @@ -31,7 +31,7 @@ These configuration parameters are expected as values to the following command l 1. `-credentials-file`: File containing credentials for backup and volume provider. Required. 1. `-bucket`: Name of the object storage bucket where backups from e2e tests should be stored. Required. -1. `-cloud-provider`: The cloud the tests will be run in. Appropriate plug-ins will be installed except for kind which requires +1. `-cloud-provider`: The cloud the tests will be run in. Appropriate plugins will be installed except for kind which requires the object-store-provider to be specified. 1. `-object-store-provider`: Object store provider to use. Required when kind is the cloud provider. 1. `-velerocli`: Path to the velero application to use. Optional, by default uses `velero` in the `$PATH` diff --git a/test/e2e/backup/backup.go b/test/e2e/backup/backup.go index 8088e5390a..16a3003848 100644 --- a/test/e2e/backup/backup.go +++ b/test/e2e/backup/backup.go @@ -91,7 +91,7 @@ func BackupRestoreTest(useVolumeSnapshots bool) { } backupName = "backup-" + UUIDgen.String() restoreName = "restore-" + UUIDgen.String() - // Even though we are using Velero's CloudProvider plugin for object storage, the kubernetes cluster is running on + // Even though we are using Velero's CloudProvider plugin for object storage, the Kubernetes cluster is running on // KinD. So use the kind installation for Kibishii. 
// if set ProvideSnapshotsVolumeParam to false here, make sure set it true in other tests of this case diff --git a/test/e2e/backups/deletion.go b/test/e2e/backups/deletion.go index 4b65a39043..4e32d3ba38 100644 --- a/test/e2e/backups/deletion.go +++ b/test/e2e/backups/deletion.go @@ -132,7 +132,7 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupNam }) if providerName == "vsphere" && useVolumeSnapshots { - // Wait for uploads started by the Velero Plug-in for vSphere to complete + // Wait for uploads started by the Velero Plugin for vSphere to complete // TODO - remove after upload progress monitoring is implemented fmt.Println("Waiting for vSphere uploads to complete") if err := WaitForVSphereUploadCompletion(oneHourTimeout, time.Hour, deletionTest, 2); err != nil { diff --git a/test/e2e/basic/api-group/enable_api_group_versions.go b/test/e2e/basic/api-group/enable_api_group_versions.go index 07878e2367..22095698ac 100644 --- a/test/e2e/basic/api-group/enable_api_group_versions.go +++ b/test/e2e/basic/api-group/enable_api_group_versions.go @@ -21,7 +21,6 @@ import ( "encoding/json" "flag" "fmt" - "io/ioutil" "os" "os/exec" "regexp" @@ -376,7 +375,7 @@ func installTestCRD(ctx context.Context, index int, group, path string) error { } func rerenderTestYaml(index int, group, path string) (string, error) { - content, err := ioutil.ReadFile(path) + content, err := os.ReadFile(path) if err != nil { return "", errors.Wrapf(err, "failed to get %s when install test yaml", path) } @@ -399,7 +398,7 @@ func rerenderTestYaml(index int, group, path string) (string, error) { newContent = strings.ReplaceAll(newContent, group, fmt.Sprintf("%s.%d", group, index)) By(fmt.Sprintf("\n%s\n", newContent)) - tmpFile, err := ioutil.TempFile("", "test-yaml") + tmpFile, err := os.CreateTemp("", "test-yaml") if err != nil { return "", errors.Wrapf(err, "failed to create temp file when install storage class") } diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index e9c1eef82c..745331312a 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -179,7 +179,7 @@ func GetKubeconfigContext() error { func TestE2e(t *testing.T) { // Skip running E2E tests when running only "short" tests because: // 1. E2E tests are long running tests involving installation of Velero and performing backup and restore operations. - // 2. E2E tests require a kubernetes cluster to install and run velero which further requires more configuration. See above referenced command line flags. + // 2. E2E tests require a Kubernetes cluster to install and run velero which further requires more configuration. See above referenced command line flags. 
if testing.Short() { t.Skip("Skipping E2E tests") } diff --git a/test/e2e/resourcepolicies/resource_policies.go b/test/e2e/resourcepolicies/resource_policies.go index 3a73592f52..9defd19c00 100644 --- a/test/e2e/resourcepolicies/resource_policies.go +++ b/test/e2e/resourcepolicies/resource_policies.go @@ -19,7 +19,6 @@ package filtering import ( "context" "fmt" - "io/ioutil" "os" "strings" "time" @@ -270,7 +269,7 @@ func (r *ResourcePoliciesCase) installTestStorageClasses(path string) error { if err != nil { return err } - content, err := ioutil.ReadFile(path) + content, err := os.ReadFile(path) if err != nil { return errors.Wrapf(err, "failed to get %s when install storage class", path) } @@ -278,7 +277,7 @@ func (r *ResourcePoliciesCase) installTestStorageClasses(path string) error { // replace sc to new value newContent := strings.ReplaceAll(string(content), "name: e2e-storage-class", "name: e2e-storage-class-2") - tmpFile, err := ioutil.TempFile("", "sc-file") + tmpFile, err := os.CreateTemp("", "sc-file") if err != nil { return errors.Wrapf(err, "failed to create temp file when install storage class") } diff --git a/test/perf/Makefile b/test/perf/Makefile index c97fa35a05..d8abcc5f15 100644 --- a/test/perf/Makefile +++ b/test/perf/Makefile @@ -86,7 +86,7 @@ run: ginkgo @[ "${BSL_BUCKET}" ] && echo "Using bucket ${BSL_BUCKET} to store backups from E2E tests" || \ (echo "Bucket to store the backups from E2E tests is required, please re-run with BSL_BUCKET="; exit 1 ) @[ "${CLOUD_PROVIDER}" ] && echo "Using cloud provider ${CLOUD_PROVIDER}" || \ - (echo "Cloud provider for target cloud/plug-in provider is required, please rerun with CLOUD_PROVIDER="; exit 1) + (echo "Cloud provider for target cloud/plugin provider is required, please rerun with CLOUD_PROVIDER="; exit 1) @$(GINKGO) -v $(FOCUS_STR) $(SKIP_STR) . -- -velerocli=$(VELERO_CLI) \ -velero-image=$(VELERO_IMAGE) \ -plugins=$(PLUGINS) \ diff --git a/test/perf/e2e_suite_test.go b/test/perf/e2e_suite_test.go index b6afb07149..9c2326f778 100644 --- a/test/perf/e2e_suite_test.go +++ b/test/perf/e2e_suite_test.go @@ -92,7 +92,7 @@ func TestE2e(t *testing.T) { By("Install test resources before testing TestE2e") // Skip running E2E tests when running only "short" tests because: // 1. E2E tests are long running tests involving installation of Velero and performing backup and restore operations. - // 2. E2E tests require a kubernetes cluster to install and run velero which further requires more configuration. See above referenced command line flags. + // 2. E2E tests require a Kubernetes cluster to install and run velero which further requires more configuration. See above referenced command line flags. if err := initConfig(); err != nil { fmt.Println(err) diff --git a/test/util/k8s/common.go b/test/util/k8s/common.go index 3754ee1f0f..7efb56add5 100644 --- a/test/util/k8s/common.go +++ b/test/util/k8s/common.go @@ -33,7 +33,7 @@ import ( "github.com/vmware-tanzu/velero/test/util/common" ) -// ensureClusterExists returns whether or not a kubernetes cluster exists for tests to be run on. +// ensureClusterExists returns whether or not a Kubernetes cluster exists for tests to be run on. 
func EnsureClusterExists(ctx context.Context) error { return exec.CommandContext(ctx, "kubectl", "cluster-info").Run() } diff --git a/test/util/kibishii/kibishii_utils.go b/test/util/kibishii/kibishii_utils.go index 4beb6cf46f..0b2251112b 100644 --- a/test/util/kibishii/kibishii_utils.go +++ b/test/util/kibishii/kibishii_utils.go @@ -108,7 +108,7 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc fmt.Printf("VeleroBackupNamespace done %s\n", time.Now().Format("2006-01-02 15:04:05")) if useVolumeSnapshots { if providerName == "vsphere" { - // Wait for uploads started by the Velero Plug-in for vSphere to complete + // Wait for uploads started by the Velero Plugin for vSphere to complete // TODO - remove after upload progress monitoring is implemented fmt.Println("Waiting for vSphere uploads to complete") if err := WaitForVSphereUploadCompletion(oneHourTimeout, time.Hour, kibishiiNamespace, 2); err != nil { @@ -134,7 +134,7 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc return errors.New(fmt.Sprintf("PVB count %d should be %d in namespace %s", len(pvbs), pvCount, kibishiiNamespace)) } if providerName == "vsphere" { - // Wait for uploads started by the Velero Plug-in for vSphere to complete + // Wait for uploads started by the Velero Plugin for vSphere to complete // TODO - remove after upload progress monitoring is implemented // TODO[High] - uncomment code block below when vSphere plugin PR #500 is included in release version. diff --git a/test/util/report/report.go b/test/util/report/report.go index e1b8e590e0..37d2dd704a 100644 --- a/test/util/report/report.go +++ b/test/util/report/report.go @@ -20,8 +20,9 @@ import ( "io/ioutil" "github.com/pkg/errors" - "github.com/vmware-tanzu/velero/test" "gopkg.in/yaml.v3" + + "github.com/vmware-tanzu/velero/test" ) func GenerateYamlReport() error { diff --git a/test/util/velero/install.go b/test/util/velero/install.go index a886147d62..0626c479da 100644 --- a/test/util/velero/install.go +++ b/test/util/velero/install.go @@ -80,7 +80,7 @@ func VeleroInstall(ctx context.Context, veleroCfg *VeleroConfig, isStandbyCluste } } else { if veleroCfg.ObjectStoreProvider == "" { - return errors.New("No object store provider specified - must be specified when using kind as the cloud provider") // Gotta have an object store provider + return errors.New("No object store provider specified - must be specified when using kind as the cloud provider") // Must have an object store provider } } diff --git a/test/util/velero/velero_utils.go b/test/util/velero/velero_utils.go index c7a6cef3ea..1720de008f 100644 --- a/test/util/velero/velero_utils.go +++ b/test/util/velero/velero_utils.go @@ -677,7 +677,7 @@ func VeleroAddPluginsForProvider(ctx context.Context, veleroCLI string, veleroNa return nil } -// WaitForVSphereUploadCompletion waits for uploads started by the Velero Plug-in for vSphere to complete +// WaitForVSphereUploadCompletion waits for uploads started by the Velero Plugin for vSphere to complete // TODO - remove after upload progress monitoring is implemented func WaitForVSphereUploadCompletion(ctx context.Context, timeout time.Duration, namespace string, expectCount int) error { err := wait.PollImmediate(time.Second*5, timeout, func() (bool, error) {