diff --git a/changelogs/unreleased/8102-sseago b/changelogs/unreleased/8102-sseago new file mode 100644 index 0000000000..ed1c4bfa14 --- /dev/null +++ b/changelogs/unreleased/8102-sseago @@ -0,0 +1 @@ +ItemBlock model and phase 1 (single-thread) workflow changes diff --git a/design/backup-performance-improvements.md b/design/backup-performance-improvements.md index 9105a78802..9fdb2bc7c8 100644 --- a/design/backup-performance-improvements.md +++ b/design/backup-performance-improvements.md @@ -109,19 +109,27 @@ This mainly applies to plugins that operate on pods which reference resources wh ### Changes to processing item list from the Item Collector -#### New structs ItemBlock and ItemBlockItem +#### New structs BackupItemBlock, ItemBlock, and ItemBlockItem ```go -type ItemBlock struct { - log logrus.FieldLogger +package backup + +type BackupItemBlock struct { + itemblock.ItemBlock // This is a reference to the shared itemBackupper for the backup itemBackupper *itemBackupper +} + +package itemblock + +type ItemBlock struct { + Log logrus.FieldLogger Items []ItemBlockItem } type ItemBlockItem struct { - gr schema.GroupResource - item *unstructured.Unstructured - preferredGVR schema.GroupVersionResource + Gr schema.GroupResource + Item *unstructured.Unstructured + PreferredGVR schema.GroupVersionResource } ``` diff --git a/pkg/backup/backup.go b/pkg/backup/backup.go index 33840e2e2e..0fb4a390c4 100644 --- a/pkg/backup/backup.go +++ b/pkg/backup/backup.go @@ -31,6 +31,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -46,6 +47,7 @@ import ( velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/discovery" + "github.com/vmware-tanzu/velero/pkg/itemblock" "github.com/vmware-tanzu/velero/pkg/itemoperation" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/persistence" @@ -53,6 +55,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/plugin/framework" "github.com/vmware-tanzu/velero/pkg/plugin/velero" biav2 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v2" + ibav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/itemblockaction/v1" vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" "github.com/vmware-tanzu/velero/pkg/podexec" "github.com/vmware-tanzu/velero/pkg/podvolume" @@ -77,6 +80,7 @@ type Backupper interface { backup *Request, backupFile io.Writer, actions []biav2.BackupItemAction, + itemBlockActions []ibav1.ItemBlockAction, volumeSnapshotterGetter VolumeSnapshotterGetter, ) error @@ -85,6 +89,7 @@ type Backupper interface { backupRequest *Request, backupFile io.Writer, backupItemActionResolver framework.BackupItemActionResolverV2, + itemBlockActionResolver framework.ItemBlockActionResolver, volumeSnapshotterGetter VolumeSnapshotterGetter, ) error @@ -210,9 +215,10 @@ type VolumeSnapshotterGetter interface { // back up individual resources that don't prevent the backup from continuing to be processed) are logged // to the backup log. 
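The design-document hunk above introduces the shared `itemblock.ItemBlock`/`ItemBlockItem` types and the backup-package wrapper `BackupItemBlock`. As a reading aid (not part of the diff), here is a minimal sketch of how those exported fields fit together; only the struct fields and the `pkg/itemblock` import path come from the changes above, while the surrounding package, function, and variable names are illustrative assumptions.

```go
package example

import (
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/vmware-tanzu/velero/pkg/itemblock"
)

// buildBlock groups a single pod into an ItemBlock using the exported fields
// defined in the design above: Log, Items, and per-item Gr/Item/PreferredGVR.
func buildBlock(log logrus.FieldLogger, pod *unstructured.Unstructured) itemblock.ItemBlock {
	return itemblock.ItemBlock{
		Log: log,
		Items: []itemblock.ItemBlockItem{
			{
				Gr:           schema.GroupResource{Resource: "pods"},
				Item:         pod,
				PreferredGVR: schema.GroupVersionResource{Version: "v1", Resource: "pods"},
			},
		},
	}
}
```

Per the design hunk, `BackupItemBlock` in the backup package simply embeds this struct and adds the shared `itemBackupper` reference, so the code that backs up a block carries everything it needs in one value.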
func (kb *kubernetesBackupper) Backup(log logrus.FieldLogger, backupRequest *Request, backupFile io.Writer, - actions []biav2.BackupItemAction, volumeSnapshotterGetter VolumeSnapshotterGetter) error { + actions []biav2.BackupItemAction, itemBlockActions []ibav1.ItemBlockAction, volumeSnapshotterGetter VolumeSnapshotterGetter) error { backupItemActions := framework.NewBackupItemActionResolverV2(actions) - return kb.BackupWithResolvers(log, backupRequest, backupFile, backupItemActions, volumeSnapshotterGetter) + itemBlockActionResolver := framework.NewItemBlockActionResolver(itemBlockActions) + return kb.BackupWithResolvers(log, backupRequest, backupFile, backupItemActions, itemBlockActionResolver, volumeSnapshotterGetter) } func (kb *kubernetesBackupper) BackupWithResolvers( @@ -220,6 +226,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers( backupRequest *Request, backupFile io.Writer, backupItemActionResolver framework.BackupItemActionResolverV2, + itemBlockActionResolver framework.ItemBlockActionResolver, volumeSnapshotterGetter VolumeSnapshotterGetter, ) error { gzippedData := gzip.NewWriter(backupFile) @@ -268,6 +275,12 @@ func (kb *kubernetesBackupper) BackupWithResolvers( return err } + backupRequest.ResolvedItemBlockActions, err = itemBlockActionResolver.ResolveActions(kb.discoveryHelper, log) + if err != nil { + log.WithError(errors.WithStack(err)).Debugf("Error from itemBlockActionResolver.ResolveActions") + return err + } + backupRequest.BackedUpItems = map[itemKey]struct{}{} podVolumeTimeout := kb.podVolumeTimeout @@ -402,37 +415,72 @@ func (kb *kubernetesBackupper) BackupWithResolvers( }() backedUpGroupResources := map[schema.GroupResource]bool{} + // Maps items in the item list from GR+NamespacedName to a slice of pointers to kubernetesResources + // We need the slice value since if the EnableAPIGroupVersions feature flag is set, there may + // be more than one resource to back up for the given item. 
+ itemsMap := make(map[velero.ResourceIdentifier][]*kubernetesResource) + for i := range items { + key := velero.ResourceIdentifier{ + GroupResource: items[i].groupResource, + Namespace: items[i].namespace, + Name: items[i].name, + } + itemsMap[key] = append(itemsMap[key], items[i]) + } - for i, item := range items { + var itemBlock *BackupItemBlock + + for i := range items { log.WithFields(map[string]interface{}{ "progress": "", - "resource": item.groupResource.String(), - "namespace": item.namespace, - "name": item.name, + "resource": items[i].groupResource.String(), + "namespace": items[i].namespace, + "name": items[i].name, }).Infof("Processing item") - // use an anonymous func so we can defer-close/remove the file - // as soon as we're done with it - func() { - var unstructured unstructured.Unstructured - - f, err := os.Open(item.path) - if err != nil { - log.WithError(errors.WithStack(err)).Error("Error opening file containing item") - return + // Skip if this item has already been added to an ItemBlock + if items[i].inItemBlock { + log.Infof("Skipping %s %s/%s because it's already in an ItemBlock", items[i].groupResource.String(), items[i].namespace, items[i].name) + } else { + if itemBlock == nil { + itemBlock = NewBackupItemBlock(log, itemBackupper) } - defer f.Close() - defer os.Remove(f.Name()) - - if err := json.NewDecoder(f).Decode(&unstructured); err != nil { - log.WithError(errors.WithStack(err)).Error("Error decoding JSON from file") - return + var newBlockItem *unstructured.Unstructured + + // If the EnableAPIGroupVersions feature flag is set, there could be multiple versions + // of this item to be backed up. Include all of them in the same ItemBlock + key := velero.ResourceIdentifier{ + GroupResource: items[i].groupResource, + Namespace: items[i].namespace, + Name: items[i].name, + } + allVersionsOfItem := itemsMap[key] + for _, itemVersion := range allVersionsOfItem { + unstructured := itemBlock.addKubernetesResource(itemVersion, log) + if newBlockItem == nil { + newBlockItem = unstructured + } } + // call GetRelatedItems, add found items to block if not in block, recursively until no more items + if newBlockItem != nil { + kb.executeItemBlockActions(log, newBlockItem, items[i].groupResource, items[i].name, items[i].namespace, itemsMap, itemBlock) + } + } - if backedUp := kb.backupItem(log, item.groupResource, itemBackupper, &unstructured, item.preferredGVR); backedUp { - backedUpGroupResources[item.groupResource] = true + // We skip calling backupItemBlock here so that we will add the next item to the current + // ItemBlock if: + // 1) This is not the last item to be processed + // 2) Both current and next item are ordered resources + // 3) Both current and next item are for the same GroupResource + addNextToBlock := i < len(items)-1 && items[i].orderedResource && items[i+1].orderedResource && items[i].groupResource == items[i+1].groupResource + if itemBlock != nil && len(itemBlock.Items) > 0 && !addNextToBlock { + log.Infof("Backing Up Item Block including %s %s/%s (%d items in block)", items[i].groupResource.String(), items[i].namespace, items[i].name, len(itemBlock.Items)) + backedUpGRs := kb.backupItemBlock(*itemBlock) + for _, backedUpGR := range backedUpGRs { + backedUpGroupResources[backedUpGR] = true } - }() + itemBlock = nil + } // updated total is computed as "how many items we've backed up so far, plus // how many items we know of that are remaining" @@ -446,9 +494,9 @@ func (kb *kubernetesBackupper) BackupWithResolvers(
log.WithFields(map[string]interface{}{ "progress": "", - "resource": item.groupResource.String(), - "namespace": item.namespace, - "name": item.name, + "resource": items[i].groupResource.String(), + "namespace": items[i].namespace, + "name": items[i].name, }).Infof("Backed up %d items out of an estimated total of %d (estimate will change throughout the backup)", len(backupRequest.BackedUpItems), totalItems) } @@ -501,8 +549,152 @@ func (kb *kubernetesBackupper) BackupWithResolvers( return nil } -func (kb *kubernetesBackupper) backupItem(log logrus.FieldLogger, gr schema.GroupResource, itemBackupper *itemBackupper, unstructured *unstructured.Unstructured, preferredGVR schema.GroupVersionResource) bool { - backedUpItem, _, err := itemBackupper.backupItem(log, unstructured, gr, preferredGVR, false, false) +func (kb *kubernetesBackupper) executeItemBlockActions( + log logrus.FieldLogger, + obj runtime.Unstructured, + groupResource schema.GroupResource, + name, namespace string, + itemsMap map[velero.ResourceIdentifier][]*kubernetesResource, + itemBlock *BackupItemBlock, +) { + metadata, err := meta.Accessor(obj) + if err != nil { + log.WithError(errors.WithStack(err)).Warn("Failed to get object metadata.") + return + } + for _, action := range itemBlock.itemBackupper.backupRequest.ResolvedItemBlockActions { + if !action.ShouldUse(groupResource, namespace, metadata, log) { + continue + } + log.Info("Executing ItemBlock action") + + relatedItems, err := action.GetRelatedItems(obj, itemBlock.itemBackupper.backupRequest.Backup) + if err != nil { + log.Error(errors.Wrapf(err, "error executing ItemBlock action (groupResource=%s, namespace=%s, name=%s)", groupResource.String(), namespace, name)) + continue + } + + for _, relatedItem := range relatedItems { + var newBlockItem *unstructured.Unstructured + // Look for item in itemsMap + itemsToAdd := itemsMap[relatedItem] + // if item is in the item collector list, we'll have at least one element. + // If EnableAPIGroupVersions is set, we may have more than one. + // If we get an unstructured obj back from addKubernetesResource, then it wasn't + // already in a block and we recursively look for related items in the returned item. 
+ if len(itemsToAdd) > 0 { + for _, itemToAdd := range itemsToAdd { + unstructured := itemBlock.addKubernetesResource(itemToAdd, log) + if newBlockItem == nil { + newBlockItem = unstructured + } + } + if newBlockItem != nil { + kb.executeItemBlockActions(log, newBlockItem, relatedItem.GroupResource, relatedItem.Name, relatedItem.Namespace, itemsMap, itemBlock) + } + continue + } + // Item wasn't found in item collector list, get from cluster + gvr, resource, err := itemBlock.itemBackupper.discoveryHelper.ResourceFor(relatedItem.GroupResource.WithVersion("")) + if err != nil { + log.Error(errors.WithStack(err)) + continue + } + + client, err := itemBlock.itemBackupper.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, relatedItem.Namespace) + if err != nil { + log.Error(errors.WithStack(err)) + continue + } + + item, err := client.Get(relatedItem.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + log.WithFields(logrus.Fields{ + "groupResource": relatedItem.GroupResource, + "namespace": relatedItem.Namespace, + "name": relatedItem.Name, + }).Warnf("Related item was not found in Kubernetes API, can't add to item block") + continue + } + if err != nil { + log.Error(errors.WithStack(err)) + continue + } + itemsMap[relatedItem] = append(itemsMap[relatedItem], &kubernetesResource{ + groupResource: relatedItem.GroupResource, + preferredGVR: gvr, + namespace: relatedItem.Namespace, + name: relatedItem.Name, + inItemBlock: true, + }) + log.Infof("adding %s %s/%s to ItemBlock", relatedItem.GroupResource, relatedItem.Namespace, relatedItem.Name) + itemBlock.AddUnstructured(relatedItem.GroupResource, item, gvr) + kb.executeItemBlockActions(log, item, relatedItem.GroupResource, relatedItem.Name, relatedItem.Namespace, itemsMap, itemBlock) + } + } +} + +func (kb *kubernetesBackupper) backupItemBlock(itemBlock BackupItemBlock) []schema.GroupResource { + // find pods in ItemBlock + // filter pods based on whether they still need to be backed up + // this list will be used to run pre/post hooks + var postHookPods []itemblock.ItemBlockItem + var items []itemblock.ItemBlockItem + itemBlock.Log.Debug("Executing pre hooks") + for _, item := range itemBlock.Items { + if item.Gr == kuberesource.Pods { + skipHooks := false + metadata, err := meta.Accessor(item.Item) + if err != nil { + itemBlock.Log.WithError(errors.WithStack(err)).Error("Error accessing pod metadata") + skipHooks = true + } + // Don't run hooks if pod is excluded + if !skipHooks && !itemBlock.itemBackupper.itemInclusionChecks(itemBlock.Log, false, metadata, item.Item, item.Gr) { + skipHooks = true + } + if !skipHooks { + key := itemKey{ + resource: resourceKey(item.Item), + namespace: metadata.GetNamespace(), + name: metadata.GetName(), + } + // Don't run hooks if pod has already been backed up + if _, exists := itemBlock.itemBackupper.backupRequest.BackedUpItems[key]; !exists { + if err := itemBlock.itemBackupper.itemHookHandler.HandleHooks(itemBlock.Log, item.Gr, item.Item, itemBlock.itemBackupper.backupRequest.ResourceHooks, hook.PhasePre, itemBlock.itemBackupper.hookTracker); err != nil { + itemBlock.Log.WithError(err).WithField("name", item.Item.GetName()).Error("Error running hooks for pod") + // if pre hook fails, flag pod as backed-up and move on + itemBlock.itemBackupper.backupRequest.BackedUpItems[key] = struct{}{} + continue + } + postHookPods = append(postHookPods, item) + } + } + } + items = append(items, item) + + } + + itemBlock.Log.Debug("Backing up items in BackupItemBlock") + var grList 
[]schema.GroupResource + for _, item := range items { + if backedUp := kb.backupItem(itemBlock.Log, item.Gr, itemBlock.itemBackupper, item.Item, item.PreferredGVR, &itemBlock); backedUp { + grList = append(grList, item.Gr) + } + } + + itemBlock.Log.Debug("Executing post hooks") + for _, pod := range postHookPods { + if err := itemBlock.itemBackupper.itemHookHandler.HandleHooks(itemBlock.Log, pod.Gr, pod.Item, itemBlock.itemBackupper.backupRequest.ResourceHooks, hook.PhasePost, itemBlock.itemBackupper.hookTracker); err != nil { + itemBlock.Log.WithError(err).WithField("name", pod.Item.GetName()).Error("Error running hooks for pod") + } + } + + return grList +} + +func (kb *kubernetesBackupper) backupItem(log logrus.FieldLogger, gr schema.GroupResource, itemBackupper *itemBackupper, unstructured *unstructured.Unstructured, preferredGVR schema.GroupVersionResource, itemBlock *BackupItemBlock) bool { + backedUpItem, _, err := itemBackupper.backupItem(log, unstructured, gr, preferredGVR, false, false, itemBlock) if aggregate, ok := err.(kubeerrs.Aggregate); ok { log.WithField("name", unstructured.GetName()).Infof("%d errors encountered backup up item", len(aggregate.Errors())) // log each error separately so we get error location info in the log, and an @@ -527,7 +719,7 @@ func (kb *kubernetesBackupper) finalizeItem( unstructured *unstructured.Unstructured, preferredGVR schema.GroupVersionResource, ) (bool, []FileForArchive) { - backedUpItem, updateFiles, err := itemBackupper.backupItem(log, unstructured, gr, preferredGVR, true, true) + backedUpItem, updateFiles, err := itemBackupper.backupItem(log, unstructured, gr, preferredGVR, true, true, nil) if aggregate, ok := err.(kubeerrs.Aggregate); ok { log.WithField("name", unstructured.GetName()).Infof("%d errors encountered backup up item", len(aggregate.Errors())) // log each error separately so we get error location info in the log, and an @@ -581,7 +773,7 @@ func (kb *kubernetesBackupper) backupCRD(log logrus.FieldLogger, gr schema.Group log.Infof("Found associated CRD %s to add to backup", gr.String()) - kb.backupItem(log, gvr.GroupResource(), itemBackupper, unstructured, gvr) + kb.backupItem(log, gvr.GroupResource(), itemBackupper, unstructured, gvr, nil) } func (kb *kubernetesBackupper) writeBackupVersion(tw *tar.Writer) error { diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index 325ebab2c5..78ba0b0cc9 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -56,6 +56,7 @@ import ( persistencemocks "github.com/vmware-tanzu/velero/pkg/persistence/mocks" "github.com/vmware-tanzu/velero/pkg/plugin/velero" biav2 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v2" + ibav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/itemblockaction/v1" vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/test" @@ -97,7 +98,7 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) { h.addItems(t, resource) } - h.backupper.Backup(h.log, req, backupFile, nil, nil) + h.backupper.Backup(h.log, req, backupFile, nil, nil, nil) // go through BackedUpItems after the backup to assemble the list of files we // expect to see in the tarball and compare to see if they match @@ -155,7 +156,7 @@ func TestBackupProgressIsUpdated(t *testing.T) { h.addItems(t, resource) } - h.backupper.Backup(h.log, req, backupFile, nil, nil) + h.backupper.Backup(h.log, req, backupFile, nil, nil, nil) require.NotNil(t, 
req.Status.Progress) assert.Len(t, req.BackedUpItems, req.Status.Progress.TotalItems) @@ -878,7 +879,7 @@ func TestBackupOldResourceFiltering(t *testing.T) { h.addItems(t, resource) } - h.backupper.Backup(h.log, req, backupFile, tc.actions, nil) + h.backupper.Backup(h.log, req, backupFile, tc.actions, nil, nil) assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) }) @@ -1055,7 +1056,7 @@ func TestCRDInclusion(t *testing.T) { h.addItems(t, resource) } - h.backupper.Backup(h.log, req, backupFile, nil, nil) + h.backupper.Backup(h.log, req, backupFile, nil, nil, nil) assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) }) @@ -1150,7 +1151,7 @@ func TestBackupResourceCohabitation(t *testing.T) { h.addItems(t, resource) } - h.backupper.Backup(h.log, req, backupFile, nil, nil) + h.backupper.Backup(h.log, req, backupFile, nil, nil, nil) assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) }) @@ -1174,7 +1175,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) { h.addItems(t, test.Deployments(builder.ForDeployment("ns-1", "deploy-1").Result())) h.addItems(t, test.ExtensionsDeployments(builder.ForDeployment("ns-1", "deploy-1").Result())) - h.backupper.Backup(h.log, backup1, backup1File, nil, nil) + h.backupper.Backup(h.log, backup1, backup1File, nil, nil, nil) assertTarballContents(t, backup1File, "metadata/version", "resources/deployments.apps/namespaces/ns-1/deploy-1.json", "resources/deployments.apps/v1-preferredversion/namespaces/ns-1/deploy-1.json") @@ -1185,7 +1186,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) { } backup2File := bytes.NewBuffer([]byte{}) - h.backupper.Backup(h.log, backup2, backup2File, nil, nil) + h.backupper.Backup(h.log, backup2, backup2File, nil, nil, nil) assertTarballContents(t, backup2File, "metadata/version", "resources/deployments.apps/namespaces/ns-1/deploy-1.json", "resources/deployments.apps/v1-preferredversion/namespaces/ns-1/deploy-1.json") } @@ -1240,7 +1241,7 @@ func TestBackupResourceOrdering(t *testing.T) { h.addItems(t, resource) } - h.backupper.Backup(h.log, req, backupFile, nil, nil) + h.backupper.Backup(h.log, req, backupFile, nil, nil, nil) assertTarballOrdering(t, backupFile, "pods", "persistentvolumeclaims", "persistentvolumes") }) @@ -1439,7 +1440,7 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) { require.NoError(t, tc.backupReq.ResPolicies.BuildPolicy(tc.resPolicies)) } - err := h.backupper.Backup(h.log, tc.backupReq, backupFile, actions, nil) + err := h.backupper.Backup(h.log, tc.backupReq, backupFile, actions, nil, nil) assert.NoError(t, err) if tc.expectSkippedPVs != nil { @@ -1653,7 +1654,7 @@ func TestBackupActionsRunForCorrectItems(t *testing.T) { actions = append(actions, action) } - err := h.backupper.Backup(h.log, req, backupFile, actions, nil) + err := h.backupper.Backup(h.log, req, backupFile, actions, nil, nil) assert.NoError(t, err) for action, want := range tc.actions { @@ -1729,7 +1730,7 @@ func TestBackupWithInvalidActions(t *testing.T) { h.addItems(t, resource) } - assert.Error(t, h.backupper.Backup(h.log, req, backupFile, tc.actions, nil)) + assert.Error(t, h.backupper.Backup(h.log, req, backupFile, tc.actions, nil, nil)) }) } } @@ -1746,6 +1747,10 @@ func (a *appliesToErrorAction) Execute(item runtime.Unstructured, backup *velero panic("not implemented") } +func (a *appliesToErrorAction) GetRelatedItems(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) { + 
panic("not implemented") +} + func (a *appliesToErrorAction) Progress(operationID string, backup *velerov1.Backup) (velero.OperationProgress, error) { panic("not implemented") } @@ -1835,27 +1840,621 @@ func TestBackupActionModifications(t *testing.T) { item.Object["spec"].(map[string]interface{})["nodeName"] = "foo" }), }, - want: map[string]unstructuredObject{ - "resources/pods/namespaces/ns-1/pod-1.json": toUnstructuredOrFail(t, builder.ForPod("ns-1", "pod-1").NodeName("foo").Result()), + want: map[string]unstructuredObject{ + "resources/pods/namespaces/ns-1/pod-1.json": toUnstructuredOrFail(t, builder.ForPod("ns-1", "pod-1").NodeName("foo").Result()), + }, + }, + { + name: "modifications to name and namespace in an action are persisted in JSON and in filename", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + ), + }, + actions: []biav2.BackupItemAction{ + modifyingActionGetter(func(item *unstructured.Unstructured) { + item.SetName(item.GetName() + "-updated") + item.SetNamespace(item.GetNamespace() + "-updated") + }), + }, + want: map[string]unstructuredObject{ + "resources/pods/namespaces/ns-1-updated/pod-1-updated.json": toUnstructuredOrFail(t, builder.ForPod("ns-1-updated", "pod-1-updated").Result()), + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{ + Backup: tc.backup, + SkippedPVTracker: NewSkipPVTracker(), + } + backupFile = bytes.NewBuffer([]byte{}) + ) + + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } + + err := h.backupper.Backup(h.log, req, backupFile, tc.actions, nil, nil) + assert.NoError(t, err) + + assertTarballFileContents(t, backupFile, tc.want) + }) + } +} + +// TestBackupActionAdditionalItems runs backups with backup item actions that return +// additional items to be backed up, and verifies that those items are included in the +// backup tarball as appropriate. Verification is done by looking at the files that exist +// in the backup tarball. 
+func TestBackupActionAdditionalItems(t *testing.T) { + tests := []struct { + name string + backup *velerov1.Backup + apiResources []*test.APIResource + actions []biav2.BackupItemAction + ibActions []ibav1.ItemBlockAction + want []string + }{ + { + name: "additional items that are already being backed up are not backed up twice", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + builder.ForPod("ns-3", "pod-3").Result(), + ), + }, + actions: []biav2.BackupItemAction{ + &pluggableAction{ + selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, + {GroupResource: kuberesource.Pods, Namespace: "ns-3", Name: "pod-3"}, + } + + return item, additionalItems, "", nil, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-2.json", + "resources/pods/namespaces/ns-3/pod-3.json", + "resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json", + "resources/pods/v1-preferredversion/namespaces/ns-2/pod-2.json", + "resources/pods/v1-preferredversion/namespaces/ns-3/pod-3.json", + }, + }, + { + name: "when using a backup namespace filter, additional items that are in a non-included namespace are not backed up", + backup: defaultBackup().IncludedNamespaces("ns-1").Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + builder.ForPod("ns-3", "pod-3").Result(), + ), + }, + actions: []biav2.BackupItemAction{ + &pluggableAction{ + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, + {GroupResource: kuberesource.Pods, Namespace: "ns-3", Name: "pod-3"}, + } + + return item, additionalItems, "", nil, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json", + }, + }, + { + name: "when using a backup namespace filter, additional items that are cluster-scoped are backed up", + backup: defaultBackup().IncludedNamespaces("ns-1").Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: []biav2.BackupItemAction{ + &pluggableAction{ + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, + } + + return item, additionalItems, "", nil, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/persistentvolumes/cluster/pv-1.json", + 
"resources/persistentvolumes/cluster/pv-2.json", + "resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json", + "resources/persistentvolumes/v1-preferredversion/cluster/pv-1.json", + "resources/persistentvolumes/v1-preferredversion/cluster/pv-2.json", + }, + }, + { + name: "when using a backup resource filter, additional items that are non-included resources are not backed up", + backup: defaultBackup().IncludedResources("pods").Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: []biav2.BackupItemAction{ + &pluggableAction{ + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, + } + + return item, additionalItems, "", nil, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json", + }, + }, + { + name: "when IncludeClusterResources=false, additional items that are cluster-scoped are not backed up", + backup: defaultBackup().IncludeClusterResources(false).Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: []biav2.BackupItemAction{ + &pluggableAction{ + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, + } + + return item, additionalItems, "", nil, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-2.json", + "resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json", + "resources/pods/v1-preferredversion/namespaces/ns-2/pod-2.json", + }, + }, + { + name: "additional items with the velero.io/exclude-from-backup label are not backed up", + backup: defaultBackup().IncludedNamespaces("ns-1").Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabels(velerov1.ExcludeFromBackupLabel, "true")).Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: []biav2.BackupItemAction{ + &pluggableAction{ + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, + } + + return item, additionalItems, "", nil, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/persistentvolumes/cluster/pv-2.json", + "resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json", + 
"resources/persistentvolumes/v1-preferredversion/cluster/pv-2.json", + }, + }, + + { + name: "if additional items aren't found in the API, they're skipped and the original item is still backed up", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + builder.ForPod("ns-3", "pod-3").Result(), + ), + }, + actions: []biav2.BackupItemAction{ + &pluggableAction{ + selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.Pods, Namespace: "ns-4", Name: "pod-4"}, + {GroupResource: kuberesource.Pods, Namespace: "ns-5", Name: "pod-5"}, + } + + return item, additionalItems, "", nil, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-2.json", + "resources/pods/namespaces/ns-3/pod-3.json", + "resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json", + "resources/pods/v1-preferredversion/namespaces/ns-2/pod-2.json", + "resources/pods/v1-preferredversion/namespaces/ns-3/pod-3.json", + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{ + Backup: tc.backup, + SkippedPVTracker: NewSkipPVTracker(), + } + backupFile = bytes.NewBuffer([]byte{}) + ) + + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } + + err := h.backupper.Backup(h.log, req, backupFile, tc.actions, nil, nil) + assert.NoError(t, err) + + assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) + }) + } +} + +// recordResourcesIBA is an ItemBlock item action that can be configured +// to run for specific resources/namespaces and simply records the items +// that it is executed for. 
+type recordResourcesIBA struct { + name string + selector velero.ResourceSelector + ids []string + backups []velerov1.Backup + executionErr error + relatedItems []velero.ResourceIdentifier +} + +func (a *recordResourcesIBA) GetRelatedItems(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) { + metadata, err := meta.Accessor(item) + if err != nil { + return a.relatedItems, err + } + a.ids = append(a.ids, kubeutil.NamespaceAndName(metadata)) + a.backups = append(a.backups, *backup) + return a.relatedItems, a.executionErr +} + +func (a *recordResourcesIBA) AppliesTo() (velero.ResourceSelector, error) { + return a.selector, nil +} + +func (a *recordResourcesIBA) Name() string { + return a.name +} + +func (a *recordResourcesIBA) ForResource(resource string) *recordResourcesIBA { + a.selector.IncludedResources = append(a.selector.IncludedResources, resource) + return a +} + +func (a *recordResourcesIBA) ForNamespace(namespace string) *recordResourcesIBA { + a.selector.IncludedNamespaces = append(a.selector.IncludedNamespaces, namespace) + return a +} + +func (a *recordResourcesIBA) ForLabelSelector(selector string) *recordResourcesIBA { + a.selector.LabelSelector = selector + return a +} + +func (a *recordResourcesIBA) WithRelatedItems(items []velero.ResourceIdentifier) *recordResourcesIBA { + a.relatedItems = items + return a +} + +func (a *recordResourcesIBA) WithName(name string) *recordResourcesIBA { + a.name = name + return a +} + +func (a *recordResourcesIBA) WithExecutionErr(executionErr error) *recordResourcesIBA { + a.executionErr = executionErr + return a +} + +// TestItemBlockActionsRunForCorrectItems runs backups with ItemBlock actions, and +// verifies that each action is run for the correct set of resources based on its +// AppliesTo() resource selector. Verification is done by using the recordResourcesIBA struct, +// which records which resources it's executed for. +func TestItemBlockActionsRunForCorrectItems(t *testing.T) { + tests := []struct { + name string + backup *velerov1.Backup + apiResources []*test.APIResource + + // actions is a map from a recordResourcesIBA (which will record the items it was called for) + // to a slice of expected items, formatted as {namespace}/{name}. + actions map[*recordResourcesIBA][]string + }{ + { + name: "single action with no selector runs for all items", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesIBA][]string{ + new(recordResourcesIBA): {"ns-1/pod-1", "ns-2/pod-2", "pv-1", "pv-2"}, + }, + }, + { + name: "single action with a resource selector for namespaced resources runs only for matching resources", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesIBA][]string{ + new(recordResourcesIBA).ForResource("pods"): {"ns-1/pod-1", "ns-2/pod-2"}, + }, + }, + { + name: "single action with a resource selector for cluster-scoped resources runs only for matching resources", + backup: defaultBackup(). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesIBA][]string{ + new(recordResourcesIBA).ForResource("persistentvolumes"): {"pv-1", "pv-2"}, + }, + }, + { + name: "single action with a namespace selector runs only for resources in that namespace", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVCs( + builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result(), + builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + test.Namespaces( + builder.ForNamespace("ns-1").Result(), + builder.ForNamespace("ns-2").Result(), + ), + }, + actions: map[*recordResourcesIBA][]string{ + new(recordResourcesIBA).ForNamespace("ns-1"): {"ns-1/pod-1", "ns-1/pvc-1"}, + }, + }, + { + name: "single action with a resource and namespace selector runs only for matching resources", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesIBA][]string{ + new(recordResourcesIBA).ForResource("pods").ForNamespace("ns-1"): {"ns-1/pod-1"}, + }, + }, + { + name: "multiple actions, each with a different resource selector using short name, run for matching resources", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesIBA][]string{ + new(recordResourcesIBA).ForResource("po"): {"ns-1/pod-1", "ns-2/pod-2"}, + new(recordResourcesIBA).ForResource("pv"): {"pv-1", "pv-2"}, + }, + }, + { + name: "actions with selectors that don't match anything don't run for any resources", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + ), + test.PVCs( + builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesIBA][]string{ + new(recordResourcesIBA).ForNamespace("ns-1").ForResource("persistentvolumeclaims"): nil, + new(recordResourcesIBA).ForNamespace("ns-2").ForResource("pods"): nil, + }, + }, + { + name: "action with a selector that has unresolvable resources doesn't run for any resources", + backup: defaultBackup(). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + ), + test.PVCs( + builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesIBA][]string{ + new(recordResourcesIBA).ForResource("unresolvable"): nil, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{ + Backup: tc.backup, + SkippedPVTracker: NewSkipPVTracker(), + } + backupFile = bytes.NewBuffer([]byte{}) + ) + + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } + + actions := []ibav1.ItemBlockAction{} + for action := range tc.actions { + actions = append(actions, action) + } + + err := h.backupper.Backup(h.log, req, backupFile, nil, actions, nil) + assert.NoError(t, err) + + for action, want := range tc.actions { + assert.Equal(t, want, action.ids) + } + }) + } +} + +// TestBackupWithInvalidItemBlockActions runs backups with ItemBlock actions that are invalid +// in some way (e.g. an invalid label selector returned from AppliesTo(), an error returned +// from AppliesTo()) and verifies that this causes the backupper.Backup(...) method to +// return an error. +func TestBackupWithInvalidItemBlockActions(t *testing.T) { + // all test cases in this function are expected to cause the method under test + // to return an error, so no expected results need to be set up. + tests := []struct { + name string + backup *velerov1.Backup + apiResources []*test.APIResource + actions []ibav1.ItemBlockAction + }{ + { + name: "action with invalid label selector results in an error", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.PVs( + builder.ForPersistentVolume("bar").Result(), + builder.ForPersistentVolume("baz").Result(), + ), + }, + actions: []ibav1.ItemBlockAction{ + new(recordResourcesIBA).ForLabelSelector("=invalid-selector"), }, }, { - name: "modifications to name and namespace in an action are persisted in JSON and in filename", + name: "action returning an error from AppliesTo results in an error", backup: defaultBackup(). 
Result(), apiResources: []*test.APIResource{ test.Pods( - builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.PVs( + builder.ForPersistentVolume("bar").Result(), + builder.ForPersistentVolume("baz").Result(), ), }, - actions: []biav2.BackupItemAction{ - modifyingActionGetter(func(item *unstructured.Unstructured) { - item.SetName(item.GetName() + "-updated") - item.SetNamespace(item.GetNamespace() + "-updated") - }), - }, - want: map[string]unstructuredObject{ - "resources/pods/namespaces/ns-1-updated/pod-1-updated.json": toUnstructuredOrFail(t, builder.ForPod("ns-1-updated", "pod-1-updated").Result()), + actions: []ibav1.ItemBlockAction{ + &appliesToErrorAction{}, }, }, } @@ -1875,28 +2474,25 @@ func TestBackupActionModifications(t *testing.T) { h.addItems(t, resource) } - err := h.backupper.Backup(h.log, req, backupFile, tc.actions, nil) - assert.NoError(t, err) - - assertTarballFileContents(t, backupFile, tc.want) + assert.Error(t, h.backupper.Backup(h.log, req, backupFile, nil, tc.actions, nil)) }) } } -// TestBackupActionAdditionalItems runs backups with backup item actions that return -// additional items to be backed up, and verifies that those items are included in the +// TestItemBlockActionRelatedItems runs backups with ItemBlock actions that return +// related items, and verifies that those items are included in the // backup tarball as appropriate. Verification is done by looking at the files that exist // in the backup tarball. -func TestBackupActionAdditionalItems(t *testing.T) { +func TestItemBlockActionRelatedItems(t *testing.T) { tests := []struct { name string backup *velerov1.Backup apiResources []*test.APIResource - actions []biav2.BackupItemAction + actions []ibav1.ItemBlockAction want []string }{ { - name: "additional items that are already being backed up are not backed up twice", + name: "related items that are already being backed up are not backed up twice", backup: defaultBackup().Result(), apiResources: []*test.APIResource{ test.Pods( @@ -1905,16 +2501,16 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPod("ns-3", "pod-3").Result(), ), }, - actions: []biav2.BackupItemAction{ - &pluggableAction{ + actions: []ibav1.ItemBlockAction{ + &pluggableIBA{ selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ + getRelatedItemsFunc: func(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) { + relatedItems := []velero.ResourceIdentifier{ {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, {GroupResource: kuberesource.Pods, Namespace: "ns-3", Name: "pod-3"}, } - return item, additionalItems, "", nil, nil + return relatedItems, nil }, }, }, @@ -1928,7 +2524,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { }, }, { - name: "when using a backup namespace filter, additional items that are in a non-included namespace are not backed up", + name: "when using a backup namespace filter, related items that are in a non-included namespace are not backed up", backup: defaultBackup().IncludedNamespaces("ns-1").Result(), apiResources: []*test.APIResource{ test.Pods( @@ -1937,15 +2533,15 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPod("ns-3", "pod-3").Result(), ), }, - 
actions: []biav2.BackupItemAction{ - &pluggableAction{ - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ + actions: []ibav1.ItemBlockAction{ + &pluggableIBA{ + getRelatedItemsFunc: func(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) { + relatedItems := []velero.ResourceIdentifier{ {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, {GroupResource: kuberesource.Pods, Namespace: "ns-3", Name: "pod-3"}, } - return item, additionalItems, "", nil, nil + return relatedItems, nil }, }, }, @@ -1955,7 +2551,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { }, }, { - name: "when using a backup namespace filter, additional items that are cluster-scoped are backed up", + name: "when using a backup namespace filter, related items that are cluster-scoped are backed up", backup: defaultBackup().IncludedNamespaces("ns-1").Result(), apiResources: []*test.APIResource{ test.Pods( @@ -1967,15 +2563,15 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPersistentVolume("pv-2").Result(), ), }, - actions: []biav2.BackupItemAction{ - &pluggableAction{ - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ + actions: []ibav1.ItemBlockAction{ + &pluggableIBA{ + getRelatedItemsFunc: func(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) { + relatedItems := []velero.ResourceIdentifier{ {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, } - return item, additionalItems, "", nil, nil + return relatedItems, nil }, }, }, @@ -1989,7 +2585,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { }, }, { - name: "when using a backup resource filter, additional items that are non-included resources are not backed up", + name: "when using a backup resource filter, related items that are non-included resources are not backed up", backup: defaultBackup().IncludedResources("pods").Result(), apiResources: []*test.APIResource{ test.Pods( @@ -2000,15 +2596,15 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPersistentVolume("pv-2").Result(), ), }, - actions: []biav2.BackupItemAction{ - &pluggableAction{ - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ + actions: []ibav1.ItemBlockAction{ + &pluggableIBA{ + getRelatedItemsFunc: func(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) { + relatedItems := []velero.ResourceIdentifier{ {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, } - return item, additionalItems, "", nil, nil + return relatedItems, nil }, }, }, @@ -2018,7 +2614,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { }, }, { - name: "when IncludeClusterResources=false, additional items that are cluster-scoped are not backed up", + name: "when IncludeClusterResources=false, related items that are cluster-scoped are not backed up", backup: defaultBackup().IncludeClusterResources(false).Result(), 
apiResources: []*test.APIResource{ test.Pods( @@ -2030,15 +2626,15 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPersistentVolume("pv-2").Result(), ), }, - actions: []biav2.BackupItemAction{ - &pluggableAction{ - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ + actions: []ibav1.ItemBlockAction{ + &pluggableIBA{ + getRelatedItemsFunc: func(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) { + relatedItems := []velero.ResourceIdentifier{ {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, } - return item, additionalItems, "", nil, nil + return relatedItems, nil }, }, }, @@ -2050,7 +2646,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { }, }, { - name: "additional items with the velero.io/exclude-from-backup label are not backed up", + name: "related items with the velero.io/exclude-from-backup label are not backed up", backup: defaultBackup().IncludedNamespaces("ns-1").Result(), apiResources: []*test.APIResource{ test.Pods( @@ -2061,15 +2657,15 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPersistentVolume("pv-2").Result(), ), }, - actions: []biav2.BackupItemAction{ - &pluggableAction{ - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ + actions: []ibav1.ItemBlockAction{ + &pluggableIBA{ + getRelatedItemsFunc: func(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) { + relatedItems := []velero.ResourceIdentifier{ {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, } - return item, additionalItems, "", nil, nil + return relatedItems, nil }, }, }, @@ -2082,7 +2678,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { }, { - name: "if additional items aren't found in the API, they're skipped and the original item is still backed up", + name: "if related items aren't found in the API, they're skipped and the original item is still backed up", backup: defaultBackup().Result(), apiResources: []*test.APIResource{ test.Pods( @@ -2091,16 +2687,16 @@ func TestBackupActionAdditionalItems(t *testing.T) { builder.ForPod("ns-3", "pod-3").Result(), ), }, - actions: []biav2.BackupItemAction{ - &pluggableAction{ + actions: []ibav1.ItemBlockAction{ + &pluggableIBA{ selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ + getRelatedItemsFunc: func(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) { + relatedItems := []velero.ResourceIdentifier{ {GroupResource: kuberesource.Pods, Namespace: "ns-4", Name: "pod-4"}, {GroupResource: kuberesource.Pods, Namespace: "ns-5", Name: "pod-5"}, } - return item, additionalItems, "", nil, nil + return relatedItems, nil }, }, }, @@ -2130,7 +2726,7 @@ func TestBackupActionAdditionalItems(t *testing.T) { h.addItems(t, resource) } - err := h.backupper.Backup(h.log, req, backupFile, tc.actions, nil) + 
err := h.backupper.Backup(h.log, req, backupFile, nil, tc.actions, nil) assert.NoError(t, err) assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) @@ -2585,7 +3181,7 @@ func TestBackupWithSnapshots(t *testing.T) { h.addItems(t, resource) } - err := h.backupper.Backup(h.log, tc.req, backupFile, nil, tc.snapshotterGetter) + err := h.backupper.Backup(h.log, tc.req, backupFile, nil, nil, tc.snapshotterGetter) assert.NoError(t, err) assert.Equal(t, tc.want, tc.req.VolumeSnapshots) @@ -2744,7 +3340,7 @@ func TestBackupWithAsyncOperations(t *testing.T) { h.addItems(t, resource) } - err := h.backupper.Backup(h.log, tc.req, backupFile, tc.actions, nil) + err := h.backupper.Backup(h.log, tc.req, backupFile, tc.actions, nil, nil) assert.NoError(t, err) resultOper := *tc.req.GetItemOperationsList() @@ -2810,7 +3406,7 @@ func TestBackupWithInvalidHooks(t *testing.T) { h.addItems(t, resource) } - assert.EqualError(t, h.backupper.Backup(h.log, req, backupFile, nil, nil), tc.want.Error()) + assert.EqualError(t, h.backupper.Backup(h.log, req, backupFile, nil, nil, nil), tc.want.Error()) }) } } @@ -2832,8 +3428,10 @@ func TestBackupWithHooks(t *testing.T) { name string backup *velerov1.Backup apiResources []*test.APIResource + actions []ibav1.ItemBlockAction wantExecutePodCommandCalls []*expectedCall wantBackedUp []string + wantHookExecutionLog []test.HookExecutionEntry }{ { name: "pre hook with no resource filters runs for all pods", @@ -2992,6 +3590,223 @@ func TestBackupWithHooks(t *testing.T) { "resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json", }, }, + { + name: "pre and post hooks run for two pods sequentially by pods in different ItemBlocks", + backup: defaultBackup(). + Hooks(velerov1.BackupHooks{ + Resources: []velerov1.BackupResourceHookSpec{ + { + Name: "hook-1", + PreHooks: []velerov1.BackupResourceHook{ + { + Exec: &velerov1.ExecHook{ + Command: []string{"pre"}, + }, + }, + }, + PostHooks: []velerov1.BackupResourceHook{ + { + Exec: &velerov1.ExecHook{ + Command: []string{"post"}, + }, + }, + }, + }, + }, + }). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-1", "pod-2").Result(), + ), + }, + wantExecutePodCommandCalls: []*expectedCall{ + { + podNamespace: "ns-1", + podName: "pod-1", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"pre"}, + }, + err: nil, + }, + { + podNamespace: "ns-1", + podName: "pod-1", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"post"}, + }, + err: nil, + }, + { + podNamespace: "ns-1", + podName: "pod-2", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"pre"}, + }, + err: nil, + }, + { + podNamespace: "ns-1", + podName: "pod-2", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"post"}, + }, + err: nil, + }, + }, + wantHookExecutionLog: []test.HookExecutionEntry{ + { + Namespace: "ns-1", + Name: "pod-1", + HookName: "hook-1", + HookCommand: []string{"pre"}, + }, + { + Namespace: "ns-1", + Name: "pod-1", + HookName: "hook-1", + HookCommand: []string{"post"}, + }, + { + Namespace: "ns-1", + Name: "pod-2", + HookName: "hook-1", + HookCommand: []string{"pre"}, + }, + { + Namespace: "ns-1", + Name: "pod-2", + HookName: "hook-1", + HookCommand: []string{"post"}, + }, + }, + wantBackedUp: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-1/pod-2.json", + "resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json", + "resources/pods/v1-preferredversion/namespaces/ns-1/pod-2.json", + }, + }, + { + name: "both pre hooks run before both post hooks for pods in the same ItemBlock", + backup: defaultBackup(). + Hooks(velerov1.BackupHooks{ + Resources: []velerov1.BackupResourceHookSpec{ + { + Name: "hook-1", + PreHooks: []velerov1.BackupResourceHook{ + { + Exec: &velerov1.ExecHook{ + Command: []string{"pre"}, + }, + }, + }, + PostHooks: []velerov1.BackupResourceHook{ + { + Exec: &velerov1.ExecHook{ + Command: []string{"post"}, + }, + }, + }, + }, + }, + }). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-1", "pod-2").Result(), + ), + }, + actions: []ibav1.ItemBlockAction{ + &pluggableIBA{ + selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, + getRelatedItemsFunc: func(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) { + relatedItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.Pods, Namespace: "ns-1", Name: "pod-1"}, + {GroupResource: kuberesource.Pods, Namespace: "ns-1", Name: "pod-2"}, + } + + return relatedItems, nil + }, + }, + }, + wantExecutePodCommandCalls: []*expectedCall{ + { + podNamespace: "ns-1", + podName: "pod-1", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"pre"}, + }, + err: nil, + }, + { + podNamespace: "ns-1", + podName: "pod-1", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"post"}, + }, + err: nil, + }, + { + podNamespace: "ns-1", + podName: "pod-2", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"pre"}, + }, + err: nil, + }, + { + podNamespace: "ns-1", + podName: "pod-2", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"post"}, + }, + err: nil, + }, + }, + wantHookExecutionLog: []test.HookExecutionEntry{ + { + Namespace: "ns-1", + Name: "pod-1", + HookName: "hook-1", + HookCommand: []string{"pre"}, + }, + { + Namespace: "ns-1", + Name: "pod-2", + HookName: "hook-1", + HookCommand: []string{"pre"}, + }, + { + Namespace: "ns-1", + Name: "pod-1", + HookName: "hook-1", + HookCommand: []string{"post"}, + }, + { + Namespace: "ns-1", + Name: "pod-2", + HookName: "hook-1", + HookCommand: []string{"post"}, + }, + }, + wantBackedUp: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-1/pod-2.json", + "resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json", + "resources/pods/v1-preferredversion/namespaces/ns-1/pod-2.json", + }, + }, { name: "item is not backed up if hook returns an error when OnError=Fail", backup: defaultBackup(). @@ -3076,8 +3891,11 @@ func TestBackupWithHooks(t *testing.T) { h.addItems(t, resource) } - require.NoError(t, h.backupper.Backup(h.log, req, backupFile, nil, nil)) + require.NoError(t, h.backupper.Backup(h.log, req, backupFile, nil, tc.actions, nil)) + if tc.wantHookExecutionLog != nil { + assert.Equal(t, tc.wantHookExecutionLog, podCommandExecutor.HookExecutionLog) + } assertTarballContents(t, backupFile, append(tc.wantBackedUp, "metadata/version")...) }) } @@ -3267,7 +4085,7 @@ func TestBackupWithPodVolume(t *testing.T) { h.addItems(t, resource) } - require.NoError(t, h.backupper.Backup(h.log, req, backupFile, nil, tc.snapshotterGetter)) + require.NoError(t, h.backupper.Backup(h.log, req, backupFile, nil, nil, tc.snapshotterGetter)) assert.Equal(t, tc.want, req.PodVolumeBackups) @@ -3313,6 +4131,28 @@ func (a *pluggableAction) Name() string { return "" } +// pluggableIBA is an ItemBlock action that can be plugged with GetRelatedItems function bodies at runtime. 
+type pluggableIBA struct { + selector velero.ResourceSelector + getRelatedItemsFunc func(runtime.Unstructured, *velerov1.Backup) ([]velero.ResourceIdentifier, error) +} + +func (a *pluggableIBA) GetRelatedItems(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) { + if a.getRelatedItemsFunc == nil { + return nil, nil + } + + return a.getRelatedItemsFunc(item, backup) +} + +func (a *pluggableIBA) AppliesTo() (velero.ResourceSelector, error) { + return a.selector, nil +} + +func (a *pluggableIBA) Name() string { + return "" +} + type harness struct { *test.APIServer backupper *kubernetesBackupper @@ -4349,7 +5189,7 @@ func TestBackupNewResourceFiltering(t *testing.T) { h.addItems(t, resource) } - h.backupper.Backup(h.log, req, backupFile, tc.actions, nil) + h.backupper.Backup(h.log, req, backupFile, tc.actions, nil, nil) assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) }) @@ -4510,7 +5350,7 @@ func TestBackupNamespaces(t *testing.T) { h.addItems(t, resource) } - h.backupper.Backup(h.log, req, backupFile, nil, nil) + h.backupper.Backup(h.log, req, backupFile, nil, nil, nil) assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) }) diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index af8ddd26e9..374a319666 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -46,6 +46,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/features" + "github.com/vmware-tanzu/velero/pkg/itemblock" "github.com/vmware-tanzu/velero/pkg/itemoperation" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" @@ -88,8 +89,8 @@ type FileForArchive struct { // If finalize is true, then it returns the bytes instead of writing them to the tarWriter // In addition to the error return, backupItem also returns a bool indicating whether the item // was actually backed up. 
-func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource, preferredGVR schema.GroupVersionResource, mustInclude, finalize bool) (bool, []FileForArchive, error) { - selectedForBackup, files, err := ib.backupItemInternal(logger, obj, groupResource, preferredGVR, mustInclude, finalize) +func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource, preferredGVR schema.GroupVersionResource, mustInclude, finalize bool, itemBlock *BackupItemBlock) (bool, []FileForArchive, error) { + selectedForBackup, files, err := ib.backupItemInternal(logger, obj, groupResource, preferredGVR, mustInclude, finalize, itemBlock) // return if not selected, an error occurred, there are no files to add, or for finalize if !selectedForBackup || err != nil || len(files) == 0 || finalize { return selectedForBackup, files, err @@ -106,35 +107,21 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr return true, []FileForArchive{}, nil } -func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource, preferredGVR schema.GroupVersionResource, mustInclude, finalize bool) (bool, []FileForArchive, error) { - var itemFiles []FileForArchive - metadata, err := meta.Accessor(obj) - if err != nil { - return false, itemFiles, err - } - - namespace := metadata.GetNamespace() - name := metadata.GetName() - - log := logger.WithFields(map[string]interface{}{ - "name": name, - "resource": groupResource.String(), - "namespace": namespace, - }) - +func (ib *itemBackupper) itemInclusionChecks(log logrus.FieldLogger, mustInclude bool, metadata metav1.Object, obj runtime.Unstructured, groupResource schema.GroupResource) bool { if mustInclude { log.Infof("Skipping the exclusion checks for this resource") } else { if metadata.GetLabels()[velerov1api.ExcludeFromBackupLabel] == "true" { log.Infof("Excluding item because it has label %s=true", velerov1api.ExcludeFromBackupLabel) ib.trackSkippedPV(obj, groupResource, "", fmt.Sprintf("item has label %s=true", velerov1api.ExcludeFromBackupLabel), log) - return false, itemFiles, nil + return false } // NOTE: we have to re-check namespace & resource includes/excludes because it's possible that // backupItem can be invoked by a custom action. + namespace := metadata.GetNamespace() if namespace != "" && !ib.backupRequest.NamespaceIncludesExcludes.ShouldInclude(namespace) { log.Info("Excluding item because namespace is excluded") - return false, itemFiles, nil + return false } // NOTE: we specifically allow namespaces to be backed up even if it's excluded. @@ -144,19 +131,41 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti if namespace == "" && groupResource != kuberesource.Namespaces && ib.backupRequest.ResourceIncludesExcludes.ShouldExclude(groupResource.String()) { log.Info("Excluding item because resource is cluster-scoped and is excluded by cluster filter.") - return false, itemFiles, nil + return false } // Only check namespace-scoped resource to avoid expelling cluster resources // are not specified in included list. 
if namespace != "" && !ib.backupRequest.ResourceIncludesExcludes.ShouldInclude(groupResource.String()) { log.Info("Excluding item because resource is excluded") - return false, itemFiles, nil + return false } } if metadata.GetDeletionTimestamp() != nil { log.Info("Skipping item because it's being deleted.") + return false + } + return true +} + +func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource, preferredGVR schema.GroupVersionResource, mustInclude, finalize bool, itemBlock *BackupItemBlock) (bool, []FileForArchive, error) { + var itemFiles []FileForArchive + metadata, err := meta.Accessor(obj) + if err != nil { + return false, itemFiles, err + } + + namespace := metadata.GetNamespace() + name := metadata.GetName() + + log := logger.WithFields(map[string]interface{}{ + "name": name, + "resource": groupResource.String(), + "namespace": namespace, + }) + + if !ib.itemInclusionChecks(log, mustInclude, metadata, obj, groupResource) { return false, itemFiles, nil } @@ -180,10 +189,6 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti pvbVolumes []string ) - log.Debug("Executing pre hooks") - if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePre, ib.hookTracker); err != nil { - return false, itemFiles, err - } if optedOut, podName := ib.podVolumeSnapshotTracker.OptedoutByPod(namespace, name); optedOut { ib.trackSkippedPV(obj, groupResource, podVolumeApproach, fmt.Sprintf("opted out due to annotation in pod %s", podName), log) } @@ -231,15 +236,9 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti // the group version of the object. versionPath := resourceVersion(obj) - updatedObj, additionalItemFiles, err := ib.executeActions(log, obj, groupResource, name, namespace, metadata, finalize) + updatedObj, additionalItemFiles, err := ib.executeActions(log, obj, groupResource, name, namespace, metadata, finalize, itemBlock) if err != nil { backupErrs = append(backupErrs, err) - - // if there was an error running actions, execute post hooks and return - log.Debug("Executing post hooks") - if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePost, ib.hookTracker); err != nil { - backupErrs = append(backupErrs, err) - } return false, itemFiles, kubeerrs.NewAggregate(backupErrs) } @@ -294,11 +293,6 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti } } - log.Debug("Executing post hooks") - if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePost, ib.hookTracker); err != nil { - backupErrs = append(backupErrs, err) - } - if len(backupErrs) != 0 { return false, itemFiles, kubeerrs.NewAggregate(backupErrs) } @@ -353,6 +347,7 @@ func (ib *itemBackupper) executeActions( name, namespace string, metadata metav1.Object, finalize bool, + itemBlock *BackupItemBlock, ) (runtime.Unstructured, []FileForArchive, error) { var itemFiles []FileForArchive for _, action := range ib.backupRequest.ResolvedActions { @@ -451,35 +446,54 @@ func (ib *itemBackupper) executeActions( } for _, additionalItem := range additionalItemIdentifiers { - gvr, resource, err := ib.discoveryHelper.ResourceFor(additionalItem.GroupResource.WithVersion("")) - if err != nil { - return nil, itemFiles, err - } + var itemList []itemblock.ItemBlockItem - client, err := 
ib.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, additionalItem.Namespace) - if err != nil { - return nil, itemFiles, err + // get item content from itemBlock if it's there to avoid the additional APIServer call + // We could have multiple versions to back up if EnableAPIGroupVersions is set + if itemBlock != nil { + itemList = itemBlock.FindItem(additionalItem.GroupResource, additionalItem.Namespace, additionalItem.Name) } + // if item is not in itemblock, pull it from the cluster + if len(itemList) == 0 { + log.Infof("Additional Item %s %s/%s not found in ItemBlock, getting from cluster", additionalItem.GroupResource, additionalItem.Namespace, additionalItem.Name) + + gvr, resource, err := ib.discoveryHelper.ResourceFor(additionalItem.GroupResource.WithVersion("")) + if err != nil { + return nil, itemFiles, err + } - item, err := client.Get(additionalItem.Name, metav1.GetOptions{}) + client, err := ib.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, additionalItem.Namespace) + if err != nil { + return nil, itemFiles, err + } - if apierrors.IsNotFound(err) { - log.WithFields(logrus.Fields{ - "groupResource": additionalItem.GroupResource, - "namespace": additionalItem.Namespace, - "name": additionalItem.Name, - }).Warnf("Additional item was not found in Kubernetes API, can't back it up") - continue - } - if err != nil { - return nil, itemFiles, errors.WithStack(err) + item, err := client.Get(additionalItem.Name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + log.WithFields(logrus.Fields{ + "groupResource": additionalItem.GroupResource, + "namespace": additionalItem.Namespace, + "name": additionalItem.Name, + }).Warnf("Additional item was not found in Kubernetes API, can't back it up") + continue + } + if err != nil { + return nil, itemFiles, errors.WithStack(err) + } + itemList = append(itemList, itemblock.ItemBlockItem{ + Gr: additionalItem.GroupResource, + Item: item, + PreferredGVR: gvr, + }) } - _, additionalItemFiles, err := ib.backupItem(log, item, gvr.GroupResource(), gvr, mustInclude, finalize) - if err != nil { - return nil, itemFiles, err + for _, item := range itemList { + _, additionalItemFiles, err := ib.backupItem(log, item.Item, additionalItem.GroupResource, item.PreferredGVR, mustInclude, finalize, itemBlock) + if err != nil { + return nil, itemFiles, err + } + itemFiles = append(itemFiles, additionalItemFiles...) } - itemFiles = append(itemFiles, additionalItemFiles...) 
} } return obj, itemFiles, nil diff --git a/pkg/backup/item_collector.go b/pkg/backup/item_collector.go index af7ffe9ae6..84c1429ea4 100644 --- a/pkg/backup/item_collector.go +++ b/pkg/backup/item_collector.go @@ -178,6 +178,8 @@ type kubernetesResource struct { groupResource schema.GroupResource preferredGVR schema.GroupVersionResource namespace, name, path string + orderedResource bool + inItemBlock bool // set to true during backup processing when added to an ItemBlock } // getItemsFromResourceIdentifiers get the kubernetesResources @@ -294,6 +296,7 @@ func sortResourcesByOrder( // First select items from the order for _, name := range order { if item, ok := itemMap[name]; ok { + item.orderedResource = true sortedItems = append(sortedItems, item) log.Debugf("%s added to sorted resource list.", item.name) delete(itemMap, name) diff --git a/pkg/backup/item_collector_test.go b/pkg/backup/item_collector_test.go index f24b42486d..dc54d57bbe 100644 --- a/pkg/backup/item_collector_test.go +++ b/pkg/backup/item_collector_test.go @@ -65,13 +65,15 @@ func TestSortCoreGroup(t *testing.T) { func TestSortOrderedResource(t *testing.T) { log := logrus.StandardLogger() podResources := []*kubernetesResource{ + {namespace: "ns1", name: "pod3"}, {namespace: "ns1", name: "pod1"}, {namespace: "ns1", name: "pod2"}, } order := []string{"ns1/pod2", "ns1/pod1"} expectedResources := []*kubernetesResource{ - {namespace: "ns1", name: "pod2"}, - {namespace: "ns1", name: "pod1"}, + {namespace: "ns1", name: "pod2", orderedResource: true}, + {namespace: "ns1", name: "pod1", orderedResource: true}, + {namespace: "ns1", name: "pod3"}, } sortedResources := sortResourcesByOrder(log, podResources, order) assert.Equal(t, expectedResources, sortedResources) @@ -80,11 +82,13 @@ func TestSortOrderedResource(t *testing.T) { pvResources := []*kubernetesResource{ {name: "pv1"}, {name: "pv2"}, + {name: "pv3"}, } pvOrder := []string{"pv5", "pv2", "pv1"} expectedPvResources := []*kubernetesResource{ - {name: "pv2"}, - {name: "pv1"}, + {name: "pv2", orderedResource: true}, + {name: "pv1", orderedResource: true}, + {name: "pv3"}, } sortedPvResources := sortResourcesByOrder(log, pvResources, pvOrder) assert.Equal(t, expectedPvResources, sortedPvResources) diff --git a/pkg/backup/itemblock.go b/pkg/backup/itemblock.go new file mode 100644 index 0000000000..4ea8e74b1f --- /dev/null +++ b/pkg/backup/itemblock.go @@ -0,0 +1,66 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backup + +import ( + "encoding/json" + "os" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/vmware-tanzu/velero/pkg/itemblock" +) + +type BackupItemBlock struct { + itemblock.ItemBlock + // This is a reference to the shared itemBackupper for the backup + itemBackupper *itemBackupper +} + +func NewBackupItemBlock(log logrus.FieldLogger, itemBackupper *itemBackupper) *BackupItemBlock { + return &BackupItemBlock{ + ItemBlock: itemblock.ItemBlock{Log: log}, + itemBackupper: itemBackupper, + } +} + +func (b *BackupItemBlock) addKubernetesResource(item *kubernetesResource, log logrus.FieldLogger) *unstructured.Unstructured { + // no-op if item is already in a block + if item.inItemBlock { + return nil + } + var unstructured unstructured.Unstructured + item.inItemBlock = true + + f, err := os.Open(item.path) + if err != nil { + log.WithError(errors.WithStack(err)).Error("Error opening file containing item") + return nil + } + defer f.Close() + defer os.Remove(f.Name()) + + if err := json.NewDecoder(f).Decode(&unstructured); err != nil { + log.WithError(errors.WithStack(err)).Error("Error decoding JSON from file") + return nil + } + log.Infof("adding %s %s/%s to ItemBlock", item.groupResource, item.namespace, item.name) + b.AddUnstructured(item.groupResource, &unstructured, item.preferredGVR) + return &unstructured +} diff --git a/pkg/backup/request.go b/pkg/backup/request.go index ab146d13e5..b0cb298f97 100644 --- a/pkg/backup/request.go +++ b/pkg/backup/request.go @@ -46,6 +46,7 @@ type Request struct { ResourceIncludesExcludes collections.IncludesExcludesInterface ResourceHooks []hook.ResourceHook ResolvedActions []framework.BackupItemResolvedActionV2 + ResolvedItemBlockActions []framework.ItemBlockResolvedAction VolumeSnapshots []*volume.Snapshot PodVolumeBackups []*velerov1api.PodVolumeBackup BackedUpItems map[itemKey]struct{} diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index 989617722c..e726bf4186 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -618,6 +618,11 @@ func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error { if err != nil { return err } + backupLog.Info("Getting ItemBlock actions") + ibActions, err := pluginManager.GetItemBlockActions() + if err != nil { + return err + } backupLog.Info("Setting up backup store to check for backup existence") backupStore, err := b.backupStoreGetter.Get(backup.StorageLocation, pluginManager, backupLog) if err != nil { @@ -635,9 +640,10 @@ func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error { } backupItemActionsResolver := framework.NewBackupItemActionResolverV2(actions) + itemBlockActionResolver := framework.NewItemBlockActionResolver(ibActions) var fatalErrs []error - if err := b.backupper.BackupWithResolvers(backupLog, backup, backupFile, backupItemActionsResolver, pluginManager); err != nil { + if err := b.backupper.BackupWithResolvers(backupLog, backup, backupFile, backupItemActionsResolver, itemBlockActionResolver, pluginManager); err != nil { fatalErrs = append(fatalErrs, err) } diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index c175c70415..edae384585 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -57,6 +57,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/plugin/framework" pluginmocks 
"github.com/vmware-tanzu/velero/pkg/plugin/mocks" biav2 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v2" + ibav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/itemblockaction/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" "github.com/vmware-tanzu/velero/pkg/util/boolptr" kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -67,13 +68,15 @@ type fakeBackupper struct { mock.Mock } -func (b *fakeBackupper) Backup(logger logrus.FieldLogger, backup *pkgbackup.Request, backupFile io.Writer, actions []biav2.BackupItemAction, volumeSnapshotterGetter pkgbackup.VolumeSnapshotterGetter) error { - args := b.Called(logger, backup, backupFile, actions, volumeSnapshotterGetter) +func (b *fakeBackupper) Backup(logger logrus.FieldLogger, backup *pkgbackup.Request, backupFile io.Writer, actions []biav2.BackupItemAction, itemBlockActions []ibav1.ItemBlockAction, volumeSnapshotterGetter pkgbackup.VolumeSnapshotterGetter) error { + args := b.Called(logger, backup, backupFile, actions, itemBlockActions, volumeSnapshotterGetter) return args.Error(0) } func (b *fakeBackupper) BackupWithResolvers(logger logrus.FieldLogger, backup *pkgbackup.Request, backupFile io.Writer, - backupItemActionResolver framework.BackupItemActionResolverV2, volumeSnapshotterGetter pkgbackup.VolumeSnapshotterGetter) error { + backupItemActionResolver framework.BackupItemActionResolverV2, + itemBlockActionResolver framework.ItemBlockActionResolver, + volumeSnapshotterGetter pkgbackup.VolumeSnapshotterGetter) error { args := b.Called(logger, backup, backupFile, backupItemActionResolver, volumeSnapshotterGetter) return args.Error(0) } @@ -1345,6 +1348,7 @@ func TestProcessBackupCompletions(t *testing.T) { } pluginManager.On("GetBackupItemActionsV2").Return(nil, nil) + pluginManager.On("GetItemBlockActions").Return(nil, nil) pluginManager.On("CleanupClients").Return(nil) backupper.On("Backup", mock.Anything, mock.Anything, mock.Anything, []biav2.BackupItemAction(nil), pluginManager).Return(nil) backupper.On("BackupWithResolvers", mock.Anything, mock.Anything, mock.Anything, framework.BackupItemActionResolverV2{}, pluginManager).Return(nil) diff --git a/pkg/itemblock/itemblock.go b/pkg/itemblock/itemblock.go new file mode 100644 index 0000000000..cc3b5125eb --- /dev/null +++ b/pkg/itemblock/itemblock.go @@ -0,0 +1,60 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package itemblock + +import ( + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type ItemBlock struct { + Log logrus.FieldLogger + Items []ItemBlockItem +} + +type ItemBlockItem struct { + Gr schema.GroupResource + Item *unstructured.Unstructured + PreferredGVR schema.GroupVersionResource +} + +func (ib *ItemBlock) AddUnstructured(gr schema.GroupResource, item *unstructured.Unstructured, preferredGVR schema.GroupVersionResource) { + ib.Items = append(ib.Items, ItemBlockItem{ + Gr: gr, + Item: item, + PreferredGVR: preferredGVR, + }) +} + +// Could return multiple items if EnableAPIGroupVersions is set. The item matching the preferredGVR is returned first +func (ib *ItemBlock) FindItem(gr schema.GroupResource, namespace, name string) []ItemBlockItem { + var itemList []ItemBlockItem + var returnList []ItemBlockItem + + for _, item := range ib.Items { + if item.Gr == gr && item.Item != nil && item.Item.GetName() == name && item.Item.GetNamespace() == namespace { + itemGV, err := schema.ParseGroupVersion(item.Item.GetAPIVersion()) + if err == nil && item.PreferredGVR.GroupVersion() == itemGV { + returnList = append(returnList, item) + } else { + itemList = append(itemList, item) + } + } + } + return append(returnList, itemList...) +} diff --git a/pkg/test/mock_pod_command_executor.go b/pkg/test/mock_pod_command_executor.go index 219361b776..414ae30860 100644 --- a/pkg/test/mock_pod_command_executor.go +++ b/pkg/test/mock_pod_command_executor.go @@ -24,9 +24,22 @@ import ( type MockPodCommandExecutor struct { mock.Mock + // hook execution order + HookExecutionLog []HookExecutionEntry +} + +type HookExecutionEntry struct { + Namespace, Name, HookName string + HookCommand []string } func (e *MockPodCommandExecutor) ExecutePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *v1.ExecHook) error { + e.HookExecutionLog = append(e.HookExecutionLog, HookExecutionEntry{ + Namespace: namespace, + Name: name, + HookName: hookName, + HookCommand: hook.Command, + }) args := e.Called(log, item, namespace, name, hookName, hook) return args.Error(0) }
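
To make the new extension point concrete, here is a minimal sketch of an ItemBlockAction written against the same interface the `pluggableIBA` test fake implements above (`Name`, `AppliesTo`, `GetRelatedItems`). The pod-with-PVCs grouping, the plugin name, and the omission of plugin-server registration are illustrative assumptions, not part of this PR:

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"

	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/kuberesource"
	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
)

// podWithPVCsAction is a hypothetical ItemBlockAction that asks Velero to group
// a pod with the PVCs its volumes reference, so they land in the same ItemBlock.
type podWithPVCsAction struct{}

func (a *podWithPVCsAction) Name() string { return "pod-with-pvcs" }

func (a *podWithPVCsAction) AppliesTo() (velero.ResourceSelector, error) {
	// Only invoked for pods.
	return velero.ResourceSelector{IncludedResources: []string{"pods"}}, nil
}

func (a *podWithPVCsAction) GetRelatedItems(item runtime.Unstructured, backup *velerov1.Backup) ([]velero.ResourceIdentifier, error) {
	pod := item.UnstructuredContent()
	meta, _ := pod["metadata"].(map[string]interface{})
	ns, _ := meta["namespace"].(string)
	spec, _ := pod["spec"].(map[string]interface{})
	volumes, _ := spec["volumes"].([]interface{})

	var related []velero.ResourceIdentifier
	for _, v := range volumes {
		vol, _ := v.(map[string]interface{})
		if pvc, ok := vol["persistentVolumeClaim"].(map[string]interface{}); ok {
			if claimName, ok := pvc["claimName"].(string); ok {
				related = append(related, velero.ResourceIdentifier{
					GroupResource: kuberesource.PersistentVolumeClaims,
					Namespace:     ns,
					Name:          claimName,
				})
			}
		}
	}
	return related, nil
}
```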
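
The new `HookExecutionLog` on `MockPodCommandExecutor` is what lets the tests above assert ordering rather than just call counts: when two pods share an ItemBlock, both pre hooks are recorded before any post hook, whereas without a grouping action each pod's pre and post entries appear back to back. A small sketch of using the mock's log directly (test name and setup are illustrative):

```go
package example

import (
	"testing"

	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"

	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/test"
)

func TestHookExecutionLogExample(t *testing.T) {
	executor := &test.MockPodCommandExecutor{}
	// Satisfy the embedded mock.Mock expectation for every ExecutePodCommand call.
	executor.On("ExecutePodCommand", mock.Anything, mock.Anything, mock.Anything,
		mock.Anything, mock.Anything, mock.Anything).Return(nil)

	log := logrus.New()
	_ = executor.ExecutePodCommand(log, nil, "ns-1", "pod-1", "hook-1", &velerov1.ExecHook{Command: []string{"pre"}})
	_ = executor.ExecutePodCommand(log, nil, "ns-1", "pod-2", "hook-1", &velerov1.ExecHook{Command: []string{"pre"}})

	// Each call is appended to HookExecutionLog in execution order.
	assert.Equal(t, []test.HookExecutionEntry{
		{Namespace: "ns-1", Name: "pod-1", HookName: "hook-1", HookCommand: []string{"pre"}},
		{Namespace: "ns-1", Name: "pod-2", HookName: "hook-1", HookCommand: []string{"pre"}},
	}, executor.HookExecutionLog)
}
```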
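
The `itemblock.ItemBlock.FindItem` semantics matter when `EnableAPIGroupVersions` is set, since the collector can hand the backup more than one stored version of the same object; the version matching the preferred GVR is returned first. A minimal usage sketch (resource names are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/vmware-tanzu/velero/pkg/itemblock"
)

func main() {
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
	preferredGVR := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}

	deployV1 := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "apps/v1",
		"kind":       "Deployment",
		"metadata":   map[string]interface{}{"namespace": "ns-1", "name": "web"},
	}}
	deployV1beta1 := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "apps/v1beta1",
		"kind":       "Deployment",
		"metadata":   map[string]interface{}{"namespace": "ns-1", "name": "web"},
	}}

	block := &itemblock.ItemBlock{Log: logrus.New()}
	// With EnableAPIGroupVersions, both versions of the same item may be added.
	block.AddUnstructured(gr, deployV1beta1, preferredGVR)
	block.AddUnstructured(gr, deployV1, preferredGVR)

	for _, item := range block.FindItem(gr, "ns-1", "web") {
		// Prints "apps/v1" first (preferred GVR match), then "apps/v1beta1".
		fmt.Println(item.Item.GetAPIVersion())
	}
}
```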
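
The reworked loop in `executeActions` prefers item content already captured in the ItemBlock and only calls the API server for related items the block does not contain. A standalone sketch of that lookup order, under assumed names (`lookupRelatedItem` and the `getFromCluster` callback are illustrative only; the real code also handles NotFound by skipping the item):

```go
package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/vmware-tanzu/velero/pkg/itemblock"
)

// lookupRelatedItem returns copies of a related item already present in the
// ItemBlock, falling back to a cluster get only when the block has none.
func lookupRelatedItem(
	block *itemblock.ItemBlock,
	gr schema.GroupResource,
	preferredGVR schema.GroupVersionResource,
	namespace, name string,
	getFromCluster func() (*unstructured.Unstructured, error),
) ([]itemblock.ItemBlockItem, error) {
	if block != nil {
		if found := block.FindItem(gr, namespace, name); len(found) > 0 {
			return found, nil
		}
	}
	obj, err := getFromCluster()
	if err != nil {
		return nil, err
	}
	return []itemblock.ItemBlockItem{{Gr: gr, Item: obj, PreferredGVR: preferredGVR}}, nil
}
```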
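
Callers of `Backupper.Backup` and `BackupWithResolvers` must now thread an ItemBlockAction argument through; callers with no such actions pass `nil`, as the updated tests do. A sketch of the wiring mirroring `runBackup` in `backup_controller.go`, assuming the plugin manager interface from `pkg/plugin/clientmgmt` (the function name here is illustrative):

```go
package example

import (
	"io"

	"github.com/sirupsen/logrus"

	pkgbackup "github.com/vmware-tanzu/velero/pkg/backup"
	"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"
	"github.com/vmware-tanzu/velero/pkg/plugin/framework"
)

// runBackupWithItemBlocks resolves both BackupItemActions and the new
// ItemBlockActions before handing them to BackupWithResolvers.
func runBackupWithItemBlocks(
	log logrus.FieldLogger,
	backupper pkgbackup.Backupper,
	req *pkgbackup.Request,
	backupFile io.Writer,
	pluginManager clientmgmt.Manager,
	snapshotterGetter pkgbackup.VolumeSnapshotterGetter,
) error {
	biActions, err := pluginManager.GetBackupItemActionsV2()
	if err != nil {
		return err
	}
	ibActions, err := pluginManager.GetItemBlockActions()
	if err != nil {
		return err
	}
	return backupper.BackupWithResolvers(
		log, req, backupFile,
		framework.NewBackupItemActionResolverV2(biActions),
		framework.NewItemBlockActionResolver(ibActions),
		snapshotterGetter,
	)
}
```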