From 43dfc651e34ec3a9ad1d7de1b7aa64a4caece585 Mon Sep 17 00:00:00 2001
From: Lalatendu Mohanty
Date: Thu, 13 Feb 2025 13:24:37 -0500
Subject: [PATCH 1/2] :seedling: Move catalogd controllers and webhooks to
 internal/catalogd (#1749)

* Move catalogd controllers and webhook

Move catalogd/internal/controllers and catalogd/internal/webhook to
internal/catalogd, along with the corresponding Makefile changes.

Signed-off-by: Lalatendu Mohanty

* Remove the 'build-deps' target from the catalogd Makefile

build-deps depended on the generate target, which has moved to the root
Makefile, i.e. operator-controller/Makefile.

Signed-off-by: Lalatendu Mohanty

---------

Signed-off-by: Lalatendu Mohanty
---
 Makefile                                       | 10 ++++++----
 catalogd/Makefile                              | 17 ++---------------
 catalogd/cmd/catalogd/main.go                  |  4 ++--
 .../core/clustercatalog_controller.go          |  0
 .../core/clustercatalog_controller_test.go     |  0
 .../controllers/core/pull_secret_controller.go |  0
 .../core/pull_secret_controller_test.go        |  0
 .../webhook/cluster_catalog_webhook.go         |  0
 .../webhook/cluster_catalog_webhook_test.go    |  0
 9 files changed, 10 insertions(+), 21 deletions(-)
 rename {catalogd/internal => internal/catalogd}/controllers/core/clustercatalog_controller.go (100%)
 rename {catalogd/internal => internal/catalogd}/controllers/core/clustercatalog_controller_test.go (100%)
 rename {catalogd/internal => internal/catalogd}/controllers/core/pull_secret_controller.go (100%)
 rename {catalogd/internal => internal/catalogd}/controllers/core/pull_secret_controller_test.go (100%)
 rename {catalogd/internal => internal/catalogd}/webhook/cluster_catalog_webhook.go (100%)
 rename {catalogd/internal => internal/catalogd}/webhook/cluster_catalog_webhook_test.go (100%)

diff --git a/Makefile b/Makefile
index ee943a9d5..21ecf8872 100644
--- a/Makefile
+++ b/Makefile
@@ -115,16 +115,18 @@ tidy: #HELP Update dependencies.
 	# Force tidy to use the version already in go.mod
 	$(Q)go mod tidy -go=$(GOLANG_VERSION)
-
 .PHONY: manifests
 KUSTOMIZE_CRDS_DIR := config/base/crd/bases
 KUSTOMIZE_RBAC_DIR := config/base/rbac
+KUSTOMIZE_WEBHOOKS_DIR := config/base/manager/webhook
 manifests: $(CONTROLLER_GEN) #EXHELP Generate WebhookConfiguration, ClusterRole, and CustomResourceDefinition objects.
-	# To generate the manifests used and do not use catalogd directory
+	# Generate the operator-controller manifests
 	rm -rf $(KUSTOMIZE_CRDS_DIR) && $(CONTROLLER_GEN) crd paths=./api/... output:crd:artifacts:config=$(KUSTOMIZE_CRDS_DIR)
 	rm -f $(KUSTOMIZE_RBAC_DIR)/role.yaml && $(CONTROLLER_GEN) rbac:roleName=manager-role paths=./internal/operator-controller/... output:rbac:artifacts:config=$(KUSTOMIZE_RBAC_DIR)
-	# To generate the manifests for catalogd
-	$(MAKE) -C catalogd generate
+	# Generate the catalogd manifests
+	rm -rf catalogd/$(KUSTOMIZE_CRDS_DIR) && $(CONTROLLER_GEN) crd paths="./catalogd/api/..." output:crd:artifacts:config=catalogd/$(KUSTOMIZE_CRDS_DIR)
+	rm -f catalogd/$(KUSTOMIZE_RBAC_DIR)/role.yaml && $(CONTROLLER_GEN) rbac:roleName=manager-role paths="./internal/catalogd/..." output:rbac:artifacts:config=catalogd/$(KUSTOMIZE_RBAC_DIR)
+	rm -f catalogd/$(KUSTOMIZE_WEBHOOKS_DIR)/manifests.yaml && $(CONTROLLER_GEN) webhook paths="./internal/catalogd/..." output:webhook:artifacts:config=catalogd/$(KUSTOMIZE_WEBHOOKS_DIR)

 .PHONY: generate
 generate: $(CONTROLLER_GEN) #EXHELP Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
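The consolidated `manifests` target above generates catalogd's ClusterRole and
WebhookConfiguration directly from kubebuilder markers found under
./internal/catalogd/..., instead of recursing into the catalogd Makefile. As a
point of reference for what controller-gen scans there, the relocated
ClusterCatalog controller declares RBAC markers such as the following Go
comment (this marker appears verbatim in clustercatalog_controller.go later in
this series; the webhook manifests are produced the same way by the separate
`controller-gen webhook` invocation from analogous //+kubebuilder:webhook
markers):

    //+kubebuilder:rbac:groups=olm.operatorframework.io,resources=clustercatalogs,verbs=get;list;watch;create;update;patch;delete

controller-gen aggregates every such marker under the given paths into
catalogd/config/base/rbac/role.yaml and
catalogd/config/base/manager/webhook/manifests.yaml, respectively.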
diff --git a/catalogd/Makefile b/catalogd/Makefile index 6f61043de..19f60fb0e 100644 --- a/catalogd/Makefile +++ b/catalogd/Makefile @@ -58,16 +58,6 @@ help: ## Display this help. clean: ## Remove binaries and test artifacts rm -rf bin -.PHONY: generate -KUSTOMIZE_CRDS_DIR := config/base/crd/bases -KUSTOMIZE_RBAC_DIR := config/base/rbac -KUSTOMIZE_WEBHOOKS_DIR := config/base/manager/webhook -generate: $(CONTROLLER_GEN) ## Generate code and manifests. - $(CONTROLLER_GEN) object:headerFile="../hack/boilerplate.go.txt" paths="./..." - rm -rf $(KUSTOMIZE_CRDS_DIR) && $(CONTROLLER_GEN) crd paths="./api/..." output:crd:artifacts:config=$(KUSTOMIZE_CRDS_DIR) - rm -f $(KUSTOMIZE_RBAC_DIR)/role.yaml && $(CONTROLLER_GEN) rbac:roleName=manager-role paths="./internal/..." output:rbac:artifacts:config=$(KUSTOMIZE_RBAC_DIR) - rm -f $(KUSTOMIZE_WEBHOOKS_DIR)/manifests.yaml && $(CONTROLLER_GEN) webhook paths="./internal/..." output:webhook:artifacts:config=$(KUSTOMIZE_WEBHOOKS_DIR) - ##@ Build BINARIES=catalogd @@ -98,18 +88,15 @@ export GO_BUILD_TAGS := containers_image_openpgp BUILDCMD = go build -tags '$(GO_BUILD_TAGS)' -ldflags '$(GO_BUILD_LDFLAGS)' -gcflags '$(GO_BUILD_GCFLAGS)' -asmflags '$(GO_BUILD_ASMFLAGS)' -o $(BUILDBIN)/$(notdir $@) ./cmd/$(notdir $@) -.PHONY: build-deps -build-deps: generate - .PHONY: build go-build-local $(BINARIES) -build: build-deps go-build-local ## Build binaries for current GOOS and GOARCH. +build: go-build-local ## Build binaries for current GOOS and GOARCH. go-build-local: $(BINARIES) $(BINARIES): BUILDBIN = bin $(BINARIES): $(BUILDCMD) .PHONY: build-linux go-build-linux $(LINUX_BINARIES) -build-linux: build-deps go-build-linux ## Build binaries for GOOS=linux and local GOARCH. +build-linux: go-build-linux ## Build binaries for GOOS=linux and local GOARCH. 
go-build-linux: $(LINUX_BINARIES) $(LINUX_BINARIES): BUILDBIN = bin/linux $(LINUX_BINARIES): diff --git a/catalogd/cmd/catalogd/main.go b/catalogd/cmd/catalogd/main.go index 7e973cfc3..693a26b6c 100644 --- a/catalogd/cmd/catalogd/main.go +++ b/catalogd/cmd/catalogd/main.go @@ -56,14 +56,14 @@ import ( crwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1" - corecontrollers "github.com/operator-framework/operator-controller/catalogd/internal/controllers/core" - "github.com/operator-framework/operator-controller/catalogd/internal/webhook" + corecontrollers "github.com/operator-framework/operator-controller/internal/catalogd/controllers/core" "github.com/operator-framework/operator-controller/internal/catalogd/features" "github.com/operator-framework/operator-controller/internal/catalogd/garbagecollection" catalogdmetrics "github.com/operator-framework/operator-controller/internal/catalogd/metrics" "github.com/operator-framework/operator-controller/internal/catalogd/serverutil" "github.com/operator-framework/operator-controller/internal/catalogd/source" "github.com/operator-framework/operator-controller/internal/catalogd/storage" + "github.com/operator-framework/operator-controller/internal/catalogd/webhook" fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs" "github.com/operator-framework/operator-controller/internal/shared/version" ) diff --git a/catalogd/internal/controllers/core/clustercatalog_controller.go b/internal/catalogd/controllers/core/clustercatalog_controller.go similarity index 100% rename from catalogd/internal/controllers/core/clustercatalog_controller.go rename to internal/catalogd/controllers/core/clustercatalog_controller.go diff --git a/catalogd/internal/controllers/core/clustercatalog_controller_test.go b/internal/catalogd/controllers/core/clustercatalog_controller_test.go similarity index 100% rename from catalogd/internal/controllers/core/clustercatalog_controller_test.go rename to internal/catalogd/controllers/core/clustercatalog_controller_test.go diff --git a/catalogd/internal/controllers/core/pull_secret_controller.go b/internal/catalogd/controllers/core/pull_secret_controller.go similarity index 100% rename from catalogd/internal/controllers/core/pull_secret_controller.go rename to internal/catalogd/controllers/core/pull_secret_controller.go diff --git a/catalogd/internal/controllers/core/pull_secret_controller_test.go b/internal/catalogd/controllers/core/pull_secret_controller_test.go similarity index 100% rename from catalogd/internal/controllers/core/pull_secret_controller_test.go rename to internal/catalogd/controllers/core/pull_secret_controller_test.go diff --git a/catalogd/internal/webhook/cluster_catalog_webhook.go b/internal/catalogd/webhook/cluster_catalog_webhook.go similarity index 100% rename from catalogd/internal/webhook/cluster_catalog_webhook.go rename to internal/catalogd/webhook/cluster_catalog_webhook.go diff --git a/catalogd/internal/webhook/cluster_catalog_webhook_test.go b/internal/catalogd/webhook/cluster_catalog_webhook_test.go similarity index 100% rename from catalogd/internal/webhook/cluster_catalog_webhook_test.go rename to internal/catalogd/webhook/cluster_catalog_webhook_test.go From ee8d8210ebea9586f637122acdba729ca5385e89 Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Thu, 13 Feb 2025 14:43:06 -0500 Subject: [PATCH 2/2] imageutil: further containers/image consolidation (#1731) --- .gitignore | 1 + 
catalogd/cmd/catalogd/main.go | 26 +- cmd/operator-controller/main.go | 18 +- go.mod | 2 +- .../core/clustercatalog_controller.go | 86 ++- .../core/clustercatalog_controller_test.go | 228 +++---- internal/catalogd/source/containers_image.go | 340 ---------- .../catalogd/source/containers_image_test.go | 477 -------------- internal/catalogd/source/unpacker.go | 72 -- .../catalogmetadata/client/client.go | 2 +- .../clusterextension_controller.go | 26 +- .../clusterextension_controller_test.go | 117 +--- .../controllers/suite_test.go | 20 - .../rukpak/source/containers_image.go | 299 --------- .../rukpak/source/containers_image_test.go | 403 ------------ .../rukpak/source/unpacker.go | 74 --- internal/shared/util/error/terminal.go | 10 + .../httputil => shared/util/http}/certlog.go | 2 +- .../util/http}/certpoolwatcher.go | 2 +- .../util/http}/certpoolwatcher_test.go | 4 +- .../httputil => shared/util/http}/certutil.go | 2 +- .../util/http}/certutil_test.go | 10 +- .../httputil => shared/util/http}/httputil.go | 2 +- internal/shared/util/image/cache.go | 185 ++++++ internal/shared/util/image/cache_test.go | 621 ++++++++++++++++++ internal/shared/util/image/filters.go | 65 ++ .../image/{layers_test.go => filters_test.go} | 2 +- internal/shared/util/image/layers.go | 118 ---- internal/shared/util/image/mocks.go | 61 ++ internal/shared/util/image/pull.go | 275 ++++++++ internal/shared/util/image/pull_test.go | 305 +++++++++ 31 files changed, 1753 insertions(+), 2102 deletions(-) delete mode 100644 internal/catalogd/source/containers_image.go delete mode 100644 internal/catalogd/source/containers_image_test.go delete mode 100644 internal/catalogd/source/unpacker.go delete mode 100644 internal/operator-controller/rukpak/source/containers_image.go delete mode 100644 internal/operator-controller/rukpak/source/containers_image_test.go delete mode 100644 internal/operator-controller/rukpak/source/unpacker.go create mode 100644 internal/shared/util/error/terminal.go rename internal/{operator-controller/httputil => shared/util/http}/certlog.go (99%) rename internal/{operator-controller/httputil => shared/util/http}/certpoolwatcher.go (99%) rename internal/{operator-controller/httputil => shared/util/http}/certpoolwatcher_test.go (95%) rename internal/{operator-controller/httputil => shared/util/http}/certutil.go (98%) rename internal/{operator-controller/httputil => shared/util/http}/certutil_test.go (67%) rename internal/{operator-controller/httputil => shared/util/http}/httputil.go (96%) create mode 100644 internal/shared/util/image/cache.go create mode 100644 internal/shared/util/image/cache_test.go create mode 100644 internal/shared/util/image/filters.go rename internal/shared/util/image/{layers_test.go => filters_test.go} (98%) delete mode 100644 internal/shared/util/image/layers.go create mode 100644 internal/shared/util/image/mocks.go create mode 100644 internal/shared/util/image/pull.go create mode 100644 internal/shared/util/image/pull_test.go diff --git a/.gitignore b/.gitignore index 5c60f79f2..d238a6125 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,7 @@ vendor/ # editor and IDE paraphernalia .idea/ +.run/ *.swp *.swo *~ diff --git a/catalogd/cmd/catalogd/main.go b/catalogd/cmd/catalogd/main.go index 693a26b6c..e8b3ecf66 100644 --- a/catalogd/cmd/catalogd/main.go +++ b/catalogd/cmd/catalogd/main.go @@ -17,11 +17,11 @@ limitations under the License. 
package main import ( + "context" "crypto/tls" "errors" "flag" "fmt" - "log" "net/url" "os" "path/filepath" @@ -29,7 +29,6 @@ import ( "time" "github.com/containers/image/v5/types" - "github.com/go-logr/logr" "github.com/sirupsen/logrus" "github.com/spf13/pflag" corev1 "k8s.io/api/core/v1" @@ -50,6 +49,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/certwatcher" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/metrics/filters" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -61,10 +61,10 @@ import ( "github.com/operator-framework/operator-controller/internal/catalogd/garbagecollection" catalogdmetrics "github.com/operator-framework/operator-controller/internal/catalogd/metrics" "github.com/operator-framework/operator-controller/internal/catalogd/serverutil" - "github.com/operator-framework/operator-controller/internal/catalogd/source" "github.com/operator-framework/operator-controller/internal/catalogd/storage" "github.com/operator-framework/operator-controller/internal/catalogd/webhook" fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs" + imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" "github.com/operator-framework/operator-controller/internal/shared/version" ) @@ -177,7 +177,8 @@ func main() { cw, err := certwatcher.New(certFile, keyFile) if err != nil { - log.Fatalf("Failed to initialize certificate watcher: %v", err) + setupLog.Error(err, "failed to initialize certificate watcher") + os.Exit(1) } tlsOpts := func(config *tls.Config) { @@ -273,14 +274,16 @@ func main() { os.Exit(1) } - unpackCacheBasePath := filepath.Join(cacheDir, source.UnpackCacheDir) + unpackCacheBasePath := filepath.Join(cacheDir, "unpack") if err := os.MkdirAll(unpackCacheBasePath, 0770); err != nil { setupLog.Error(err, "unable to create cache directory for unpacking") os.Exit(1) } - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: unpackCacheBasePath, - SourceContextFunc: func(logger logr.Logger) (*types.SystemContext, error) { + + imageCache := imageutil.CatalogCache(unpackCacheBasePath) + imagePuller := &imageutil.ContainersImagePuller{ + SourceCtxFunc: func(ctx context.Context) (*types.SystemContext, error) { + logger := log.FromContext(ctx) srcContext := &types.SystemContext{ DockerCertPath: pullCasDir, OCICertPath: pullCasDir, @@ -334,9 +337,10 @@ func main() { } if err = (&corecontrollers.ClusterCatalogReconciler{ - Client: mgr.GetClient(), - Unpacker: unpacker, - Storage: localStorage, + Client: mgr.GetClient(), + ImageCache: imageCache, + ImagePuller: imagePuller, + Storage: localStorage, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ClusterCatalog") os.Exit(1) diff --git a/cmd/operator-controller/main.go b/cmd/operator-controller/main.go index 869e43170..51db2fe14 100644 --- a/cmd/operator-controller/main.go +++ b/cmd/operator-controller/main.go @@ -29,7 +29,6 @@ import ( "time" "github.com/containers/image/v5/types" - "github.com/go-logr/logr" "github.com/sirupsen/logrus" "github.com/spf13/pflag" corev1 "k8s.io/api/core/v1" @@ -49,6 +48,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" crfinalizer "sigs.k8s.io/controller-runtime/pkg/finalizer" "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log" 
"sigs.k8s.io/controller-runtime/pkg/metrics/filters" "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -65,12 +65,12 @@ import ( "github.com/operator-framework/operator-controller/internal/operator-controller/controllers" "github.com/operator-framework/operator-controller/internal/operator-controller/features" "github.com/operator-framework/operator-controller/internal/operator-controller/finalizers" - "github.com/operator-framework/operator-controller/internal/operator-controller/httputil" "github.com/operator-framework/operator-controller/internal/operator-controller/resolve" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/preflights/crdupgradesafety" - "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/source" "github.com/operator-framework/operator-controller/internal/operator-controller/scheme" fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs" + httputil "github.com/operator-framework/operator-controller/internal/shared/util/http" + imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" "github.com/operator-framework/operator-controller/internal/shared/version" ) @@ -315,13 +315,14 @@ func main() { os.Exit(1) } - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: filepath.Join(cachePath, "unpack"), - SourceContextFunc: func(logger logr.Logger) (*types.SystemContext, error) { + imageCache := imageutil.BundleCache(filepath.Join(cachePath, "unpack")) + imagePuller := &imageutil.ContainersImagePuller{ + SourceCtxFunc: func(ctx context.Context) (*types.SystemContext, error) { srcContext := &types.SystemContext{ DockerCertPath: pullCasDir, OCICertPath: pullCasDir, } + logger := log.FromContext(ctx) if _, err := os.Stat(authFilePath); err == nil && globalPullSecretKey != nil { logger.Info("using available authentication information for pulling image") srcContext.AuthFilePath = authFilePath @@ -336,7 +337,7 @@ func main() { clusterExtensionFinalizers := crfinalizer.NewFinalizers() if err := clusterExtensionFinalizers.Register(controllers.ClusterExtensionCleanupUnpackCacheFinalizer, finalizers.FinalizerFunc(func(ctx context.Context, obj client.Object) (crfinalizer.Result, error) { - return crfinalizer.Result{}, unpacker.Cleanup(ctx, &source.BundleSource{Name: obj.GetName()}) + return crfinalizer.Result{}, imageCache.Delete(ctx, obj.GetName()) })); err != nil { setupLog.Error(err, "unable to register finalizer", "finalizerKey", controllers.ClusterExtensionCleanupUnpackCacheFinalizer) os.Exit(1) @@ -399,7 +400,8 @@ func main() { if err = (&controllers.ClusterExtensionReconciler{ Client: cl, Resolver: resolver, - Unpacker: unpacker, + ImageCache: imageCache, + ImagePuller: imagePuller, Applier: helmApplier, InstalledBundleGetter: &controllers.DefaultInstalledBundleGetter{ActionClientGetter: acg}, Finalizers: clusterExtensionFinalizers, diff --git a/go.mod b/go.mod index d97c0ff47..5eb57937e 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( github.com/onsi/ginkgo/v2 v2.22.2 github.com/onsi/gomega v1.36.2 github.com/opencontainers/go-digest v1.0.0 + github.com/opencontainers/image-spec v1.1.0 github.com/operator-framework/api v0.29.0 github.com/operator-framework/helm-operator-plugins v0.8.0 github.com/operator-framework/operator-registry v1.50.0 @@ -177,7 +178,6 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect 
github.com/oklog/ulid v1.3.1 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/openshift/crd-schema-checker v0.0.0-20240404194209-35a9033b1d11 // indirect github.com/operator-framework/operator-lib v0.17.0 // indirect diff --git a/internal/catalogd/controllers/core/clustercatalog_controller.go b/internal/catalogd/controllers/core/clustercatalog_controller.go index 7dd1de79e..68a4c6516 100644 --- a/internal/catalogd/controllers/core/clustercatalog_controller.go +++ b/internal/catalogd/controllers/core/clustercatalog_controller.go @@ -24,6 +24,7 @@ import ( "sync" "time" + "github.com/containers/image/v5/docker/reference" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,8 +39,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1" - "github.com/operator-framework/operator-controller/internal/catalogd/source" "github.com/operator-framework/operator-controller/internal/catalogd/storage" + imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" ) const ( @@ -52,8 +53,11 @@ const ( // ClusterCatalogReconciler reconciles a Catalog object type ClusterCatalogReconciler struct { client.Client - Unpacker source.Unpacker - Storage storage.Instance + + ImageCache imageutil.Cache + ImagePuller imageutil.Puller + + Storage storage.Instance finalizers crfinalizer.Finalizers @@ -66,8 +70,10 @@ type ClusterCatalogReconciler struct { } type storedCatalogData struct { + ref reference.Canonical + lastUnpack time.Time + lastSuccessfulPoll time.Time observedGeneration int64 - unpackResult source.Result } //+kubebuilder:rbac:groups=olm.operatorframework.io,resources=clustercatalogs,verbs=get;list;watch;create;update;patch;delete @@ -216,7 +222,7 @@ func (r *ClusterCatalogReconciler) reconcile(ctx context.Context, catalog *catal case catalog.Generation != storedCatalog.observedGeneration: l.Info("unpack required: catalog generation differs from observed generation") needsUnpack = true - case r.needsPoll(storedCatalog.unpackResult.LastSuccessfulPollAttempt.Time, catalog): + case r.needsPoll(storedCatalog.lastSuccessfulPoll, catalog): l.Info("unpack required: poll duration has elapsed") needsUnpack = true } @@ -224,42 +230,50 @@ func (r *ClusterCatalogReconciler) reconcile(ctx context.Context, catalog *catal if !needsUnpack { // No need to update the status because we've already checked // that it is set correctly. Otherwise, we'd be unpacking again. 
-		return nextPollResult(storedCatalog.unpackResult.LastSuccessfulPollAttempt.Time, catalog), nil
+		return nextPollResult(storedCatalog.lastSuccessfulPoll, catalog), nil
+	}
+
+	if catalog.Spec.Source.Type != catalogdv1.SourceTypeImage {
+		err := reconcile.TerminalError(fmt.Errorf("unknown source type %q", catalog.Spec.Source.Type))
+		updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), err)
+		return ctrl.Result{}, err
+	}
+	if catalog.Spec.Source.Image == nil {
+		err := reconcile.TerminalError(fmt.Errorf("error parsing ClusterCatalog %q, image source is nil", catalog.Name))
+		updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), err)
+		return ctrl.Result{}, err
 	}

-	unpackResult, err := r.Unpacker.Unpack(ctx, catalog)
+	fsys, canonicalRef, unpackTime, err := r.ImagePuller.Pull(ctx, catalog.Name, catalog.Spec.Source.Image.Ref, r.ImageCache)
 	if err != nil {
 		unpackErr := fmt.Errorf("source catalog content: %w", err)
 		updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), unpackErr)
 		return ctrl.Result{}, unpackErr
 	}

-	switch unpackResult.State {
-	case source.StateUnpacked:
-		// TODO: We should check to see if the unpacked result has the same content
-		// as the already unpacked content. If it does, we should skip this rest
-		// of the unpacking steps.
-		err := r.Storage.Store(ctx, catalog.Name, unpackResult.FS)
-		if err != nil {
-			storageErr := fmt.Errorf("error storing fbc: %v", err)
-			updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), storageErr)
-			return ctrl.Result{}, storageErr
-		}
-		baseURL := r.Storage.BaseURL(catalog.Name)
-
-		updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), nil)
-		updateStatusServing(&catalog.Status, *unpackResult, baseURL, catalog.GetGeneration())
-	default:
-		panic(fmt.Sprintf("unknown unpack state %q", unpackResult.State))
+	// TODO: We should check to see if the unpacked result has the same content
+	// as the already unpacked content. If it does, we should skip the rest
+	// of the unpacking steps.
+ if err := r.Storage.Store(ctx, catalog.Name, fsys); err != nil { + storageErr := fmt.Errorf("error storing fbc: %v", err) + updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), storageErr) + return ctrl.Result{}, storageErr } + baseURL := r.Storage.BaseURL(catalog.Name) + + updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), nil) + updateStatusServing(&catalog.Status, canonicalRef, unpackTime, baseURL, catalog.GetGeneration()) + lastSuccessfulPoll := time.Now() r.storedCatalogsMu.Lock() r.storedCatalogs[catalog.Name] = storedCatalogData{ - unpackResult: *unpackResult, + ref: canonicalRef, + lastUnpack: unpackTime, + lastSuccessfulPoll: lastSuccessfulPoll, observedGeneration: catalog.GetGeneration(), } r.storedCatalogsMu.Unlock() - return nextPollResult(unpackResult.LastSuccessfulPollAttempt.Time, catalog), nil + return nextPollResult(lastSuccessfulPoll, catalog), nil } func (r *ClusterCatalogReconciler) getCurrentState(catalog *catalogdv1.ClusterCatalog) (*catalogdv1.ClusterCatalogStatus, storedCatalogData, bool) { @@ -272,7 +286,7 @@ func (r *ClusterCatalogReconciler) getCurrentState(catalog *catalogdv1.ClusterCa // Set expected status based on what we see in the stored catalog clearUnknownConditions(expectedStatus) if hasStoredCatalog && r.Storage.ContentExists(catalog.Name) { - updateStatusServing(expectedStatus, storedCatalog.unpackResult, r.Storage.BaseURL(catalog.Name), storedCatalog.observedGeneration) + updateStatusServing(expectedStatus, storedCatalog.ref, storedCatalog.lastUnpack, r.Storage.BaseURL(catalog.Name), storedCatalog.observedGeneration) updateStatusProgressing(expectedStatus, storedCatalog.observedGeneration, nil) } @@ -325,13 +339,17 @@ func updateStatusProgressing(status *catalogdv1.ClusterCatalogStatus, generation meta.SetStatusCondition(&status.Conditions, progressingCond) } -func updateStatusServing(status *catalogdv1.ClusterCatalogStatus, result source.Result, baseURL string, generation int64) { - status.ResolvedSource = result.ResolvedSource - if status.URLs == nil { - status.URLs = &catalogdv1.ClusterCatalogURLs{} +func updateStatusServing(status *catalogdv1.ClusterCatalogStatus, ref reference.Canonical, modTime time.Time, baseURL string, generation int64) { + status.ResolvedSource = &catalogdv1.ResolvedCatalogSource{ + Type: catalogdv1.SourceTypeImage, + Image: &catalogdv1.ResolvedImageSource{ + Ref: ref.String(), + }, + } + status.URLs = &catalogdv1.ClusterCatalogURLs{ + Base: baseURL, } - status.URLs.Base = baseURL - status.LastUnpacked = ptr.To(metav1.NewTime(result.UnpackTime)) + status.LastUnpacked = ptr.To(metav1.NewTime(modTime.Truncate(time.Second))) meta.SetStatusCondition(&status.Conditions, metav1.Condition{ Type: catalogdv1.TypeServing, Status: metav1.ConditionTrue, @@ -434,7 +452,7 @@ func (r *ClusterCatalogReconciler) deleteCatalogCache(ctx context.Context, catal return err } updateStatusNotServing(&catalog.Status, catalog.GetGeneration()) - if err := r.Unpacker.Cleanup(ctx, catalog); err != nil { + if err := r.ImageCache.Delete(ctx, catalog.Name); err != nil { updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), err) return err } diff --git a/internal/catalogd/controllers/core/clustercatalog_controller_test.go b/internal/catalogd/controllers/core/clustercatalog_controller_test.go index 84db330ac..d7b69eee2 100644 --- a/internal/catalogd/controllers/core/clustercatalog_controller_test.go +++ b/internal/catalogd/controllers/core/clustercatalog_controller_test.go @@ -10,6 +10,7 @@ import ( "testing/fstest" 
"time" + "github.com/containers/image/v5/docker/reference" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/assert" @@ -21,36 +22,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1" - "github.com/operator-framework/operator-controller/internal/catalogd/source" "github.com/operator-framework/operator-controller/internal/catalogd/storage" + imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" ) -var _ source.Unpacker = &MockSource{} - -// MockSource is a utility for mocking out an Unpacker source -type MockSource struct { - // result is the result that should be returned when MockSource.Unpack is called - result *source.Result - - // error is the error to be returned when MockSource.Unpack is called - unpackError error - - // cleanupError is the error to be returned when MockSource.Cleanup is called - cleanupError error -} - -func (ms *MockSource) Unpack(_ context.Context, _ *catalogdv1.ClusterCatalog) (*source.Result, error) { - if ms.unpackError != nil { - return nil, ms.unpackError - } - - return ms.result, nil -} - -func (ms *MockSource) Cleanup(_ context.Context, _ *catalogdv1.ClusterCatalog) error { - return ms.cleanupError -} - var _ storage.Instance = &MockStore{} type MockStore struct { @@ -88,14 +63,14 @@ func TestCatalogdControllerReconcile(t *testing.T) { name string catalog *catalogdv1.ClusterCatalog expectedError error - shouldPanic bool expectedCatalog *catalogdv1.ClusterCatalog - source source.Unpacker + puller imageutil.Puller + cache imageutil.Cache store storage.Instance }{ { - name: "invalid source type, panics", - source: &MockSource{}, + name: "invalid source type, returns error", + puller: &imageutil.MockPuller{}, store: &MockStore{}, catalog: &catalogdv1.ClusterCatalog{ ObjectMeta: metav1.ObjectMeta{ @@ -108,7 +83,7 @@ func TestCatalogdControllerReconcile(t *testing.T) { }, }, }, - shouldPanic: true, + expectedError: reconcile.TerminalError(errors.New(`unknown source type "invalid"`)), expectedCatalog: &catalogdv1.ClusterCatalog{ ObjectMeta: metav1.ObjectMeta{ Name: "catalog", @@ -132,9 +107,9 @@ func TestCatalogdControllerReconcile(t *testing.T) { }, { name: "valid source type, unpack returns error, status updated to reflect error state and error is returned", - expectedError: fmt.Errorf("source catalog content: %w", fmt.Errorf("mocksource error")), - source: &MockSource{ - unpackError: errors.New("mocksource error"), + expectedError: fmt.Errorf("source catalog content: %w", fmt.Errorf("mockpuller error")), + puller: &imageutil.MockPuller{ + Error: errors.New("mockpuller error"), }, store: &MockStore{}, catalog: &catalogdv1.ClusterCatalog{ @@ -177,9 +152,9 @@ func TestCatalogdControllerReconcile(t *testing.T) { }, { name: "valid source type, unpack returns terminal error, status updated to reflect terminal error state(Blocked) and error is returned", - expectedError: fmt.Errorf("source catalog content: %w", reconcile.TerminalError(fmt.Errorf("mocksource terminal error"))), - source: &MockSource{ - unpackError: reconcile.TerminalError(errors.New("mocksource terminal error")), + expectedError: fmt.Errorf("source catalog content: %w", reconcile.TerminalError(fmt.Errorf("mockpuller terminal error"))), + puller: &imageutil.MockPuller{ + Error: reconcile.TerminalError(errors.New("mockpuller terminal error")), }, store: &MockStore{}, catalog: &catalogdv1.ClusterCatalog{ @@ -222,16 +197,9 @@ func 
TestCatalogdControllerReconcile(t *testing.T) { }, { name: "valid source type, unpack state == Unpacked, should reflect in status that it's progressing, and is serving", - source: &MockSource{ - result: &source.Result{ - State: source.StateUnpacked, - FS: &fstest.MapFS{}, - ResolvedSource: &catalogdv1.ResolvedCatalogSource{ - Image: &catalogdv1.ResolvedImageSource{ - Ref: "my.org/someimage@someSHA256Digest", - }, - }, - }, + puller: &imageutil.MockPuller{ + ImageFS: &fstest.MapFS{}, + Ref: mustRef(t, "my.org/someimage@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), }, store: &MockStore{}, catalog: &catalogdv1.ClusterCatalog{ @@ -276,8 +244,9 @@ func TestCatalogdControllerReconcile(t *testing.T) { }, }, ResolvedSource: &catalogdv1.ResolvedCatalogSource{ + Type: catalogdv1.SourceTypeImage, Image: &catalogdv1.ResolvedImageSource{ - Ref: "my.org/someimage@someSHA256Digest", + Ref: "my.org/someimage@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }, }, LastUnpacked: &metav1.Time{}, @@ -287,11 +256,8 @@ func TestCatalogdControllerReconcile(t *testing.T) { { name: "valid source type, unpack state == Unpacked, storage fails, failure reflected in status and error returned", expectedError: fmt.Errorf("error storing fbc: mockstore store error"), - source: &MockSource{ - result: &source.Result{ - State: source.StateUnpacked, - FS: &fstest.MapFS{}, - }, + puller: &imageutil.MockPuller{ + ImageFS: &fstest.MapFS{}, }, store: &MockStore{ shouldError: true, @@ -336,11 +302,8 @@ func TestCatalogdControllerReconcile(t *testing.T) { }, { name: "storage finalizer not set, storage finalizer gets set", - source: &MockSource{ - result: &source.Result{ - State: source.StateUnpacked, - FS: &fstest.MapFS{}, - }, + puller: &imageutil.MockPuller{ + ImageFS: &fstest.MapFS{}, }, store: &MockStore{}, catalog: &catalogdv1.ClusterCatalog{ @@ -373,11 +336,8 @@ func TestCatalogdControllerReconcile(t *testing.T) { }, { name: "storage finalizer set, catalog deletion timestamp is not zero (or nil), finalizer removed", - source: &MockSource{ - result: &source.Result{ - State: source.StateUnpacked, - FS: &fstest.MapFS{}, - }, + puller: &imageutil.MockPuller{ + ImageFS: &fstest.MapFS{}, }, store: &MockStore{}, catalog: &catalogdv1.ClusterCatalog{ @@ -449,11 +409,8 @@ func TestCatalogdControllerReconcile(t *testing.T) { { name: "storage finalizer set, catalog deletion timestamp is not zero (or nil), storage delete failed, error returned, finalizer not removed and catalog continues serving", expectedError: fmt.Errorf("finalizer %q failed: %w", fbcDeletionFinalizer, fmt.Errorf("mockstore delete error")), - source: &MockSource{ - result: &source.Result{ - State: source.StateUnpacked, - FS: &fstest.MapFS{}, - }, + puller: &imageutil.MockPuller{ + ImageFS: &fstest.MapFS{}, }, store: &MockStore{ shouldError: true, @@ -521,11 +478,9 @@ func TestCatalogdControllerReconcile(t *testing.T) { }, { name: "storage finalizer set, catalog deletion timestamp is not zero (or nil), unpack cleanup failed, error returned, finalizer not removed but catalog stops serving", - expectedError: fmt.Errorf("finalizer %q failed: %w", fbcDeletionFinalizer, fmt.Errorf("mocksource cleanup error")), - source: &MockSource{ - unpackError: nil, - cleanupError: fmt.Errorf("mocksource cleanup error"), - }, + expectedError: fmt.Errorf("finalizer %q failed: %w", fbcDeletionFinalizer, fmt.Errorf("mockcache delete error")), + puller: &imageutil.MockPuller{}, + cache: &imageutil.MockCache{DeleteErr: fmt.Errorf("mockcache 
delete error")}, store: &MockStore{ shouldError: false, }, @@ -590,11 +545,8 @@ func TestCatalogdControllerReconcile(t *testing.T) { }, { name: "catalog availability set to disabled, status.urls should get unset", - source: &MockSource{ - result: &source.Result{ - State: source.StateUnpacked, - FS: &fstest.MapFS{}, - }, + puller: &imageutil.MockPuller{ + ImageFS: &fstest.MapFS{}, }, store: &MockStore{}, catalog: &catalogdv1.ClusterCatalog{ @@ -664,11 +616,8 @@ func TestCatalogdControllerReconcile(t *testing.T) { }, { name: "catalog availability set to disabled, finalizer should get removed", - source: &MockSource{ - result: &source.Result{ - State: source.StateUnpacked, - FS: &fstest.MapFS{}, - }, + puller: &imageutil.MockPuller{ + ImageFS: &fstest.MapFS{}, }, store: &MockStore{}, catalog: &catalogdv1.ClusterCatalog{ @@ -742,18 +691,17 @@ func TestCatalogdControllerReconcile(t *testing.T) { t.Run(tt.name, func(t *testing.T) { reconciler := &ClusterCatalogReconciler{ Client: nil, - Unpacker: tt.source, + ImagePuller: tt.puller, + ImageCache: tt.cache, Storage: tt.store, storedCatalogs: map[string]storedCatalogData{}, } + if reconciler.ImageCache == nil { + reconciler.ImageCache = &imageutil.MockCache{} + } require.NoError(t, reconciler.setupFinalizers()) ctx := context.Background() - if tt.shouldPanic { - assert.Panics(t, func() { _, _ = reconciler.reconcile(ctx, tt.catalog) }) - return - } - res, err := reconciler.reconcile(ctx, tt.catalog) assert.Equal(t, ctrl.Result{}, res) // errors are aggregated/wrapped @@ -775,7 +723,7 @@ func TestPollingRequeue(t *testing.T) { for name, tc := range map[string]struct { catalog *catalogdv1.ClusterCatalog expectedRequeueAfter time.Duration - lastPollTime metav1.Time + lastPollTime time.Time }{ "ClusterCatalog with tag based image ref without any poll interval specified, requeueAfter set to 0, ie polling disabled": { catalog: &catalogdv1.ClusterCatalog{ @@ -793,9 +741,9 @@ func TestPollingRequeue(t *testing.T) { }, }, expectedRequeueAfter: time.Second * 0, - lastPollTime: metav1.Now(), + lastPollTime: time.Now(), }, - "ClusterCatalog with tag based image ref with poll interval specified, requeueAfter set to wait.jitter(pollInterval)": { + "ClusterCatalog with tag based image ref with poll interval specified, just polled, requeueAfter set to wait.jitter(pollInterval)": { catalog: &catalogdv1.ClusterCatalog{ ObjectMeta: metav1.ObjectMeta{ Name: "test-catalog", @@ -812,28 +760,60 @@ func TestPollingRequeue(t *testing.T) { }, }, expectedRequeueAfter: time.Minute * 5, - lastPollTime: metav1.Now(), + lastPollTime: time.Now(), + }, + "ClusterCatalog with tag based image ref with poll interval specified, last polled 2m ago, requeueAfter set to wait.jitter(pollInterval-2)": { + catalog: &catalogdv1.ClusterCatalog{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-catalog", + Finalizers: []string{fbcDeletionFinalizer}, + }, + Spec: catalogdv1.ClusterCatalogSpec{ + Source: catalogdv1.CatalogSource{ + Type: catalogdv1.SourceTypeImage, + Image: &catalogdv1.ImageSource{ + Ref: "my.org/someimage:latest", + PollIntervalMinutes: ptr.To(5), + }, + }, + }, + }, + expectedRequeueAfter: time.Minute * 3, + lastPollTime: time.Now().Add(-2 * time.Minute), }, } { t.Run(name, func(t *testing.T) { + ref := mustRef(t, "my.org/someimage@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") + tc.catalog.Status = catalogdv1.ClusterCatalogStatus{ + Conditions: []metav1.Condition{ + {Type: catalogdv1.TypeServing, Status: metav1.ConditionTrue, Reason: 
catalogdv1.ReasonAvailable, Message: "Serving desired content from resolved source", LastTransitionTime: metav1.Now()}, + {Type: catalogdv1.TypeProgressing, Status: metav1.ConditionTrue, Reason: catalogdv1.ReasonSucceeded, Message: "Successfully unpacked and stored content from resolved source", LastTransitionTime: metav1.Now()}, + }, + ResolvedSource: &catalogdv1.ResolvedCatalogSource{ + Type: catalogdv1.SourceTypeImage, + Image: &catalogdv1.ResolvedImageSource{Ref: ref.String()}, + }, + URLs: &catalogdv1.ClusterCatalogURLs{Base: "URL"}, + LastUnpacked: ptr.To(metav1.NewTime(time.Now().Truncate(time.Second))), + } reconciler := &ClusterCatalogReconciler{ Client: nil, - Unpacker: &MockSource{result: &source.Result{ - State: source.StateUnpacked, - FS: &fstest.MapFS{}, - ResolvedSource: &catalogdv1.ResolvedCatalogSource{ - Image: &catalogdv1.ResolvedImageSource{ - Ref: "my.org/someImage@someSHA256Digest", - }, + ImagePuller: &imageutil.MockPuller{ + ImageFS: &fstest.MapFS{}, + Ref: ref, + }, + Storage: &MockStore{}, + storedCatalogs: map[string]storedCatalogData{ + tc.catalog.Name: { + ref: ref, + lastSuccessfulPoll: tc.lastPollTime, + lastUnpack: tc.catalog.Status.LastUnpacked.Time, }, - LastSuccessfulPollAttempt: tc.lastPollTime, - }}, - Storage: &MockStore{}, - storedCatalogs: map[string]storedCatalogData{}, + }, } require.NoError(t, reconciler.setupFinalizers()) res, _ := reconciler.reconcile(context.Background(), tc.catalog) - assert.InDelta(t, tc.expectedRequeueAfter, res.RequeueAfter, requeueJitterMaxFactor*float64(tc.expectedRequeueAfter)) + assert.InDelta(t, tc.expectedRequeueAfter, res.RequeueAfter, 2*requeueJitterMaxFactor*float64(tc.expectedRequeueAfter)) }) } } @@ -843,6 +823,8 @@ func TestPollingReconcilerUnpack(t *testing.T) { newDigest := "f42337e7b85a46d83c94694638e2312e10ca16a03542399a65ba783c94a32b63" successfulObservedGeneration := int64(2) + successfulRef := mustRef(t, "my.org/someimage@sha256:"+oldDigest) + successfulUnpackTime := time.Time{} successfulUnpackStatus := func(mods ...func(status *catalogdv1.ClusterCatalogStatus)) catalogdv1.ClusterCatalogStatus { s := catalogdv1.ClusterCatalogStatus{ URLs: &catalogdv1.ClusterCatalogURLs{Base: "URL"}, @@ -865,24 +847,23 @@ func TestPollingReconcilerUnpack(t *testing.T) { ResolvedSource: &catalogdv1.ResolvedCatalogSource{ Type: catalogdv1.SourceTypeImage, Image: &catalogdv1.ResolvedImageSource{ - Ref: "my.org/someimage@sha256:" + oldDigest, + Ref: successfulRef.String(), }, }, - LastUnpacked: &metav1.Time{}, + LastUnpacked: ptr.To(metav1.NewTime(successfulUnpackTime)), } for _, mod := range mods { mod(&s) } return s } - successfulStoredCatalogData := func(lastPoll metav1.Time) map[string]storedCatalogData { + successfulStoredCatalogData := func(lastPoll time.Time) map[string]storedCatalogData { return map[string]storedCatalogData{ "test-catalog": { observedGeneration: successfulObservedGeneration, - unpackResult: source.Result{ - ResolvedSource: successfulUnpackStatus().ResolvedSource, - LastSuccessfulPollAttempt: lastPoll, - }, + ref: successfulRef, + lastUnpack: successfulUnpackTime, + lastSuccessfulPoll: lastPoll, }, } } @@ -927,7 +908,7 @@ func TestPollingReconcilerUnpack(t *testing.T) { }, Status: successfulUnpackStatus(), }, - storedCatalogData: successfulStoredCatalogData(metav1.Now()), + storedCatalogData: successfulStoredCatalogData(time.Now()), expectedUnpackRun: false, }, "ClusterCatalog not being resolved the first time, pollInterval mentioned, \"now\" is before next expected poll time, unpack should not run": { 
@@ -948,7 +929,7 @@ func TestPollingReconcilerUnpack(t *testing.T) { }, Status: successfulUnpackStatus(), }, - storedCatalogData: successfulStoredCatalogData(metav1.Now()), + storedCatalogData: successfulStoredCatalogData(time.Now()), expectedUnpackRun: false, }, "ClusterCatalog not being resolved the first time, pollInterval mentioned, \"now\" is after next expected poll time, unpack should run": { @@ -969,7 +950,7 @@ func TestPollingReconcilerUnpack(t *testing.T) { }, Status: successfulUnpackStatus(), }, - storedCatalogData: successfulStoredCatalogData(metav1.NewTime(time.Now().Add(-5 * time.Minute))), + storedCatalogData: successfulStoredCatalogData(time.Now().Add(-5 * time.Minute)), expectedUnpackRun: true, }, "ClusterCatalog not being resolved the first time, pollInterval mentioned, \"now\" is before next expected poll time, generation changed, unpack should run": { @@ -990,7 +971,7 @@ func TestPollingReconcilerUnpack(t *testing.T) { }, Status: successfulUnpackStatus(), }, - storedCatalogData: successfulStoredCatalogData(metav1.Now()), + storedCatalogData: successfulStoredCatalogData(time.Now()), expectedUnpackRun: true, }, "ClusterCatalog not being resolved the first time, no stored catalog in cache, unpack should run": { @@ -1033,7 +1014,7 @@ func TestPollingReconcilerUnpack(t *testing.T) { meta.FindStatusCondition(status.Conditions, catalogdv1.TypeProgressing).Status = metav1.ConditionTrue }), }, - storedCatalogData: successfulStoredCatalogData(metav1.Now()), + storedCatalogData: successfulStoredCatalogData(time.Now()), expectedUnpackRun: true, }, } { @@ -1044,7 +1025,7 @@ func TestPollingReconcilerUnpack(t *testing.T) { } reconciler := &ClusterCatalogReconciler{ Client: nil, - Unpacker: &MockSource{unpackError: errors.New("mocksource error")}, + ImagePuller: &imageutil.MockPuller{Error: errors.New("mockpuller error")}, Storage: &MockStore{}, storedCatalogs: scd, } @@ -1058,3 +1039,12 @@ func TestPollingReconcilerUnpack(t *testing.T) { }) } } + +func mustRef(t *testing.T, ref string) reference.Canonical { + t.Helper() + p, err := reference.Parse(ref) + if err != nil { + t.Fatal(err) + } + return p.(reference.Canonical) +} diff --git a/internal/catalogd/source/containers_image.go b/internal/catalogd/source/containers_image.go deleted file mode 100644 index 1ecc93237..000000000 --- a/internal/catalogd/source/containers_image.go +++ /dev/null @@ -1,340 +0,0 @@ -package source - -import ( - "context" - "errors" - "fmt" - "os" - "path/filepath" - "time" - - "github.com/containers/image/v5/copy" - "github.com/containers/image/v5/docker" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/oci/layout" - "github.com/containers/image/v5/pkg/sysregistriesv2" - "github.com/containers/image/v5/signature" - "github.com/containers/image/v5/types" - "github.com/go-logr/logr" - "github.com/opencontainers/go-digest" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1" - "github.com/operator-framework/operator-controller/internal/operator-controller/httputil" - fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs" - imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" -) - -const ConfigDirLabel = "operators.operatorframework.io.index.configs.v1" - -var insecurePolicy = 
[]byte(`{"default":[{"type":"insecureAcceptAnything"}]}`) - -type ContainersImageRegistry struct { - BaseCachePath string - SourceContextFunc func(logger logr.Logger) (*types.SystemContext, error) -} - -func (i *ContainersImageRegistry) Unpack(ctx context.Context, catalog *catalogdv1.ClusterCatalog) (*Result, error) { - l := log.FromContext(ctx) - - if catalog.Spec.Source.Type != catalogdv1.SourceTypeImage { - panic(fmt.Sprintf("programmer error: source type %q is unable to handle specified catalog source type %q", catalogdv1.SourceTypeImage, catalog.Spec.Source.Type)) - } - - if catalog.Spec.Source.Image == nil { - return nil, reconcile.TerminalError(fmt.Errorf("error parsing catalog, catalog %s has a nil image source", catalog.Name)) - } - - // Reload registries cache in case of configuration update - sysregistriesv2.InvalidateCache() - - srcCtx, err := i.SourceContextFunc(l) - if err != nil { - return nil, err - } - - res, err := i.unpack(ctx, catalog, srcCtx, l) - if err != nil { - // Log any CertificateVerificationErrors, and log Docker Certificates if necessary - if httputil.LogCertificateVerificationError(err, l) { - httputil.LogDockerCertificates(srcCtx.DockerCertPath, l) - } - } - return res, err -} - -func (i *ContainersImageRegistry) unpack(ctx context.Context, catalog *catalogdv1.ClusterCatalog, srcCtx *types.SystemContext, l logr.Logger) (*Result, error) { - ////////////////////////////////////////////////////// - // - // Resolve a canonical reference for the image. - // - ////////////////////////////////////////////////////// - imgRef, canonicalRef, specIsCanonical, err := resolveReferences(ctx, catalog.Spec.Source.Image.Ref, srcCtx) - if err != nil { - return nil, err - } - - ////////////////////////////////////////////////////// - // - // Check if the image is already unpacked. If it is, - // return the unpacked directory. - // - ////////////////////////////////////////////////////// - unpackPath := i.unpackPath(catalog.Name, canonicalRef.Digest()) - if unpackTime, err := fsutil.GetDirectoryModTime(unpackPath); err == nil { - l.Info("image already unpacked", "ref", imgRef.String(), "digest", canonicalRef.Digest().String()) - return successResult(unpackPath, canonicalRef, unpackTime), nil - } else if errors.Is(err, fsutil.ErrNotDirectory) { - if err := fsutil.DeleteReadOnlyRecursive(unpackPath); err != nil { - return nil, err - } - } else if err != nil && !os.IsNotExist(err) { - return nil, fmt.Errorf("error checking image already unpacked: %w", err) - } - - ////////////////////////////////////////////////////// - // - // Create a docker reference for the source and an OCI - // layout reference for the destination, where we will - // temporarily store the image in order to unpack it. - // - // We use the OCI layout as a temporary storage because - // copy.Image can concurrently pull all the layers. 
- // - ////////////////////////////////////////////////////// - dockerRef, err := docker.NewReference(imgRef) - if err != nil { - return nil, fmt.Errorf("error creating source reference: %w", err) - } - - layoutDir, err := os.MkdirTemp("", fmt.Sprintf("oci-layout-%s", catalog.Name)) - if err != nil { - return nil, fmt.Errorf("error creating temporary directory: %w", err) - } - defer func() { - if err := os.RemoveAll(layoutDir); err != nil { - l.Error(err, "error removing temporary OCI layout directory") - } - }() - - layoutRef, err := layout.NewReference(layoutDir, canonicalRef.String()) - if err != nil { - return nil, fmt.Errorf("error creating reference: %w", err) - } - - ////////////////////////////////////////////////////// - // - // Load an image signature policy and build - // a policy context for the image pull. - // - ////////////////////////////////////////////////////// - policyContext, err := loadPolicyContext(srcCtx, l) - if err != nil { - return nil, fmt.Errorf("error loading policy context: %w", err) - } - defer func() { - if err := policyContext.Destroy(); err != nil { - l.Error(err, "error destroying policy context") - } - }() - - ////////////////////////////////////////////////////// - // - // Pull the image from the source to the destination - // - ////////////////////////////////////////////////////// - if _, err := copy.Image(ctx, policyContext, layoutRef, dockerRef, ©.Options{ - SourceCtx: srcCtx, - // We use the OCI layout as a temporary storage and - // pushing signatures for OCI images is not supported - // so we remove the source signatures when copying. - // Signature validation will still be performed - // accordingly to a provided policy context. - RemoveSignatures: true, - }); err != nil { - return nil, fmt.Errorf("error copying image: %w", err) - } - l.Info("pulled image", "ref", imgRef.String(), "digest", canonicalRef.Digest().String()) - - ////////////////////////////////////////////////////// - // - // Mount the image we just pulled - // - ////////////////////////////////////////////////////// - if err := i.unpackImage(ctx, unpackPath, layoutRef, specIsCanonical, srcCtx); err != nil { - if cleanupErr := fsutil.DeleteReadOnlyRecursive(unpackPath); cleanupErr != nil { - err = errors.Join(err, cleanupErr) - } - return nil, fmt.Errorf("error unpacking image: %w", err) - } - - ////////////////////////////////////////////////////// - // - // Delete other images. They are no longer needed. - // - ////////////////////////////////////////////////////// - if err := i.deleteOtherImages(catalog.Name, canonicalRef.Digest()); err != nil { - return nil, fmt.Errorf("error deleting old images: %w", err) - } - - return successResult(unpackPath, canonicalRef, time.Now()), nil -} - -func successResult(unpackPath string, canonicalRef reference.Canonical, lastUnpacked time.Time) *Result { - return &Result{ - FS: os.DirFS(unpackPath), - ResolvedSource: &catalogdv1.ResolvedCatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ResolvedImageSource{ - Ref: canonicalRef.String(), - }, - }, - State: StateUnpacked, - Message: fmt.Sprintf("unpacked %q successfully", canonicalRef), - - // We truncate both the unpack time and last successful poll attempt - // to the second because metav1.Time is serialized - // as RFC 3339 which only has second-level precision. When we - // use this result in a comparison with what we deserialized - // from the Kubernetes API server, we need it to match. 
- UnpackTime: lastUnpacked.Truncate(time.Second), - LastSuccessfulPollAttempt: metav1.NewTime(time.Now().Truncate(time.Second)), - } -} - -func (i *ContainersImageRegistry) Cleanup(_ context.Context, catalog *catalogdv1.ClusterCatalog) error { - if err := fsutil.DeleteReadOnlyRecursive(i.catalogPath(catalog.Name)); err != nil { - return fmt.Errorf("error deleting catalog cache: %w", err) - } - return nil -} - -func (i *ContainersImageRegistry) catalogPath(catalogName string) string { - return filepath.Join(i.BaseCachePath, catalogName) -} - -func (i *ContainersImageRegistry) unpackPath(catalogName string, digest digest.Digest) string { - return filepath.Join(i.catalogPath(catalogName), digest.String()) -} - -func resolveReferences(ctx context.Context, ref string, sourceContext *types.SystemContext) (reference.Named, reference.Canonical, bool, error) { - imgRef, err := reference.ParseNamed(ref) - if err != nil { - return nil, nil, false, reconcile.TerminalError(fmt.Errorf("error parsing image reference %q: %w", ref, err)) - } - - canonicalRef, isCanonical, err := resolveCanonicalRef(ctx, imgRef, sourceContext) - if err != nil { - return nil, nil, false, fmt.Errorf("error resolving canonical reference: %w", err) - } - return imgRef, canonicalRef, isCanonical, nil -} - -func resolveCanonicalRef(ctx context.Context, imgRef reference.Named, imageCtx *types.SystemContext) (reference.Canonical, bool, error) { - if canonicalRef, ok := imgRef.(reference.Canonical); ok { - return canonicalRef, true, nil - } - - srcRef, err := docker.NewReference(imgRef) - if err != nil { - return nil, false, reconcile.TerminalError(fmt.Errorf("error creating reference: %w", err)) - } - - imgSrc, err := srcRef.NewImageSource(ctx, imageCtx) - if err != nil { - return nil, false, fmt.Errorf("error creating image source: %w", err) - } - defer imgSrc.Close() - - imgManifestData, _, err := imgSrc.GetManifest(ctx, nil) - if err != nil { - return nil, false, fmt.Errorf("error getting manifest: %w", err) - } - imgDigest, err := manifest.Digest(imgManifestData) - if err != nil { - return nil, false, fmt.Errorf("error getting digest of manifest: %w", err) - } - canonicalRef, err := reference.WithDigest(reference.TrimNamed(imgRef), imgDigest) - if err != nil { - return nil, false, fmt.Errorf("error creating canonical reference: %w", err) - } - return canonicalRef, false, nil -} - -func loadPolicyContext(sourceContext *types.SystemContext, l logr.Logger) (*signature.PolicyContext, error) { - policy, err := signature.DefaultPolicy(sourceContext) - // TODO: there are security implications to silently moving to an insecure policy - // tracking issue: https://github.com/operator-framework/operator-controller/issues/1622 - if err != nil { - l.Info("no default policy found, using insecure policy") - policy, err = signature.NewPolicyFromBytes(insecurePolicy) - } - if err != nil { - return nil, fmt.Errorf("error loading default policy: %w", err) - } - return signature.NewPolicyContext(policy) -} - -func (i *ContainersImageRegistry) unpackImage(ctx context.Context, unpackPath string, imageReference types.ImageReference, specIsCanonical bool, sourceContext *types.SystemContext) error { - img, err := imageReference.NewImage(ctx, sourceContext) - if err != nil { - return fmt.Errorf("error reading image: %w", err) - } - defer func() { - if err := img.Close(); err != nil { - panic(err) - } - }() - - layoutSrc, err := imageReference.NewImageSource(ctx, sourceContext) - if err != nil { - return fmt.Errorf("error creating image source: %w", err) 
- } - - cfg, err := img.OCIConfig(ctx) - if err != nil { - return fmt.Errorf("error parsing image config: %w", err) - } - - dirToUnpack, ok := cfg.Config.Labels[ConfigDirLabel] - if !ok { - // If the spec is a tagged ref, retries could end up resolving a new digest, where the label - // might show up. If the spec is canonical, no amount of retries will make the label appear. - // Therefore, we treat the error as terminal if the reference from the spec is canonical. - return wrapTerminal(fmt.Errorf("catalog image is missing the required label %q", ConfigDirLabel), specIsCanonical) - } - - applyFilter := imageutil.AllFilters( - imageutil.OnlyPath(dirToUnpack), - imageutil.ForceOwnershipRWX(), - ) - return imageutil.ApplyLayersToDisk(ctx, unpackPath, img, layoutSrc, applyFilter) -} - -func (i *ContainersImageRegistry) deleteOtherImages(catalogName string, digestToKeep digest.Digest) error { - catalogPath := i.catalogPath(catalogName) - imgDirs, err := os.ReadDir(catalogPath) - if err != nil { - return fmt.Errorf("error reading image directories: %w", err) - } - for _, imgDir := range imgDirs { - if imgDir.Name() == digestToKeep.String() { - continue - } - imgDirPath := filepath.Join(catalogPath, imgDir.Name()) - if err := fsutil.DeleteReadOnlyRecursive(imgDirPath); err != nil { - return fmt.Errorf("error removing image directory: %w", err) - } - } - return nil -} - -func wrapTerminal(err error, isTerminal bool) error { - if !isTerminal { - return err - } - return reconcile.TerminalError(err) -} diff --git a/internal/catalogd/source/containers_image_test.go b/internal/catalogd/source/containers_image_test.go deleted file mode 100644 index 59e8523b6..000000000 --- a/internal/catalogd/source/containers_image_test.go +++ /dev/null @@ -1,477 +0,0 @@ -package source_test - -import ( - "bytes" - "context" - "errors" - "fmt" - "net/http/httptest" - "net/url" - "os" - "path/filepath" - "testing" - "time" - - "github.com/containers/image/v5/types" - "github.com/go-logr/logr" - "github.com/go-logr/logr/funcr" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/registry" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/mutate" - "github.com/google/go-containerregistry/pkg/v1/random" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1" - "github.com/operator-framework/operator-controller/internal/catalogd/source" -) - -func TestImageRegistry(t *testing.T) { - for _, tt := range []struct { - name string - // catalog is the Catalog passed to the Unpack function. - // if the Catalog.Spec.Source.Image.Ref field is empty, - // one is injected during test runtime to ensure it - // points to the registry created for the test - catalog *catalogdv1.ClusterCatalog - wantErr bool - terminal bool - image v1.Image - digestAlreadyExists bool - oldDigestExists bool - // refType is the type of image ref this test - // is using. 
Should be one of "tag","digest" - refType string - }{ - { - name: ".spec.source.image is nil", - catalog: &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: nil, - }, - }, - }, - wantErr: true, - terminal: true, - refType: "tag", - }, - { - name: ".spec.source.image.ref is unparsable", - catalog: &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ImageSource{ - Ref: "::)12-as^&8asd789A(::", - }, - }, - }, - }, - wantErr: true, - terminal: true, - refType: "tag", - }, - { - name: "tag based, image is missing required label", - catalog: &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ImageSource{ - Ref: "", - }, - }, - }, - }, - wantErr: true, - image: func() v1.Image { - img, err := random.Image(20, 3) - if err != nil { - panic(err) - } - return img - }(), - refType: "tag", - }, - { - name: "digest based, image is missing required label", - catalog: &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ImageSource{ - Ref: "", - }, - }, - }, - }, - wantErr: true, - terminal: true, - image: func() v1.Image { - img, err := random.Image(20, 3) - if err != nil { - panic(err) - } - return img - }(), - refType: "digest", - }, - { - name: "image doesn't exist", - catalog: &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ImageSource{ - Ref: "", - }, - }, - }, - }, - wantErr: true, - refType: "tag", - }, - { - name: "tag based image, digest already exists in cache", - catalog: &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ImageSource{ - Ref: "", - }, - }, - }, - }, - wantErr: false, - image: func() v1.Image { - img, err := random.Image(20, 3) - if err != nil { - panic(err) - } - return img - }(), - digestAlreadyExists: true, - refType: "tag", - }, - { - name: "digest based image, digest already exists in cache", - catalog: &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ImageSource{ - Ref: "", - }, - }, - }, - }, - wantErr: false, - digestAlreadyExists: true, - refType: "digest", - image: func() v1.Image { - img, err := random.Image(20, 3) - if err != nil { - panic(err) - } - return img - }(), - }, - { - name: "old ref is cached", - catalog: &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ImageSource{ - Ref: "", - }, - }, - }, - }, - wantErr: false, - oldDigestExists: true, - refType: "tag", - image: func() v1.Image { - 
img, err := random.Image(20, 3) - if err != nil { - panic(err) - } - img, err = mutate.Config(img, v1.Config{ - Labels: map[string]string{ - source.ConfigDirLabel: "/configs", - }, - }) - if err != nil { - panic(err) - } - return img - }(), - }, - { - name: "tag ref, happy path", - catalog: &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ImageSource{ - Ref: "", - }, - }, - }, - }, - wantErr: false, - refType: "tag", - image: func() v1.Image { - img, err := random.Image(20, 3) - if err != nil { - panic(err) - } - img, err = mutate.Config(img, v1.Config{ - Labels: map[string]string{ - source.ConfigDirLabel: "/configs", - }, - }) - if err != nil { - panic(err) - } - return img - }(), - }, - { - name: "digest ref, happy path", - catalog: &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ImageSource{ - Ref: "", - }, - }, - }, - }, - wantErr: false, - refType: "digest", - image: func() v1.Image { - img, err := random.Image(20, 3) - if err != nil { - panic(err) - } - img, err = mutate.Config(img, v1.Config{ - Labels: map[string]string{ - source.ConfigDirLabel: "/configs", - }, - }) - if err != nil { - panic(err) - } - return img - }(), - }, - } { - t.Run(tt.name, func(t *testing.T) { - // Create context, temporary cache directory, - // and image registry source - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - testCache := t.TempDir() - imgReg := &source.ContainersImageRegistry{ - BaseCachePath: testCache, - SourceContextFunc: func(logger logr.Logger) (*types.SystemContext, error) { - return &types.SystemContext{ - OCIInsecureSkipTLSVerify: true, - DockerInsecureSkipTLSVerify: types.OptionalBoolTrue, - }, nil - }, - } - - // Create a logger with a simple function-based LogSink that writes to the buffer - var buf bytes.Buffer - logger := funcr.New(func(prefix, args string) { - buf.WriteString(fmt.Sprintf("%s %s\n", prefix, args)) - }, funcr.Options{Verbosity: 1}) - - // Add the logger into the context which will later be used - // in the Unpack function to get the logger - ctx = log.IntoContext(ctx, logger) - - // Start a new server running an image registry - srv := httptest.NewServer(registry.New()) - defer srv.Close() - - // parse the server url so we can grab just the host - url, err := url.Parse(srv.URL) - require.NoError(t, err) - - // Build the proper image name with {registry}/tt.imgName - imgName, err := name.ParseReference(fmt.Sprintf("%s/%s", url.Host, "test-image:test")) - require.NoError(t, err) - - // If an old digest should exist in the cache, create one - oldDigestDir := filepath.Join(testCache, tt.catalog.Name, "olddigest") - var oldDigestModTime time.Time - if tt.oldDigestExists { - require.NoError(t, os.MkdirAll(oldDigestDir, os.ModePerm)) - oldDigestDirStat, err := os.Stat(oldDigestDir) - require.NoError(t, err) - oldDigestModTime = oldDigestDirStat.ModTime() - } - - var digest v1.Hash - // if the test specifies a method that returns a v1.Image, - // call it and push the image to the registry - if tt.image != nil { - digest, err = tt.image.Digest() - require.NoError(t, err) - - // if the digest should already exist in the cache, create it - if tt.digestAlreadyExists { - err = os.MkdirAll(filepath.Join(testCache, 
tt.catalog.Name, digest.String()), os.ModePerm) - require.NoError(t, err) - } - - err = remote.Write(imgName, tt.image) - require.NoError(t, err) - - // if the image ref should be a digest ref, make it so - if tt.refType == "digest" { - imgName, err = name.ParseReference(fmt.Sprintf("%s/%s", url.Host, "test-image@sha256:"+digest.Hex)) - require.NoError(t, err) - } - } - - // Inject the image reference if needed - if tt.catalog.Spec.Source.Image != nil && tt.catalog.Spec.Source.Image.Ref == "" { - tt.catalog.Spec.Source.Image.Ref = imgName.Name() - } - - rs, err := imgReg.Unpack(ctx, tt.catalog) - if !tt.wantErr { - require.NoError(t, err) - assert.Equal(t, fmt.Sprintf("%s@sha256:%s", imgName.Context().Name(), digest.Hex), rs.ResolvedSource.Image.Ref) - assert.Equal(t, source.StateUnpacked, rs.State) - - unpackDir := filepath.Join(testCache, tt.catalog.Name, digest.String()) - assert.DirExists(t, unpackDir) - unpackDirStat, err := os.Stat(unpackDir) - require.NoError(t, err) - - entries, err := os.ReadDir(filepath.Join(testCache, tt.catalog.Name)) - require.NoError(t, err) - assert.Len(t, entries, 1) - // If the digest should already exist check that we actually hit it - if tt.digestAlreadyExists { - assert.Contains(t, buf.String(), "image already unpacked") - assert.Equal(t, rs.UnpackTime, unpackDirStat.ModTime().Truncate(time.Second)) - } else if tt.oldDigestExists { - assert.NotContains(t, buf.String(), "image already unpacked") - assert.NotEqual(t, rs.UnpackTime, oldDigestModTime) - assert.NoDirExists(t, oldDigestDir) - } else { - require.NotNil(t, rs.UnpackTime) - require.NotNil(t, rs.ResolvedSource.Image) - assert.False(t, rs.UnpackTime.IsZero()) - } - } else { - require.Error(t, err) - isTerminal := errors.Is(err, reconcile.TerminalError(nil)) - assert.Equal(t, tt.terminal, isTerminal, "expected terminal %v, got %v", tt.terminal, isTerminal) - } - - assert.NoError(t, imgReg.Cleanup(ctx, tt.catalog)) - assert.NoError(t, imgReg.Cleanup(ctx, tt.catalog), "cleanup should ignore missing files") - }) - } -} - -// TestImageRegistryMissingLabelConsistentFailure is a test -// case that specifically tests that multiple calls to the -// ImageRegistry.Unpack() method return an error and is meant -// to ensure coverage of the bug reported in -// https://github.com/operator-framework/operator-controller/catalogd/issues/206 -func TestImageRegistryMissingLabelConsistentFailure(t *testing.T) { - // Create context, temporary cache directory, - // and image registry source - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - testCache := t.TempDir() - imgReg := &source.ContainersImageRegistry{ - BaseCachePath: testCache, - SourceContextFunc: func(logger logr.Logger) (*types.SystemContext, error) { - return &types.SystemContext{}, nil - }, - } - - // Start a new server running an image registry - srv := httptest.NewServer(registry.New()) - defer srv.Close() - - // parse the server url so we can grab just the host - url, err := url.Parse(srv.URL) - require.NoError(t, err) - - imgName, err := name.ParseReference(fmt.Sprintf("%s/%s", url.Host, "test-image:test")) - require.NoError(t, err) - - image, err := random.Image(20, 20) - require.NoError(t, err) - - err = remote.Write(imgName, image) - require.NoError(t, err) - - catalog := &catalogdv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: catalogdv1.ClusterCatalogSpec{ - Source: catalogdv1.CatalogSource{ - Type: catalogdv1.SourceTypeImage, - Image: &catalogdv1.ImageSource{ - Ref: imgName.Name(), - 
}, - }, - }, - } - - for i := 0; i < 3; i++ { - _, err = imgReg.Unpack(ctx, catalog) - require.Error(t, err, "unpack run ", i) - } -} diff --git a/internal/catalogd/source/unpacker.go b/internal/catalogd/source/unpacker.go deleted file mode 100644 index f0bb2449c..000000000 --- a/internal/catalogd/source/unpacker.go +++ /dev/null @@ -1,72 +0,0 @@ -package source - -import ( - "context" - "io/fs" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1" -) - -// TODO: This package is almost entirely copy/pasted from rukpak. We should look -// into whether it is possible to share this code. -// -// TODO: None of the rukpak CRD validations (both static and from the rukpak -// webhooks) related to the source are present here. Which of them do we need? - -// Unpacker unpacks catalog content, either synchronously or asynchronously and -// returns a Result, which conveys information about the progress of unpacking -// the catalog content. -// -// If a Source unpacks content asynchronously, it should register one or more -// watches with a controller to ensure that Bundles referencing this source -// can be reconciled as progress updates are available. -// -// For asynchronous Sources, multiple calls to Unpack should be made until the -// returned result includes state StateUnpacked. -// -// NOTE: A source is meant to be agnostic to specific catalog formats and -// specifications. A source should treat a catalog root directory as an opaque -// file tree and delegate catalog format concerns to catalog parsers. -type Unpacker interface { - Unpack(context.Context, *catalogdv1.ClusterCatalog) (*Result, error) - Cleanup(context.Context, *catalogdv1.ClusterCatalog) error -} - -// Result conveys progress information about unpacking catalog content. -type Result struct { - // Bundle contains the full filesystem of a catalog's root directory. - FS fs.FS - - // ResolvedSource is a reproducible view of a Bundle's Source. - // When possible, source implementations should return a ResolvedSource - // that pins the Source such that future fetches of the catalog content can - // be guaranteed to fetch the exact same catalog content as the original - // unpack. - // - // For example, resolved image sources should reference a container image - // digest rather than an image tag, and git sources should reference a - // commit hash rather than a branch or tag. - ResolvedSource *catalogdv1.ResolvedCatalogSource - - LastSuccessfulPollAttempt metav1.Time - - // State is the current state of unpacking the catalog content. - State State - - // Message is contextual information about the progress of unpacking the - // catalog content. - Message string - - // UnpackTime is the timestamp when the transition to the current State happened - UnpackTime time.Time -} - -type State string - -// StateUnpacked conveys that the catalog has been successfully unpacked. 
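A minimal sketch of a caller honoring the contract documented above (the loop shape and error handling are illustrative, not mandated by the interface; Unpacker, Result, and StateUnpacked are the types defined in this file):

// ensureUnpacked shows the documented polling contract: call Unpack until
// the Result reports StateUnpacked, then consume the returned filesystem.
func ensureUnpacked(ctx context.Context, u Unpacker, catalog *catalogdv1.ClusterCatalog) (*Result, error) {
	res, err := u.Unpack(ctx, catalog)
	if err != nil {
		return nil, err
	}
	if res.State != StateUnpacked {
		// An asynchronous source reports intermediate progress; the caller
		// requeues and invokes Unpack again on the next reconcile.
		return nil, fmt.Errorf("unpack still in progress: %s", res.Message)
	}
	return res, nil
}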
-const StateUnpacked State = "Unpacked" - -const UnpackCacheDir = "unpack" diff --git a/internal/operator-controller/catalogmetadata/client/client.go b/internal/operator-controller/catalogmetadata/client/client.go index 6407b2acc..d70fd083e 100644 --- a/internal/operator-controller/catalogmetadata/client/client.go +++ b/internal/operator-controller/catalogmetadata/client/client.go @@ -16,7 +16,7 @@ import ( "github.com/operator-framework/operator-registry/alpha/declcfg" catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1" - "github.com/operator-framework/operator-controller/internal/operator-controller/httputil" + httputil "github.com/operator-framework/operator-controller/internal/shared/util/http" ) const ( diff --git a/internal/operator-controller/controllers/clusterextension_controller.go b/internal/operator-controller/controllers/clusterextension_controller.go index 5b8a56211..32e66ceac 100644 --- a/internal/operator-controller/controllers/clusterextension_controller.go +++ b/internal/operator-controller/controllers/clusterextension_controller.go @@ -56,7 +56,7 @@ import ( "github.com/operator-framework/operator-controller/internal/operator-controller/contentmanager" "github.com/operator-framework/operator-controller/internal/operator-controller/labels" "github.com/operator-framework/operator-controller/internal/operator-controller/resolve" - rukpaksource "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/source" + imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" ) const ( @@ -67,8 +67,11 @@ const ( // ClusterExtensionReconciler reconciles a ClusterExtension object type ClusterExtensionReconciler struct { client.Client - Resolver resolve.Resolver - Unpacker rukpaksource.Unpacker + Resolver resolve.Resolver + + ImageCache imageutil.Cache + ImagePuller imageutil.Puller + Applier Applier Manager contentmanager.Manager controller crcontroller.Controller @@ -185,7 +188,7 @@ func checkForUnexpectedFieldChange(a, b ocv1.ClusterExtension) bool { 4. Install: The process of installing involves: 4.1 Converting the CSV in the bundle into a set of plain k8s objects. 4.2 Generating a chart from k8s objects. -4.3 Apply the release on cluster. +4.3 Store the release on cluster. 
*/ //nolint:unparam func (r *ClusterExtensionReconciler) reconcile(ctx context.Context, ext *ocv1.ClusterExtension) (ctrl.Result, error) { @@ -250,16 +253,9 @@ func (r *ClusterExtensionReconciler) reconcile(ctx context.Context, ext *ocv1.Cl SetDeprecationStatus(ext, resolvedBundle.Name, resolvedDeprecation) resolvedBundleMetadata := bundleutil.MetadataFor(resolvedBundle.Name, *resolvedBundleVersion) - bundleSource := &rukpaksource.BundleSource{ - Name: ext.GetName(), - Type: rukpaksource.SourceTypeImage, - Image: &rukpaksource.ImageSource{ - Ref: resolvedBundle.Image, - }, - } l.Info("unpacking resolved bundle") - unpackResult, err := r.Unpacker.Unpack(ctx, bundleSource) + imageFS, _, _, err := r.ImagePuller.Pull(ctx, ext.GetName(), resolvedBundle.Image, r.ImageCache) if err != nil { // Wrap the error passed to this with the resolution information until we have successfully // installed since we intend for the progressing condition to replace the resolved condition @@ -269,10 +265,6 @@ func (r *ClusterExtensionReconciler) reconcile(ctx context.Context, ext *ocv1.Cl return ctrl.Result{}, err } - if unpackResult.State != rukpaksource.StateUnpacked { - panic(fmt.Sprintf("unexpected unpack state %q", unpackResult.State)) - } - objLbls := map[string]string{ labels.OwnerKindKey: ocv1.ClusterExtensionKind, labels.OwnerNameKey: ext.GetName(), @@ -295,7 +287,7 @@ func (r *ClusterExtensionReconciler) reconcile(ctx context.Context, ext *ocv1.Cl // to ensure exponential backoff can occur: // - Permission errors (it is not possible to watch changes to permissions. // The only way to eventually recover from permission errors is to keep retrying). - managedObjs, _, err := r.Applier.Apply(ctx, unpackResult.Bundle, ext, objLbls, storeLbls) + managedObjs, _, err := r.Applier.Apply(ctx, imageFS, ext, objLbls, storeLbls) if err != nil { setStatusProgressing(ext, wrapErrorWithResolutionInfo(resolvedBundleMetadata, err)) // Now that we're actually trying to install, use the error diff --git a/internal/operator-controller/controllers/clusterextension_controller_test.go b/internal/operator-controller/controllers/clusterextension_controller_test.go index 34fa36c59..bd6c031c4 100644 --- a/internal/operator-controller/controllers/clusterextension_controller_test.go +++ b/internal/operator-controller/controllers/clusterextension_controller_test.go @@ -34,7 +34,7 @@ import ( "github.com/operator-framework/operator-controller/internal/operator-controller/finalizers" "github.com/operator-framework/operator-controller/internal/operator-controller/labels" "github.com/operator-framework/operator-controller/internal/operator-controller/resolve" - "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/source" + imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" ) // Describe: ClusterExtension Controller Test @@ -102,24 +102,24 @@ func TestClusterExtensionResolutionFails(t *testing.T) { func TestClusterExtensionResolutionSuccessfulUnpackFails(t *testing.T) { type testCase struct { name string - unpackErr error + pullErr error expectTerminal bool } for _, tc := range []testCase{ { - name: "non-terminal unpack failure", - unpackErr: errors.New("unpack failure"), + name: "non-terminal pull failure", + pullErr: errors.New("pull failure"), }, { - name: "terminal unpack failure", - unpackErr: reconcile.TerminalError(errors.New("terminal unpack failure")), + name: "terminal pull failure", + pullErr: reconcile.TerminalError(errors.New("terminal pull failure")), 
expectTerminal: true, }, } { t.Run(tc.name, func(t *testing.T) { cl, reconciler := newClientAndReconciler(t) - reconciler.Unpacker = &MockUnpacker{ - err: tc.unpackErr, + reconciler.ImagePuller = &imageutil.MockPuller{ + Error: tc.pullErr, } ctx := context.Background() @@ -169,7 +169,7 @@ func TestClusterExtensionResolutionSuccessfulUnpackFails(t *testing.T) { isTerminal := errors.Is(err, reconcile.TerminalError(nil)) assert.Equal(t, tc.expectTerminal, isTerminal, "expected terminal error: %v, got: %v", tc.expectTerminal, isTerminal) - require.ErrorContains(t, err, tc.unpackErr.Error()) + require.ErrorContains(t, err, tc.pullErr.Error()) t.Log("By fetching updated cluster extension after reconcile") require.NoError(t, cl.Get(ctx, extKey, clusterExtension)) @@ -196,70 +196,10 @@ func TestClusterExtensionResolutionSuccessfulUnpackFails(t *testing.T) { } } -func TestClusterExtensionUnpackUnexpectedState(t *testing.T) { - cl, reconciler := newClientAndReconciler(t) - reconciler.Unpacker = &MockUnpacker{ - result: &source.Result{ - State: "unexpected", - }, - } - - ctx := context.Background() - extKey := types.NamespacedName{Name: fmt.Sprintf("cluster-extension-test-%s", rand.String(8))} - - t.Log("When the cluster extension specifies a channel with version that exist") - t.Log("By initializing cluster state") - pkgName := "prometheus" - pkgVer := "1.0.0" - pkgChan := "beta" - namespace := fmt.Sprintf("test-ns-%s", rand.String(8)) - serviceAccount := fmt.Sprintf("test-sa-%s", rand.String(8)) - - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{Name: extKey.Name}, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogSource{ - PackageName: pkgName, - Version: pkgVer, - Channels: []string{pkgChan}, - }, - }, - Namespace: namespace, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: serviceAccount, - }, - }, - } - err := cl.Create(ctx, clusterExtension) - require.NoError(t, err) - - t.Log("It sets resolution success status") - t.Log("By running reconcile") - reconciler.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bsemver.Version, *declcfg.Deprecation, error) { - v := bsemver.MustParse("1.0.0") - return &declcfg.Bundle{ - Name: "prometheus.v1.0.0", - Package: "prometheus", - Image: "quay.io/operatorhubio/prometheus@fake1.0.0", - }, &v, nil, nil - }) - - require.Panics(t, func() { - _, _ = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: extKey}) - }, "reconciliation should panic on unknown unpack state") - - require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{})) -} - func TestClusterExtensionResolutionAndUnpackSuccessfulApplierFails(t *testing.T) { cl, reconciler := newClientAndReconciler(t) - reconciler.Unpacker = &MockUnpacker{ - result: &source.Result{ - State: source.StateUnpacked, - Bundle: fstest.MapFS{}, - }, + reconciler.ImagePuller = &imageutil.MockPuller{ + ImageFS: fstest.MapFS{}, } ctx := context.Background() @@ -389,11 +329,8 @@ func TestClusterExtensionServiceAccountNotFound(t *testing.T) { func TestClusterExtensionApplierFailsWithBundleInstalled(t *testing.T) { cl, reconciler := newClientAndReconciler(t) - reconciler.Unpacker = &MockUnpacker{ - result: &source.Result{ - State: source.StateUnpacked, - Bundle: fstest.MapFS{}, - }, + reconciler.ImagePuller = &imageutil.MockPuller{ + ImageFS: fstest.MapFS{}, } ctx := context.Background() @@ -488,11 +425,8 @@ func 
TestClusterExtensionApplierFailsWithBundleInstalled(t *testing.T) { func TestClusterExtensionManagerFailed(t *testing.T) { cl, reconciler := newClientAndReconciler(t) - reconciler.Unpacker = &MockUnpacker{ - result: &source.Result{ - State: source.StateUnpacked, - Bundle: fstest.MapFS{}, - }, + reconciler.ImagePuller = &imageutil.MockPuller{ + ImageFS: fstest.MapFS{}, } ctx := context.Background() @@ -569,11 +503,8 @@ func TestClusterExtensionManagerFailed(t *testing.T) { func TestClusterExtensionManagedContentCacheWatchFail(t *testing.T) { cl, reconciler := newClientAndReconciler(t) - reconciler.Unpacker = &MockUnpacker{ - result: &source.Result{ - State: source.StateUnpacked, - Bundle: fstest.MapFS{}, - }, + reconciler.ImagePuller = &imageutil.MockPuller{ + ImageFS: fstest.MapFS{}, } ctx := context.Background() @@ -653,11 +584,8 @@ func TestClusterExtensionManagedContentCacheWatchFail(t *testing.T) { func TestClusterExtensionInstallationSucceeds(t *testing.T) { cl, reconciler := newClientAndReconciler(t) - reconciler.Unpacker = &MockUnpacker{ - result: &source.Result{ - State: source.StateUnpacked, - Bundle: fstest.MapFS{}, - }, + reconciler.ImagePuller = &imageutil.MockPuller{ + ImageFS: fstest.MapFS{}, } ctx := context.Background() @@ -734,11 +662,8 @@ func TestClusterExtensionInstallationSucceeds(t *testing.T) { func TestClusterExtensionDeleteFinalizerFails(t *testing.T) { cl, reconciler := newClientAndReconciler(t) - reconciler.Unpacker = &MockUnpacker{ - result: &source.Result{ - State: source.StateUnpacked, - Bundle: fstest.MapFS{}, - }, + reconciler.ImagePuller = &imageutil.MockPuller{ + ImageFS: fstest.MapFS{}, } ctx := context.Background() diff --git a/internal/operator-controller/controllers/suite_test.go b/internal/operator-controller/controllers/suite_test.go index 669f6a94f..af93bf337 100644 --- a/internal/operator-controller/controllers/suite_test.go +++ b/internal/operator-controller/controllers/suite_test.go @@ -39,28 +39,8 @@ import ( "github.com/operator-framework/operator-controller/internal/operator-controller/contentmanager" cmcache "github.com/operator-framework/operator-controller/internal/operator-controller/contentmanager/cache" "github.com/operator-framework/operator-controller/internal/operator-controller/controllers" - "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/source" ) -// MockUnpacker is a mock of Unpacker interface -type MockUnpacker struct { - err error - result *source.Result -} - -// Unpack mocks the Unpack method -func (m *MockUnpacker) Unpack(_ context.Context, _ *source.BundleSource) (*source.Result, error) { - if m.err != nil { - return nil, m.err - } - return m.result, nil -} - -func (m *MockUnpacker) Cleanup(_ context.Context, _ *source.BundleSource) error { - // TODO implement me - panic("implement me") -} - func newClient(t *testing.T) client.Client { // TODO: this is a live client, which behaves differently than a cache client. // We may want to use a caching client instead to get closer to real behavior. 
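Neither imageutil.Puller nor imageutil.MockPuller is defined in this patch. Judging from the reconcile call site above (imageFS, _, _, err := r.ImagePuller.Pull(ctx, ext.GetName(), resolvedBundle.Image, r.ImageCache)) and the Cache interface added later in this series, they plausibly reduce to the following sketch; the two discarded return values are presumably the resolved canonical reference and the unpack time.

// Inferred shape, not the definition from this patch: Pull resolves ref,
// populates cache on a miss, and returns the unpacked image filesystem.
type Puller interface {
	Pull(ctx context.Context, ownerID, ref string, cache Cache) (fs.FS, reference.Canonical, time.Time, error)
}

// Plausible MockPuller, based solely on the fields the tests above set.
type MockPuller struct {
	ImageFS fs.FS
	Error   error
}

func (m *MockPuller) Pull(_ context.Context, _, _ string, _ Cache) (fs.FS, reference.Canonical, time.Time, error) {
	if m.Error != nil {
		return nil, nil, time.Time{}, m.Error
	}
	return m.ImageFS, nil, time.Time{}, nil
}

Under this shape the terminal-error tests keep working unchanged: a TerminalError-wrapped Error surfaces as-is, so errors.Is(err, reconcile.TerminalError(nil)) still distinguishes terminal from retryable pull failures.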
diff --git a/internal/operator-controller/rukpak/source/containers_image.go b/internal/operator-controller/rukpak/source/containers_image.go deleted file mode 100644 index 1eebdfb56..000000000 --- a/internal/operator-controller/rukpak/source/containers_image.go +++ /dev/null @@ -1,299 +0,0 @@ -package source - -import ( - "context" - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/containers/image/v5/copy" - "github.com/containers/image/v5/docker" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/oci/layout" - "github.com/containers/image/v5/pkg/sysregistriesv2" - "github.com/containers/image/v5/signature" - "github.com/containers/image/v5/types" - "github.com/go-logr/logr" - "github.com/opencontainers/go-digest" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/operator-framework/operator-controller/internal/operator-controller/httputil" - fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs" - imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" -) - -var insecurePolicy = []byte(`{"default":[{"type":"insecureAcceptAnything"}]}`) - -type ContainersImageRegistry struct { - BaseCachePath string - SourceContextFunc func(logger logr.Logger) (*types.SystemContext, error) -} - -func (i *ContainersImageRegistry) Unpack(ctx context.Context, bundle *BundleSource) (*Result, error) { - l := log.FromContext(ctx) - - if bundle.Type != SourceTypeImage { - panic(fmt.Sprintf("programmer error: source type %q is unable to handle specified bundle source type %q", SourceTypeImage, bundle.Type)) - } - - if bundle.Image == nil { - return nil, reconcile.TerminalError(fmt.Errorf("error parsing bundle, bundle %s has a nil image source", bundle.Name)) - } - - // Reload registries cache in case of configuration update - sysregistriesv2.InvalidateCache() - - srcCtx, err := i.SourceContextFunc(l) - if err != nil { - return nil, err - } - - res, err := i.unpack(ctx, bundle, srcCtx, l) - if err != nil { - // Log any CertificateVerificationErrors, and log Docker Certificates if necessary - if httputil.LogCertificateVerificationError(err, l) { - httputil.LogDockerCertificates(srcCtx.DockerCertPath, l) - } - } - return res, err -} - -func (i *ContainersImageRegistry) unpack(ctx context.Context, bundle *BundleSource, srcCtx *types.SystemContext, l logr.Logger) (*Result, error) { - ////////////////////////////////////////////////////// - // - // Resolve a canonical reference for the image. - // - ////////////////////////////////////////////////////// - imgRef, canonicalRef, _, err := resolveReferences(ctx, bundle.Image.Ref, srcCtx) - if err != nil { - return nil, err - } - - ////////////////////////////////////////////////////// - // - // Check if the image is already unpacked. If it is, - // return the unpacked directory. 
- // - ////////////////////////////////////////////////////// - unpackPath := i.unpackPath(bundle.Name, canonicalRef.Digest()) - if _, err := fsutil.GetDirectoryModTime(unpackPath); err == nil { - l.Info("image already unpacked", "ref", imgRef.String(), "digest", canonicalRef.Digest().String()) - return successResult(bundle.Name, unpackPath, canonicalRef), nil - } else if errors.Is(err, fsutil.ErrNotDirectory) { - if err := fsutil.DeleteReadOnlyRecursive(unpackPath); err != nil { - return nil, err - } - } else if err != nil && !os.IsNotExist(err) { - return nil, fmt.Errorf("error checking image already unpacked: %w", err) - } - - ////////////////////////////////////////////////////// - // - // Create a docker reference for the source and an OCI - // layout reference for the destination, where we will - // temporarily store the image in order to unpack it. - // - // We use the OCI layout as a temporary storage because - // copy.Image can concurrently pull all the layers. - // - ////////////////////////////////////////////////////// - dockerRef, err := docker.NewReference(imgRef) - if err != nil { - return nil, fmt.Errorf("error creating source reference: %w", err) - } - - layoutDir, err := os.MkdirTemp("", fmt.Sprintf("oci-layout-%s", bundle.Name)) - if err != nil { - return nil, fmt.Errorf("error creating temporary directory: %w", err) - } - defer func() { - if err := os.RemoveAll(layoutDir); err != nil { - l.Error(err, "error removing temporary OCI layout directory") - } - }() - - layoutRef, err := layout.NewReference(layoutDir, canonicalRef.String()) - if err != nil { - return nil, fmt.Errorf("error creating reference: %w", err) - } - - ////////////////////////////////////////////////////// - // - // Load an image signature policy and build - // a policy context for the image pull. - // - ////////////////////////////////////////////////////// - policyContext, err := loadPolicyContext(srcCtx, l) - if err != nil { - return nil, fmt.Errorf("error loading policy context: %w", err) - } - defer func() { - if err := policyContext.Destroy(); err != nil { - l.Error(err, "error destroying policy context") - } - }() - - ////////////////////////////////////////////////////// - // - // Pull the image from the source to the destination - // - ////////////////////////////////////////////////////// - if _, err := copy.Image(ctx, policyContext, layoutRef, dockerRef, ©.Options{ - SourceCtx: srcCtx, - // We use the OCI layout as a temporary storage and - // pushing signatures for OCI images is not supported - // so we remove the source signatures when copying. - // Signature validation will still be performed - // accordingly to a provided policy context. - RemoveSignatures: true, - }); err != nil { - return nil, fmt.Errorf("error copying image: %w", err) - } - l.Info("pulled image", "ref", imgRef.String(), "digest", canonicalRef.Digest().String()) - - ////////////////////////////////////////////////////// - // - // Mount the image we just pulled - // - ////////////////////////////////////////////////////// - if err := i.unpackImage(ctx, unpackPath, layoutRef, srcCtx); err != nil { - if cleanupErr := fsutil.DeleteReadOnlyRecursive(unpackPath); cleanupErr != nil { - err = errors.Join(err, cleanupErr) - } - return nil, fmt.Errorf("error unpacking image: %w", err) - } - - ////////////////////////////////////////////////////// - // - // Delete other images. They are no longer needed. 
- // - ////////////////////////////////////////////////////// - if err := i.deleteOtherImages(bundle.Name, canonicalRef.Digest()); err != nil { - return nil, fmt.Errorf("error deleting old images: %w", err) - } - - return successResult(bundle.Name, unpackPath, canonicalRef), nil -} - -func successResult(bundleName, unpackPath string, canonicalRef reference.Canonical) *Result { - return &Result{ - Bundle: os.DirFS(unpackPath), - ResolvedSource: &BundleSource{Type: SourceTypeImage, Name: bundleName, Image: &ImageSource{Ref: canonicalRef.String()}}, - State: StateUnpacked, - Message: fmt.Sprintf("unpacked %q successfully", canonicalRef), - } -} - -func (i *ContainersImageRegistry) Cleanup(_ context.Context, bundle *BundleSource) error { - return fsutil.DeleteReadOnlyRecursive(i.bundlePath(bundle.Name)) -} - -func (i *ContainersImageRegistry) bundlePath(bundleName string) string { - return filepath.Join(i.BaseCachePath, bundleName) -} - -func (i *ContainersImageRegistry) unpackPath(bundleName string, digest digest.Digest) string { - return filepath.Join(i.bundlePath(bundleName), digest.String()) -} - -func resolveReferences(ctx context.Context, ref string, sourceContext *types.SystemContext) (reference.Named, reference.Canonical, bool, error) { - imgRef, err := reference.ParseNamed(ref) - if err != nil { - return nil, nil, false, reconcile.TerminalError(fmt.Errorf("error parsing image reference %q: %w", ref, err)) - } - - canonicalRef, isCanonical, err := resolveCanonicalRef(ctx, imgRef, sourceContext) - if err != nil { - return nil, nil, false, fmt.Errorf("error resolving canonical reference: %w", err) - } - return imgRef, canonicalRef, isCanonical, nil -} - -func resolveCanonicalRef(ctx context.Context, imgRef reference.Named, imageCtx *types.SystemContext) (reference.Canonical, bool, error) { - if canonicalRef, ok := imgRef.(reference.Canonical); ok { - return canonicalRef, true, nil - } - - srcRef, err := docker.NewReference(imgRef) - if err != nil { - return nil, false, reconcile.TerminalError(fmt.Errorf("error creating reference: %w", err)) - } - - imgSrc, err := srcRef.NewImageSource(ctx, imageCtx) - if err != nil { - return nil, false, fmt.Errorf("error creating image source: %w", err) - } - defer imgSrc.Close() - - imgManifestData, _, err := imgSrc.GetManifest(ctx, nil) - if err != nil { - return nil, false, fmt.Errorf("error getting manifest: %w", err) - } - imgDigest, err := manifest.Digest(imgManifestData) - if err != nil { - return nil, false, fmt.Errorf("error getting digest of manifest: %w", err) - } - canonicalRef, err := reference.WithDigest(reference.TrimNamed(imgRef), imgDigest) - if err != nil { - return nil, false, fmt.Errorf("error creating canonical reference: %w", err) - } - return canonicalRef, false, nil -} - -func loadPolicyContext(sourceContext *types.SystemContext, l logr.Logger) (*signature.PolicyContext, error) { - policy, err := signature.DefaultPolicy(sourceContext) - // TODO: there are security implications to silently moving to an insecure policy - // tracking issue: https://github.com/operator-framework/operator-controller/issues/1622 - if err != nil { - l.Info("no default policy found, using insecure policy") - policy, err = signature.NewPolicyFromBytes(insecurePolicy) - } - if err != nil { - return nil, fmt.Errorf("error loading default policy: %w", err) - } - return signature.NewPolicyContext(policy) -} - -func (i *ContainersImageRegistry) unpackImage(ctx context.Context, unpackPath string, imageReference types.ImageReference, sourceContext 
*types.SystemContext) error { - img, err := imageReference.NewImage(ctx, sourceContext) - if err != nil { - return fmt.Errorf("error reading image: %w", err) - } - defer func() { - if err := img.Close(); err != nil { - panic(err) - } - }() - - layoutSrc, err := imageReference.NewImageSource(ctx, sourceContext) - if err != nil { - return fmt.Errorf("error creating image source: %w", err) - } - defer func() { - if err := layoutSrc.Close(); err != nil { - panic(err) - } - }() - return imageutil.ApplyLayersToDisk(ctx, unpackPath, img, layoutSrc, imageutil.ForceOwnershipRWX()) -} - -func (i *ContainersImageRegistry) deleteOtherImages(bundleName string, digestToKeep digest.Digest) error { - bundlePath := i.bundlePath(bundleName) - imgDirs, err := os.ReadDir(bundlePath) - if err != nil { - return fmt.Errorf("error reading image directories: %w", err) - } - for _, imgDir := range imgDirs { - if imgDir.Name() == digestToKeep.String() { - continue - } - imgDirPath := filepath.Join(bundlePath, imgDir.Name()) - if err := fsutil.DeleteReadOnlyRecursive(imgDirPath); err != nil { - return fmt.Errorf("error removing image directory: %w", err) - } - } - return nil -} diff --git a/internal/operator-controller/rukpak/source/containers_image_test.go b/internal/operator-controller/rukpak/source/containers_image_test.go deleted file mode 100644 index 7196a7774..000000000 --- a/internal/operator-controller/rukpak/source/containers_image_test.go +++ /dev/null @@ -1,403 +0,0 @@ -package source_test - -import ( - "context" - "fmt" - "io/fs" - "net/http/httptest" - "net/url" - "os" - "path/filepath" - "testing" - - "github.com/BurntSushi/toml" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/pkg/sysregistriesv2" - "github.com/containers/image/v5/types" - "github.com/go-logr/logr" - "github.com/google/go-containerregistry/pkg/crane" - "github.com/google/go-containerregistry/pkg/registry" - "github.com/opencontainers/go-digest" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/source" - fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs" -) - -const ( - testFileName string = "test-file" - testFileContents string = "test-content" -) - -func TestUnpackValidInsecure(t *testing.T) { - imageTagRef, _, cleanup := setupRegistry(t) - defer cleanup() - - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: t.TempDir(), - SourceContextFunc: buildPullContextfunc(t, imageTagRef), - } - bundleSource := &source.BundleSource{ - Name: "test-bundle", - Type: source.SourceTypeImage, - Image: &source.ImageSource{ - Ref: imageTagRef.String(), - }, - } - - oldBundlePath := filepath.Join(unpacker.BaseCachePath, bundleSource.Name, "old") - err := os.MkdirAll(oldBundlePath, 0755) - require.NoError(t, err) - - // Attempt to pull and unpack the image - result, err := unpacker.Unpack(context.Background(), bundleSource) - require.NoError(t, err) - require.NotNil(t, result) - assert.Equal(t, source.StateUnpacked, result.State) - - require.NoDirExists(t, oldBundlePath) - - unpackedFile, err := fs.ReadFile(result.Bundle, testFileName) - require.NoError(t, err) - // Ensure the unpacked file matches the source content - assert.Equal(t, []byte(testFileContents), unpackedFile) - assert.NoError(t, unpacker.Cleanup(context.Background(), bundleSource)) -} - -func TestUnpackValidUsesCache(t *testing.T) { - _, 
imageDigestRef, cleanup := setupRegistry(t) - defer cleanup() - - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: t.TempDir(), - SourceContextFunc: buildPullContextfunc(t, imageDigestRef), - } - - bundleSource := &source.BundleSource{ - Name: "test-bundle", - Type: source.SourceTypeImage, - Image: &source.ImageSource{ - Ref: imageDigestRef.String(), - }, - } - - // Populate the bundle cache with a folder that is not actually part of the image - testCacheFilePath := filepath.Join(unpacker.BaseCachePath, bundleSource.Name, imageDigestRef.Digest().String(), "test-folder") - require.NoError(t, os.MkdirAll(testCacheFilePath, 0700)) - - // Attempt to pull and unpack the image - result, err := unpacker.Unpack(context.Background(), bundleSource) - require.NoError(t, err) - require.NotNil(t, result) - assert.Equal(t, source.StateUnpacked, result.State) - - // Make sure the original contents of the cache are still present. If the cached contents - // were not used, we would expect the original contents to be removed. - assert.DirExists(t, testCacheFilePath) - assert.NoError(t, unpacker.Cleanup(context.Background(), bundleSource)) -} - -func TestUnpackCacheCheckError(t *testing.T) { - imageTagRef, imageDigestRef, cleanup := setupRegistry(t) - defer cleanup() - - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: t.TempDir(), - SourceContextFunc: buildPullContextfunc(t, imageTagRef), - } - bundleSource := &source.BundleSource{ - Name: "test-bundle", - Type: source.SourceTypeImage, - Image: &source.ImageSource{ - Ref: imageTagRef.String(), - }, - } - - // Create the unpack path and restrict its permissions - unpackPath := filepath.Join(unpacker.BaseCachePath, bundleSource.Name, imageDigestRef.Digest().String()) - require.NoError(t, os.MkdirAll(unpackPath, os.ModePerm)) - require.NoError(t, os.Chmod(unpacker.BaseCachePath, 0000)) - defer func() { - require.NoError(t, os.Chmod(unpacker.BaseCachePath, 0755)) - }() - - // Attempt to pull and unpack the image - _, err := unpacker.Unpack(context.Background(), bundleSource) - assert.ErrorContains(t, err, "permission denied") -} - -func TestUnpackNameOnlyImageReference(t *testing.T) { - imageTagRef, _, cleanup := setupRegistry(t) - defer cleanup() - - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: t.TempDir(), - SourceContextFunc: buildPullContextfunc(t, imageTagRef), - } - bundleSource := &source.BundleSource{ - Name: "test-bundle", - Type: source.SourceTypeImage, - Image: &source.ImageSource{ - Ref: reference.TrimNamed(imageTagRef).String(), - }, - } - - // Attempt to pull and unpack the image - _, err := unpacker.Unpack(context.Background(), bundleSource) - require.ErrorContains(t, err, "tag or digest is needed") - assert.ErrorIs(t, err, reconcile.TerminalError(nil)) -} - -func TestUnpackUnservedTaggedImageReference(t *testing.T) { - imageTagRef, _, cleanup := setupRegistry(t) - defer cleanup() - - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: t.TempDir(), - SourceContextFunc: buildPullContextfunc(t, imageTagRef), - } - bundleSource := &source.BundleSource{ - Name: "test-bundle", - Type: source.SourceTypeImage, - Image: &source.ImageSource{ - // Use a valid reference that is not served - Ref: fmt.Sprintf("%s:unserved-tag", reference.TrimNamed(imageTagRef)), - }, - } - - // Attempt to pull and unpack the image - _, err := unpacker.Unpack(context.Background(), bundleSource) - assert.ErrorContains(t, err, "manifest unknown") -} - -func TestUnpackUnservedCanonicalImageReference(t *testing.T) { - 
imageTagRef, imageDigestRef, cleanup := setupRegistry(t) - defer cleanup() - - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: t.TempDir(), - SourceContextFunc: buildPullContextfunc(t, imageTagRef), - } - - origRef := imageDigestRef.String() - nonExistentRef := origRef[:len(origRef)-1] + "1" - bundleSource := &source.BundleSource{ - Name: "test-bundle", - Type: source.SourceTypeImage, - Image: &source.ImageSource{ - // Use a valid reference that is not served - Ref: nonExistentRef, - }, - } - - // Attempt to pull and unpack the image - _, err := unpacker.Unpack(context.Background(), bundleSource) - assert.ErrorContains(t, err, "manifest unknown") -} - -func TestUnpackInvalidSourceType(t *testing.T) { - unpacker := &source.ContainersImageRegistry{} - // Create BundleSource with invalid source type - bundleSource := &source.BundleSource{ - Type: "invalid", - } - - shouldPanic := func() { - // Attempt to pull and unpack the image - _, err := unpacker.Unpack(context.Background(), bundleSource) - if err != nil { - t.Error("func should have panicked") - } - } - assert.Panics(t, shouldPanic) -} - -func TestUnpackInvalidNilImage(t *testing.T) { - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: t.TempDir(), - } - // Create BundleSource with nil Image - bundleSource := &source.BundleSource{ - Name: "test-bundle", - Type: source.SourceTypeImage, - Image: nil, - } - - // Attempt to unpack - result, err := unpacker.Unpack(context.Background(), bundleSource) - assert.Nil(t, result) - require.ErrorContains(t, err, "nil image source") - require.ErrorIs(t, err, reconcile.TerminalError(nil)) - assert.NoDirExists(t, filepath.Join(unpacker.BaseCachePath, bundleSource.Name)) -} - -func TestUnpackInvalidImageRef(t *testing.T) { - unpacker := &source.ContainersImageRegistry{ - SourceContextFunc: func(logr.Logger) (*types.SystemContext, error) { - return &types.SystemContext{}, nil - }, - } - // Create BundleSource with malformed image reference - bundleSource := &source.BundleSource{ - Name: "test-bundle", - Type: source.SourceTypeImage, - Image: &source.ImageSource{ - Ref: "invalid image ref", - }, - } - - // Attempt to unpack - result, err := unpacker.Unpack(context.Background(), bundleSource) - assert.Nil(t, result) - require.ErrorContains(t, err, "error parsing image reference") - require.ErrorIs(t, err, reconcile.TerminalError(nil)) - assert.NoDirExists(t, filepath.Join(unpacker.BaseCachePath, bundleSource.Name)) -} - -func TestUnpackUnexpectedFile(t *testing.T) { - imageTagRef, imageDigestRef, cleanup := setupRegistry(t) - defer cleanup() - - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: t.TempDir(), - SourceContextFunc: buildPullContextfunc(t, imageTagRef), - } - bundleSource := &source.BundleSource{ - Name: "test-bundle", - Type: source.SourceTypeImage, - Image: &source.ImageSource{ - Ref: imageTagRef.String(), - }, - } - - // Create an unpack path that is a file - unpackPath := filepath.Join(unpacker.BaseCachePath, bundleSource.Name, imageDigestRef.Digest().String()) - require.NoError(t, os.MkdirAll(filepath.Dir(unpackPath), 0700)) - require.NoError(t, os.WriteFile(unpackPath, []byte{}, 0600)) - - // Attempt to pull and unpack the image - _, err := unpacker.Unpack(context.Background(), bundleSource) - require.NoError(t, err) - - // Ensure unpack path is now a directory - stat, err := os.Stat(unpackPath) - require.NoError(t, err) - require.True(t, stat.IsDir()) - - // Unset read-only to allow cleanup - require.NoError(t, fsutil.SetWritableRecursive(unpackPath)) 
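The fsutil helpers exercised here (SetWritableRecursive, DeleteReadOnlyRecursive) live in internal/shared/util/fs and are not shown in this patch. The pairing exists because unpacked trees are stored read-only, so deletion must restore write bits first. A plausible sketch of the delete path, under that assumption:

// DeleteReadOnlyRecursive (sketch): restore owner permissions on the way
// down, then RemoveAll. WalkDir visits a directory before descending into
// it, so chmod-ing it 0700 first makes its children listable. A missing
// root is tolerated, matching the "cleanup should ignore missing files"
// assertion earlier in this series.
func DeleteReadOnlyRecursive(root string) error {
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		mode := os.FileMode(0600)
		if d.IsDir() {
			mode = 0700
		}
		return os.Chmod(path, mode)
	})
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return err
	}
	return os.RemoveAll(root)
}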
-} - -func TestUnpackCopySucceedsMountFails(t *testing.T) { - imageTagRef, _, cleanup := setupRegistry(t) - defer cleanup() - - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: t.TempDir(), - SourceContextFunc: buildPullContextfunc(t, imageTagRef), - } - bundleSource := &source.BundleSource{ - Name: "test-bundle", - Type: source.SourceTypeImage, - Image: &source.ImageSource{ - Ref: imageTagRef.String(), - }, - } - - // Create an unpack path that is a non-writable directory - bundleDir := filepath.Join(unpacker.BaseCachePath, bundleSource.Name) - require.NoError(t, os.MkdirAll(bundleDir, 0000)) - - // Attempt to pull and unpack the image - _, err := unpacker.Unpack(context.Background(), bundleSource) - assert.ErrorContains(t, err, "permission denied") -} - -func TestCleanup(t *testing.T) { - imageTagRef, _, cleanup := setupRegistry(t) - defer cleanup() - - unpacker := &source.ContainersImageRegistry{ - BaseCachePath: t.TempDir(), - SourceContextFunc: buildPullContextfunc(t, imageTagRef), - } - bundleSource := &source.BundleSource{ - Name: "test-bundle", - Type: source.SourceTypeImage, - Image: &source.ImageSource{ - Ref: imageTagRef.String(), - }, - } - - // Create an unpack path for the bundle - bundleDir := filepath.Join(unpacker.BaseCachePath, bundleSource.Name) - require.NoError(t, os.MkdirAll(bundleDir, 0755)) - - // Clean up the bundle - err := unpacker.Cleanup(context.Background(), bundleSource) - require.NoError(t, err) - assert.NoDirExists(t, bundleDir) -} - -func setupRegistry(t *testing.T) (reference.NamedTagged, reference.Canonical, func()) { - server := httptest.NewServer(registry.New()) - serverURL, err := url.Parse(server.URL) - require.NoError(t, err) - - // Generate an image with file contents - img, err := crane.Image(map[string][]byte{testFileName: []byte(testFileContents)}) - require.NoError(t, err) - - imageTagRef, err := newReference(serverURL.Host, "test-repo/test-image", "test-tag") - require.NoError(t, err) - - imgDigest, err := img.Digest() - require.NoError(t, err) - - imageDigestRef, err := reference.WithDigest(reference.TrimNamed(imageTagRef), digest.Digest(imgDigest.String())) - require.NoError(t, err) - - require.NoError(t, crane.Push(img, imageTagRef.String())) - - cleanup := func() { - server.Close() - } - return imageTagRef, imageDigestRef, cleanup -} - -func newReference(host, repo, tag string) (reference.NamedTagged, error) { - ref, err := reference.ParseNamed(fmt.Sprintf("%s/%s", host, repo)) - if err != nil { - return nil, err - } - return reference.WithTag(ref, tag) -} - -func buildPullContextfunc(t *testing.T, ref reference.Named) func(_ logr.Logger) (*types.SystemContext, error) { - return func(_ logr.Logger) (*types.SystemContext, error) { - // Build a containers/image context that allows pulling from the test registry insecurely - registriesConf := sysregistriesv2.V2RegistriesConf{Registries: []sysregistriesv2.Registry{ - { - Prefix: reference.Domain(ref), - Endpoint: sysregistriesv2.Endpoint{ - Location: reference.Domain(ref), - Insecure: true, - }, - }, - }} - configDir := t.TempDir() - registriesConfPath := filepath.Join(configDir, "registries.conf") - f, err := os.Create(registriesConfPath) - require.NoError(t, err) - - enc := toml.NewEncoder(f) - require.NoError(t, enc.Encode(registriesConf)) - require.NoError(t, f.Close()) - - return &types.SystemContext{ - SystemRegistriesConfPath: registriesConfPath, - }, nil - } -} diff --git a/internal/operator-controller/rukpak/source/unpacker.go 
b/internal/operator-controller/rukpak/source/unpacker.go deleted file mode 100644 index 013f96989..000000000 --- a/internal/operator-controller/rukpak/source/unpacker.go +++ /dev/null @@ -1,74 +0,0 @@ -package source - -import ( - "context" - "io/fs" -) - -// SourceTypeImage is the identifier for image-type bundle sources -const SourceTypeImage SourceType = "image" - -type ImageSource struct { - // Ref contains the reference to a container image containing Bundle contents. - Ref string -} - -// Unpacker unpacks bundle content, either synchronously or asynchronously and -// returns a Result, which conveys information about the progress of unpacking -// the bundle content. -// -// If a Source unpacks content asynchronously, it should register one or more -// watches with a controller to ensure that Bundles referencing this source -// can be reconciled as progress updates are available. -// -// For asynchronous Sources, multiple calls to Unpack should be made until the -// returned result includes state StateUnpacked. -// -// NOTE: A source is meant to be agnostic to specific bundle formats and -// specifications. A source should treat a bundle root directory as an opaque -// file tree and delegate bundle format concerns to bundle parsers. -type Unpacker interface { - Unpack(context.Context, *BundleSource) (*Result, error) - Cleanup(context.Context, *BundleSource) error -} - -// Result conveys progress information about unpacking bundle content. -type Result struct { - // Bundle contains the full filesystem of a bundle's root directory. - Bundle fs.FS - - // ResolvedSource is a reproducible view of a Bundle's Source. - // When possible, source implementations should return a ResolvedSource - // that pins the Source such that future fetches of the bundle content can - // be guaranteed to fetch the exact same bundle content as the original - // unpack. - // - // For example, resolved image sources should reference a container image - // digest rather than an image tag, and git sources should reference a - // commit hash rather than a branch or tag. - ResolvedSource *BundleSource - - // State is the current state of unpacking the bundle content. - State State - - // Message is contextual information about the progress of unpacking the - // bundle content. - Message string -} - -type State string - -const ( - // StateUnpacked conveys that the bundle has been successfully unpacked. - StateUnpacked State = "Unpacked" -) - -type SourceType string - -type BundleSource struct { - Name string - // Type defines the kind of Bundle content being sourced. - Type SourceType - // Image is the bundle image that backs the content of this bundle. 
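To make the pinning guarantee described above concrete: a source resolves the mutable reference from the spec into an immutable one before reporting it back (the digest below is a placeholder, not real data):

// Spec as authored: a mutable tag that can move between reconciles.
spec := ImageSource{Ref: "quay.io/example/bundle:v1.2.3"}

// ResolvedSource as a source should report it: pinned by digest, so any
// later fetch returns byte-identical content. Placeholder digest.
resolved := ImageSource{Ref: "quay.io/example/bundle@sha256:<digest-of-pulled-manifest>"}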
- Image *ImageSource -} diff --git a/internal/shared/util/error/terminal.go b/internal/shared/util/error/terminal.go new file mode 100644 index 000000000..cd70d535f --- /dev/null +++ b/internal/shared/util/error/terminal.go @@ -0,0 +1,10 @@ +package error + +import "sigs.k8s.io/controller-runtime/pkg/reconcile" + +func WrapTerminal(err error, isTerminal bool) error { + if !isTerminal || err == nil { + return err + } + return reconcile.TerminalError(err) +} diff --git a/internal/operator-controller/httputil/certlog.go b/internal/shared/util/http/certlog.go similarity index 99% rename from internal/operator-controller/httputil/certlog.go rename to internal/shared/util/http/certlog.go index beeb3e055..794630d98 100644 --- a/internal/operator-controller/httputil/certlog.go +++ b/internal/shared/util/http/certlog.go @@ -1,4 +1,4 @@ -package httputil +package http import ( "crypto/tls" diff --git a/internal/operator-controller/httputil/certpoolwatcher.go b/internal/shared/util/http/certpoolwatcher.go similarity index 99% rename from internal/operator-controller/httputil/certpoolwatcher.go rename to internal/shared/util/http/certpoolwatcher.go index 646c09b00..7f95449e9 100644 --- a/internal/operator-controller/httputil/certpoolwatcher.go +++ b/internal/shared/util/http/certpoolwatcher.go @@ -1,4 +1,4 @@ -package httputil +package http import ( "crypto/x509" diff --git a/internal/operator-controller/httputil/certpoolwatcher_test.go b/internal/shared/util/http/certpoolwatcher_test.go similarity index 95% rename from internal/operator-controller/httputil/certpoolwatcher_test.go rename to internal/shared/util/http/certpoolwatcher_test.go index 0a5c2974b..ca13a478b 100644 --- a/internal/operator-controller/httputil/certpoolwatcher_test.go +++ b/internal/shared/util/http/certpoolwatcher_test.go @@ -1,4 +1,4 @@ -package httputil_test +package http_test import ( "context" @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" "sigs.k8s.io/controller-runtime/pkg/log" - "github.com/operator-framework/operator-controller/internal/operator-controller/httputil" + httputil "github.com/operator-framework/operator-controller/internal/shared/util/http" ) func createCert(t *testing.T, name string) { diff --git a/internal/operator-controller/httputil/certutil.go b/internal/shared/util/http/certutil.go similarity index 98% rename from internal/operator-controller/httputil/certutil.go rename to internal/shared/util/http/certutil.go index d6732e7d8..864d71c65 100644 --- a/internal/operator-controller/httputil/certutil.go +++ b/internal/shared/util/http/certutil.go @@ -1,4 +1,4 @@ -package httputil +package http import ( "crypto/x509" diff --git a/internal/operator-controller/httputil/certutil_test.go b/internal/shared/util/http/certutil_test.go similarity index 67% rename from internal/operator-controller/httputil/certutil_test.go rename to internal/shared/util/http/certutil_test.go index d37383c90..f998b61d5 100644 --- a/internal/operator-controller/httputil/certutil_test.go +++ b/internal/shared/util/http/certutil_test.go @@ -1,4 +1,4 @@ -package httputil_test +package http_test import ( "context" @@ -7,7 +7,7 @@ import ( "github.com/go-logr/logr" "github.com/stretchr/testify/require" - "github.com/operator-framework/operator-controller/internal/operator-controller/httputil" + httputil "github.com/operator-framework/operator-controller/internal/shared/util/http" ) // The "good" test consists of 3 Amazon Root CAs, along with a "PRIVATE KEY" in one of the files @@ -17,9 +17,9 @@ func TestNewCertPool(t 
*testing.T) {
 dir string
 msg string
 }{
-{"../../../testdata/certs/", `no certificates found in "../../../testdata/certs/"`},
-{"../../../testdata/certs/good", ""},
-{"../../../testdata/certs/empty", `no certificates found in "../../../testdata/certs/empty"`},
+{"../../../../testdata/certs/", `no certificates found in "../../../../testdata/certs/"`},
+{"../../../../testdata/certs/good", ""},
+{"../../../../testdata/certs/empty", `no certificates found in "../../../../testdata/certs/empty"`},
 }
 log, _ := logr.FromContext(context.Background())
diff --git a/internal/operator-controller/httputil/httputil.go b/internal/shared/util/http/httputil.go
similarity index 96%
rename from internal/operator-controller/httputil/httputil.go
rename to internal/shared/util/http/httputil.go
index d620866e4..f5a982d2d 100644
--- a/internal/operator-controller/httputil/httputil.go
+++ b/internal/shared/util/http/httputil.go
@@ -1,4 +1,4 @@
-package httputil
+package http
 import (
 "crypto/tls"
diff --git a/internal/shared/util/image/cache.go b/internal/shared/util/image/cache.go
new file mode 100644
index 000000000..fbbb52bd8
--- /dev/null
+++ b/internal/shared/util/image/cache.go
@@ -0,0 +1,185 @@
+package image
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "iter"
+ "os"
+ "path/filepath"
+ "slices"
+ "time"
+
+ "github.com/containerd/containerd/archive"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/opencontainers/go-digest"
+ ocispecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+
+ errorutil "github.com/operator-framework/operator-controller/internal/shared/util/error"
+ fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs"
+)
+
+type LayerData struct {
+ Reader io.Reader
+ Index int
+ Err error
+}
+
+type Cache interface {
+ Fetch(context.Context, string, reference.Canonical) (fs.FS, time.Time, error)
+ Store(context.Context, string, reference.Named, reference.Canonical, ocispecv1.Image, iter.Seq[LayerData]) (fs.FS, time.Time, error)
+ Delete(context.Context, string) error
+ GarbageCollect(context.Context, string, reference.Canonical) error
+}
+
+const ConfigDirLabel = "operators.operatorframework.io.index.configs.v1"
+
+func CatalogCache(basePath string) Cache {
+ return &diskCache{
+ basePath: basePath,
+ filterFunc: filterForCatalogImage(),
+ }
+}
+
+func filterForCatalogImage() func(ctx context.Context, srcRef reference.Named, image ocispecv1.Image) (archive.Filter, error) {
+ return func(ctx context.Context, srcRef reference.Named, image ocispecv1.Image) (archive.Filter, error) {
+ _, specIsCanonical := srcRef.(reference.Canonical)
+
+ dirToUnpack, ok := image.Config.Labels[ConfigDirLabel]
+ if !ok {
+ // If the spec is a tagged ref, retries could end up resolving a new digest, where the label
+ // might show up. If the spec is canonical, no amount of retries will make the label appear.
+ // Therefore, we treat the error as terminal if the reference from the spec is canonical.
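The helpers composed just below (allFilters, onlyPath, forceOwnershipRWX) are referenced but not defined in this hunk. Assuming containerd's archive.Filter type, func(*tar.Header) (bool, error), plausible sketches look like this (imports from archive/tar, os, path/filepath, and strings implied):

// onlyPath (sketch): keep only tar entries at or below dir.
func onlyPath(dir string) archive.Filter {
	return func(h *tar.Header) (bool, error) {
		want := filepath.Clean("/" + dir)
		got := filepath.Clean("/" + h.Name)
		return got == want || strings.HasPrefix(got, want+"/"), nil
	}
}

// forceOwnershipRWX (sketch): chown entries to the running process and
// guarantee the owner can read, write, and traverse them, so the cache can
// later be walked and deleted.
func forceOwnershipRWX() archive.Filter {
	return func(h *tar.Header) (bool, error) {
		h.Uid, h.Gid = os.Getuid(), os.Getgid()
		h.Mode |= 0700
		return true, nil
	}
}

// allFilters (sketch): apply filters in order; the first skip or error wins.
func allFilters(filters ...archive.Filter) archive.Filter {
	return func(h *tar.Header) (bool, error) {
		for _, f := range filters {
			if keep, err := f(h); !keep || err != nil {
				return keep, err
			}
		}
		return true, nil
	}
}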
+ return nil, errorutil.WrapTerminal(fmt.Errorf("catalog image is missing the required label %q", ConfigDirLabel), specIsCanonical) + } + + return allFilters( + onlyPath(dirToUnpack), + forceOwnershipRWX(), + ), nil + } +} + +func BundleCache(basePath string) Cache { + return &diskCache{ + basePath: basePath, + filterFunc: filterForBundleImage(), + } +} + +func filterForBundleImage() func(ctx context.Context, srcRef reference.Named, image ocispecv1.Image) (archive.Filter, error) { + return func(ctx context.Context, srcRef reference.Named, image ocispecv1.Image) (archive.Filter, error) { + return forceOwnershipRWX(), nil + } +} + +type diskCache struct { + basePath string + filterFunc func(context.Context, reference.Named, ocispecv1.Image) (archive.Filter, error) +} + +func (a *diskCache) Fetch(ctx context.Context, ownerID string, canonicalRef reference.Canonical) (fs.FS, time.Time, error) { + l := log.FromContext(ctx) + unpackPath := a.unpackPath(ownerID, canonicalRef.Digest()) + modTime, err := fsutil.GetDirectoryModTime(unpackPath) + switch { + case errors.Is(err, os.ErrNotExist): + return nil, time.Time{}, nil + case errors.Is(err, fsutil.ErrNotDirectory): + l.Info("unpack path is not a directory; attempting to delete", "path", unpackPath) + return nil, time.Time{}, fsutil.DeleteReadOnlyRecursive(unpackPath) + case err != nil: + return nil, time.Time{}, fmt.Errorf("error checking image content already unpacked: %w", err) + } + l.Info("image already unpacked") + return os.DirFS(a.unpackPath(ownerID, canonicalRef.Digest())), modTime, nil +} + +func (a *diskCache) ownerIDPath(ownerID string) string { + return filepath.Join(a.basePath, ownerID) +} + +func (a *diskCache) unpackPath(ownerID string, digest digest.Digest) string { + return filepath.Join(a.ownerIDPath(ownerID), digest.String()) +} + +func (a *diskCache) Store(ctx context.Context, ownerID string, srcRef reference.Named, canonicalRef reference.Canonical, imgCfg ocispecv1.Image, layers iter.Seq[LayerData]) (fs.FS, time.Time, error) { + var applyOpts []archive.ApplyOpt + if a.filterFunc != nil { + filter, err := a.filterFunc(ctx, srcRef, imgCfg) + if err != nil { + return nil, time.Time{}, err + } + applyOpts = append(applyOpts, archive.WithFilter(filter)) + } + + dest := a.unpackPath(ownerID, canonicalRef.Digest()) + if err := fsutil.EnsureEmptyDirectory(dest, 0700); err != nil { + return nil, time.Time{}, fmt.Errorf("error ensuring empty unpack directory: %w", err) + } + + if err := func() error { + l := log.FromContext(ctx) + l.Info("unpacking image", "path", dest) + for layer := range layers { + if layer.Err != nil { + return fmt.Errorf("error reading layer[%d]: %w", layer.Index, layer.Err) + } + if _, err := archive.Apply(ctx, dest, layer.Reader, applyOpts...); err != nil { + return fmt.Errorf("error applying layer[%d]: %w", layer.Index, err) + } + l.Info("applied layer", "layer", layer.Index) + } + if err := fsutil.SetReadOnlyRecursive(dest); err != nil { + return fmt.Errorf("error making unpack directory read-only: %w", err) + } + return nil + }(); err != nil { + return nil, time.Time{}, errors.Join(err, fsutil.DeleteReadOnlyRecursive(dest)) + } + modTime, err := fsutil.GetDirectoryModTime(dest) + if err != nil { + return nil, time.Time{}, fmt.Errorf("error getting mod time of unpack directory: %w", err) + } + return os.DirFS(dest), modTime, nil +} + +func (a *diskCache) Delete(_ context.Context, ownerID string) error { + return fsutil.DeleteReadOnlyRecursive(a.ownerIDPath(ownerID)) +} + +func (a *diskCache) GarbageCollect(_ 
context.Context, ownerID string, keep reference.Canonical) error { + ownerIDPath := a.ownerIDPath(ownerID) + dirEntries, err := os.ReadDir(ownerIDPath) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return fmt.Errorf("error reading owner directory: %w", err) + } + + foundKeep := false + dirEntries = slices.DeleteFunc(dirEntries, func(entry os.DirEntry) bool { + found := entry.Name() == keep.Digest().String() + if found { + foundKeep = true + } + return found + }) + + for _, dirEntry := range dirEntries { + if err := fsutil.DeleteReadOnlyRecursive(filepath.Join(ownerIDPath, dirEntry.Name())); err != nil { + return fmt.Errorf("error removing entry %s: %w", dirEntry.Name(), err) + } + } + + if !foundKeep { + if err := fsutil.DeleteReadOnlyRecursive(ownerIDPath); err != nil { + return fmt.Errorf("error deleting unused owner data: %w", err) + } + } + return nil +} diff --git a/internal/shared/util/image/cache_test.go b/internal/shared/util/image/cache_test.go new file mode 100644 index 000000000..a5b644feb --- /dev/null +++ b/internal/shared/util/image/cache_test.go @@ -0,0 +1,621 @@ +package image + +import ( + "archive/tar" + "context" + "errors" + "io" + "io/fs" + "iter" + "os" + "path/filepath" + "strings" + "syscall" + "testing" + "testing/fstest" + "time" + + "github.com/containerd/containerd/archive" + "github.com/containers/image/v5/docker/reference" + ocispecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs" +) + +func TestDiskCacheFetch(t *testing.T) { + const myOwner = "myOwner" + myRef := mustParseCanonical(t, "my.registry.io/ns/repo@sha256:5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03") + + testCases := []struct { + name string + ownerID string + ref reference.Canonical + setup func(*testing.T, *diskCache) + expect func(*testing.T, *diskCache, fs.FS, time.Time, error) + }{ + { + name: "all zero-values when owner does not exist", + ownerID: myOwner, + ref: myRef, + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + assert.Nil(t, fsys) + assert.Zero(t, modTime) + assert.NoError(t, err) + }, + }, + { + name: "all zero values when digest does not exist for owner", + ownerID: myOwner, + ref: myRef, + setup: func(t *testing.T, cache *diskCache) { + require.NoError(t, os.MkdirAll(cache.ownerIDPath(myOwner), 0777)) + }, + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + assert.Nil(t, fsys) + assert.Zero(t, modTime) + assert.NoError(t, err) + }, + }, + { + name: "owners do not share data", + ownerID: myOwner, + ref: myRef, + setup: func(t *testing.T, cache *diskCache) { + require.NoError(t, os.MkdirAll(cache.unpackPath("otherOwner", myRef.Digest()), 0777)) + }, + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + assert.Nil(t, fsys) + assert.Zero(t, modTime) + assert.NoError(t, err) + }, + }, + { + name: "permission error when owner directory cannot be read", + ownerID: myOwner, + ref: myRef, + setup: func(t *testing.T, cache *diskCache) { + ownerIDPath := cache.ownerIDPath(myOwner) + require.NoError(t, os.MkdirAll(ownerIDPath, 0700)) + require.NoError(t, os.Chmod(ownerIDPath, 0000)) + }, + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + assert.Nil(t, fsys) + assert.Zero(t, modTime) + assert.ErrorIs(t, err, os.ErrPermission) + 
}, + }, + { + name: "unexpected contents for a reference are deleted, zero values returned", + ownerID: myOwner, + ref: myRef, + setup: func(t *testing.T, cache *diskCache) { + ownerIDPath := cache.ownerIDPath(myOwner) + require.NoError(t, os.MkdirAll(ownerIDPath, 0700)) + require.NoError(t, os.WriteFile(cache.unpackPath(myOwner, myRef.Digest()), []byte{}, 0600)) + }, + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + assert.Nil(t, fsys) + assert.Zero(t, modTime) + assert.NoError(t, err) // nolint:testifylint + assert.NoFileExists(t, cache.unpackPath(myOwner, myRef.Digest())) + }, + }, + { + name: "digest exists for owner", + ownerID: myOwner, + ref: myRef, + setup: func(t *testing.T, cache *diskCache) { + unpackPath := cache.unpackPath(myOwner, myRef.Digest()) + require.NoError(t, os.MkdirAll(cache.ownerIDPath(myOwner), 0700)) + require.NoError(t, os.MkdirAll(unpackPath, 0700)) + require.NoError(t, os.WriteFile(filepath.Join(unpackPath, "my-file"), []byte("my-data"), 0600)) + }, + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + require.NoError(t, err) + + // Verify fsys + data, err := fs.ReadFile(fsys, "my-file") + require.NoError(t, err) + assert.Equal(t, "my-data", string(data)) + + // Verify modTime + dirStat, err := os.Stat(cache.unpackPath(myOwner, myRef.Digest())) + assert.Equal(t, dirStat.ModTime(), modTime) + + // Verify no error + assert.NoError(t, err) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dc := &diskCache{basePath: t.TempDir()} + if tc.setup != nil { + tc.setup(t, dc) + } + fsys, modTime, err := dc.Fetch(context.Background(), tc.ownerID, tc.ref) + require.NotNil(t, tc.expect, "test case must include an expect function") + tc.expect(t, dc, fsys, modTime, err) + require.NoError(t, fsutil.SetWritableRecursive(dc.basePath)) + }) + } +} + +func TestDiskCacheStore(t *testing.T) { + const myOwner = "myOwner" + myCanonicalRef := mustParseCanonical(t, "my.registry.io/ns/repo@sha256:5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03") + myTaggedRef, err := reference.WithTag(reference.TrimNamed(myCanonicalRef), "test-tag") + require.NoError(t, err) + + myUID := os.Getuid() + myGID := os.Getgid() + + testCases := []struct { + name string + ownerID string + srcRef reference.Named + canonicalRef reference.Canonical + imgConfig ocispecv1.Image + layers iter.Seq[LayerData] + filterFunc func(context.Context, reference.Named, ocispecv1.Image) (archive.Filter, error) + setup func(*testing.T, *diskCache) + expect func(*testing.T, *diskCache, fs.FS, time.Time, error) + }{ + { + name: "returns error when filter func fails", + filterFunc: func(context.Context, reference.Named, ocispecv1.Image) (archive.Filter, error) { + return nil, errors.New("filterfunc error") + }, + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + assert.Nil(t, fsys) + assert.Zero(t, modTime) + assert.ErrorContains(t, err, "filterfunc error") + }, + }, + { + name: "returns permission error when base path is not writeable", + ownerID: myOwner, + srcRef: myTaggedRef, + canonicalRef: myCanonicalRef, + setup: func(t *testing.T, cache *diskCache) { + require.NoError(t, os.Chmod(cache.basePath, 0400)) + }, + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + assert.ErrorIs(t, err, os.ErrPermission) + }, + }, + { + name: "returns error if layer data contains error", + ownerID: myOwner, + srcRef: myTaggedRef, 
+ canonicalRef: myCanonicalRef, + layers: func(yield func(LayerData) bool) { + yield(LayerData{Err: errors.New("layer error")}) + }, + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + assert.ErrorContains(t, err, "layer error") + }, + }, + { + name: "returns error if layer read returns error", + ownerID: myOwner, + srcRef: myTaggedRef, + canonicalRef: myCanonicalRef, + layers: func(yield func(LayerData) bool) { + yield(LayerData{Reader: strings.NewReader("hello :)")}) + }, + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + assert.ErrorContains(t, err, "error applying layer") + }, + }, + { + name: "no error and an empty FS returned when there are no layers", + ownerID: myOwner, + srcRef: myTaggedRef, + canonicalRef: myCanonicalRef, + layers: layerFSIterator(), + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + require.NoError(t, err) + + entries, err := fs.ReadDir(fsys, ".") + assert.NoError(t, err) // nolint:testifylint + assert.Empty(t, entries) + }, + }, + { + name: "multiple layers with whiteouts are stored as expected", + ownerID: myOwner, + srcRef: myTaggedRef, + canonicalRef: myCanonicalRef, + layers: layerFSIterator( + fstest.MapFS{ + "foo": &fstest.MapFile{Data: []byte("foo_layer1"), Mode: 0600}, + "bar": &fstest.MapFile{Data: []byte("bar_layer1"), Mode: 0600}, + "fizz": &fstest.MapFile{Data: []byte("fizz_layer1"), Mode: 0600}, + }, + fstest.MapFS{ + "foo": &fstest.MapFile{Data: []byte("foo_layer2"), Mode: 0600}, + ".wh.bar": &fstest.MapFile{Mode: 0600}, + "baz": &fstest.MapFile{Data: []byte("baz_layer2"), Mode: 0600}, + }, + ), + filterFunc: func(ctx context.Context, named reference.Named, image ocispecv1.Image) (archive.Filter, error) { + return forceOwnershipRWX(), nil + }, + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + require.NoError(t, err) + require.NotNil(t, fsys) + + fooData, fooErr := fs.ReadFile(fsys, "foo") + barData, barErr := fs.ReadFile(fsys, "bar") + bazData, bazErr := fs.ReadFile(fsys, "baz") + fizzData, fizzErr := fs.ReadFile(fsys, "fizz") + + assert.Equal(t, "foo_layer2", string(fooData)) + assert.NoError(t, fooErr) //nolint:testifylint + + assert.Equal(t, "baz_layer2", string(bazData)) + assert.NoError(t, bazErr) //nolint:testifylint + + assert.Empty(t, barData) + assert.ErrorIs(t, barErr, fs.ErrNotExist) //nolint:testifylint + + assert.Equal(t, "fizz_layer1", string(fizzData)) + assert.NoError(t, fizzErr) //nolint:testifylint + }, + }, + { + name: "uses filter", + ownerID: myOwner, + srcRef: myTaggedRef, + canonicalRef: myCanonicalRef, + filterFunc: func(context.Context, reference.Named, ocispecv1.Image) (archive.Filter, error) { + return allFilters( + func(h *tar.Header) (bool, error) { + if h.Name == "foo" { + return false, nil + } + h.Name += ".txt" + return true, nil + }, + forceOwnershipRWX(), + ), nil + }, + layers: layerFSIterator( + fstest.MapFS{ + "foo": &fstest.MapFile{Data: []byte("foo_layer1"), Mode: 0600}, + "bar": &fstest.MapFile{Data: []byte("bar_layer1"), Mode: 0600}, + }, + ), + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + require.NoError(t, err) + + _, fooErr := fs.Stat(fsys, "foo") + assert.ErrorIs(t, fooErr, fs.ErrNotExist) //nolint:testifylint + + _, barErr := fs.Stat(fsys, "bar") + assert.ErrorIs(t, barErr, fs.ErrNotExist) //nolint:testifylint + + _, barTxtStat := fs.Stat(fsys, "bar.txt") + assert.NoError(t, barTxtStat) 
//nolint:testifylint + }, + }, + { + name: "uses bundle filter", + ownerID: myOwner, + srcRef: myTaggedRef, + canonicalRef: myCanonicalRef, + filterFunc: filterForBundleImage(), + layers: layerFSIterator( + fstest.MapFS{ + "foo": &fstest.MapFile{Data: []byte("foo_layer1"), Mode: 0000}, + "bar": &fstest.MapFile{Data: []byte("bar_layer1"), Mode: 0000}, + }, + ), + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + require.NoError(t, err) + + fooStat, fooErr := fs.Stat(fsys, "foo") + barStat, barErr := fs.Stat(fsys, "bar") + + // You may have expected 0700 here, but after the files are stored, + // the cache recursively sets them as read-only. The fact that these + // files even exist proves that the filter executed properly because + // they are UID/GID: 0/0 in the fs layer, which we would not have + // been permitted to write to disk + assert.Equal(t, fs.FileMode(0400).String(), fooStat.Mode().String()) + assert.Equal(t, fs.FileMode(0400).String(), barStat.Mode().String()) + assert.Equal(t, myUID, int(fooStat.Sys().(*syscall.Stat_t).Uid)) + assert.Equal(t, myGID, int(fooStat.Sys().(*syscall.Stat_t).Gid)) + assert.Equal(t, myUID, int(barStat.Sys().(*syscall.Stat_t).Uid)) + assert.Equal(t, myGID, int(barStat.Sys().(*syscall.Stat_t).Gid)) + + assert.NoError(t, fooErr) + assert.NoError(t, barErr) + }, + }, + { + name: "fails if catalog filter cannot find expected image label", + ownerID: myOwner, + srcRef: myTaggedRef, + canonicalRef: myCanonicalRef, + filterFunc: filterForCatalogImage(), + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + require.ErrorContains(t, err, "catalog image is missing the required label") + }, + }, + { + name: "catalog filter includes only files under label's directory tree", + ownerID: myOwner, + srcRef: myTaggedRef, + canonicalRef: myCanonicalRef, + filterFunc: filterForCatalogImage(), + imgConfig: ocispecv1.Image{ + Config: ocispecv1.ImageConfig{ + Labels: map[string]string{ + ConfigDirLabel: "my-fav-configs", + }, + }, + }, + layers: layerFSIterator( + fstest.MapFS{ + "foo": &fstest.MapFile{Data: []byte("foo_layer1"), Mode: 0000}, + "my-fav-configs/catalog.json": &fstest.MapFile{Data: []byte(`{}`), Mode: 0000}, + }, + ), + expect: func(t *testing.T, cache *diskCache, fsys fs.FS, modTime time.Time, err error) { + require.NoError(t, err) + + _, fooErr := fs.Stat(fsys, "foo") + assert.ErrorIs(t, fooErr, fs.ErrNotExist) //nolint:testifylint + + catalogDataStat, catalogDataErr := fs.Stat(fsys, "my-fav-configs/catalog.json") + assert.NoError(t, catalogDataErr) // nolint:testifylint + + // You may have expected 0700 here, but after the files are stored, + // the cache recursively sets them as read-only. 
The fact that these + // files even exist proves that the filter executed properly because + // they are UID/GID: 0/0 in the fs layer, which we would not have + // been permitted to write to disk + assert.Equal(t, fs.FileMode(0400).String(), catalogDataStat.Mode().String()) + assert.Equal(t, myUID, int(catalogDataStat.Sys().(*syscall.Stat_t).Uid)) + assert.Equal(t, myGID, int(catalogDataStat.Sys().(*syscall.Stat_t).Gid)) + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dc := &diskCache{ + basePath: t.TempDir(), + filterFunc: tc.filterFunc, + } + if tc.setup != nil { + tc.setup(t, dc) + } + fsys, modTime, err := dc.Store(context.Background(), tc.ownerID, tc.srcRef, tc.canonicalRef, tc.imgConfig, tc.layers) + require.NotNil(t, tc.expect, "test case must include an expect function") + tc.expect(t, dc, fsys, modTime, err) + require.NoError(t, fsutil.DeleteReadOnlyRecursive(dc.basePath)) + }) + } +} + +func TestDiskCacheDelete(t *testing.T) { + const myOwner = "myOwner" + + testCases := []struct { + name string + ownerID string + setup func(*testing.T, *diskCache) + expect func(*testing.T, *diskCache, error) + }{ + { + name: "no error when owner does not exist", + ownerID: myOwner, + expect: func(t *testing.T, cache *diskCache, err error) { + assert.NoError(t, err) // nolint:testifylint + assert.NoDirExists(t, cache.ownerIDPath(myOwner)) + }, + }, + { + name: "does not delete a different owner", + ownerID: myOwner, + setup: func(t *testing.T, cache *diskCache) { + require.NoError(t, os.MkdirAll(cache.ownerIDPath("otherOwner"), 0500)) + }, + expect: func(t *testing.T, cache *diskCache, err error) { + assert.NoError(t, err) //nolint:testifylint + assert.DirExists(t, cache.ownerIDPath("otherOwner")) + assert.NoDirExists(t, cache.ownerIDPath(myOwner)) + }, + }, + { + name: "deletes read-only owner", + ownerID: myOwner, + setup: func(t *testing.T, cache *diskCache) { + ownerIDPath := cache.ownerIDPath(myOwner) + require.NoError(t, os.MkdirAll(ownerIDPath, 0700)) + require.NoError(t, os.MkdirAll(cache.unpackPath(myOwner, "subdir"), 0700)) + require.NoError(t, fsutil.SetReadOnlyRecursive(ownerIDPath)) + }, + expect: func(t *testing.T, cache *diskCache, err error) { + assert.NoError(t, err) // nolint:testifylint + assert.NoDirExists(t, cache.ownerIDPath(myOwner)) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dc := &diskCache{basePath: t.TempDir()} + if tc.setup != nil { + tc.setup(t, dc) + } + err := dc.Delete(context.Background(), tc.ownerID) + require.NotNil(t, tc.expect, "test case must include an expect function") + tc.expect(t, dc, err) + require.NoError(t, fsutil.SetWritableRecursive(dc.basePath)) + }) + } +} + +func TestDiskCacheGarbageCollection(t *testing.T) { + const myOwner = "myOwner" + myRef := mustParseCanonical(t, "my.registry.io/ns/repo@sha256:5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03") + + testCases := []struct { + name string + ownerID string + keep reference.Canonical + setup func(*testing.T, *diskCache) + expect func(*testing.T, *diskCache, error) + }{ + { + name: "error when owner ID path is not readable", + ownerID: myOwner, + keep: myRef, + setup: func(t *testing.T, cache *diskCache) { + ownerPath := cache.ownerIDPath(myOwner) + require.NoError(t, os.MkdirAll(ownerPath, 0700)) + require.NoError(t, os.Chmod(ownerPath, 0000)) + }, + expect: func(t *testing.T, cache *diskCache, err error) { + assert.ErrorIs(t, err, os.ErrPermission) + }, + }, + { + name: "error when owner ID path 
is not writeable and contains gc-able content", + ownerID: myOwner, + keep: myRef, + setup: func(t *testing.T, cache *diskCache) { + ownerPath := cache.ownerIDPath(myOwner) + require.NoError(t, os.MkdirAll(ownerPath, 0700)) + require.NoError(t, os.MkdirAll(cache.unpackPath(myOwner, "subdir"), 0700)) + require.NoError(t, os.Chmod(ownerPath, 0500)) + }, + expect: func(t *testing.T, cache *diskCache, err error) { + assert.ErrorIs(t, err, os.ErrPermission) + }, + }, + { + name: "error when base path is not writeable and contains gc-able content", + ownerID: myOwner, + keep: myRef, + setup: func(t *testing.T, cache *diskCache) { + ownerPath := cache.ownerIDPath(myOwner) + require.NoError(t, os.MkdirAll(ownerPath, 0700)) + require.NoError(t, os.MkdirAll(cache.unpackPath(myOwner, "subdir"), 0700)) + require.NoError(t, os.Chmod(cache.basePath, 0500)) + }, + expect: func(t *testing.T, cache *diskCache, err error) { + assert.ErrorIs(t, err, os.ErrPermission) + }, + }, + { + name: "no error when owner does not exist", + ownerID: myOwner, + keep: myRef, + expect: func(t *testing.T, cache *diskCache, err error) { + assert.NoError(t, err) + }, + }, + { + name: "no error when owner has no contents, deletes owner dir", + ownerID: myOwner, + keep: myRef, + setup: func(t *testing.T, cache *diskCache) { + ownerPath := cache.ownerIDPath(myOwner) + require.NoError(t, os.MkdirAll(ownerPath, 0700)) + require.NoError(t, fsutil.SetReadOnlyRecursive(ownerPath)) + }, + expect: func(t *testing.T, cache *diskCache, err error) { + assert.NoError(t, err) //nolint:testifylint + assert.NoDirExists(t, cache.ownerIDPath(myOwner)) + }, + }, + { + name: "no error when owner does not have keep reference, deletes owner dir", + ownerID: myOwner, + keep: myRef, + setup: func(t *testing.T, cache *diskCache) { + unpackPath := cache.unpackPath(myOwner, "subdir") + require.NoError(t, os.MkdirAll(cache.ownerIDPath(myOwner), 0700)) + require.NoError(t, os.MkdirAll(unpackPath, 0700)) + require.NoError(t, fsutil.SetReadOnlyRecursive(unpackPath)) + }, + expect: func(t *testing.T, cache *diskCache, err error) { + assert.NoError(t, err) //nolint:testifylint + assert.NoDirExists(t, cache.ownerIDPath(myOwner)) + }, + }, + { + name: "deletes everything _except_ keep's data", + ownerID: myOwner, + keep: myRef, + setup: func(t *testing.T, cache *diskCache) { + otherPath := cache.unpackPath(myOwner, "subdir") + unpackPath := cache.unpackPath(myOwner, myRef.Digest()) + require.NoError(t, os.MkdirAll(cache.ownerIDPath(myOwner), 0700)) + require.NoError(t, os.MkdirAll(otherPath, 0700)) + require.NoError(t, os.MkdirAll(unpackPath, 0700)) + require.NoError(t, fsutil.SetReadOnlyRecursive(otherPath)) + require.NoError(t, fsutil.SetReadOnlyRecursive(unpackPath)) + }, + expect: func(t *testing.T, cache *diskCache, err error) { + assert.NoError(t, err) //nolint:testifylint + assert.DirExists(t, cache.unpackPath(myOwner, myRef.Digest())) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dc := &diskCache{basePath: t.TempDir()} + if tc.setup != nil { + tc.setup(t, dc) + } + err := dc.GarbageCollect(context.Background(), tc.ownerID, tc.keep) + require.NotNil(t, tc.expect, "test case must include an expect function") + tc.expect(t, dc, err) + require.NoError(t, fsutil.SetWritableRecursive(dc.basePath)) + }) + } +} + +func mustParseCanonical(t *testing.T, s string) reference.Canonical { + n, err := reference.ParseNamed(s) + require.NoError(t, err) + c, ok := n.(reference.Canonical) + require.True(t, ok, "image reference 
must be canonical")
+	return c
+}
+
+func layerFSIterator(layerFilesystems ...fs.FS) iter.Seq[LayerData] {
+	return func(yield func(data LayerData) bool) {
+		for i, fsys := range layerFilesystems {
+			rc := fsTarReader(fsys)
+			ld := LayerData{
+				Reader: rc,
+				Index:  i,
+			}
+			stop := !yield(ld)
+			_ = rc.Close()
+			if stop {
+				return
+			}
+		}
+	}
+}
+
+func fsTarReader(fsys fs.FS) io.ReadCloser {
+	pr, pw := io.Pipe()
+	tw := tar.NewWriter(pw)
+	go func() {
+		err := tw.AddFS(fsys)
+		if err == nil {
+			// Close the tar writer so the final entry's padding and the
+			// end-of-archive footer are flushed to the pipe.
+			err = tw.Close()
+		}
+		_ = pw.CloseWithError(err)
+	}()
+	return pr
+}
diff --git a/internal/shared/util/image/filters.go b/internal/shared/util/image/filters.go
new file mode 100644
index 000000000..e32c7fca2
--- /dev/null
+++ b/internal/shared/util/image/filters.go
@@ -0,0 +1,65 @@
+package image
+
+import (
+	"archive/tar"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/containerd/containerd/archive"
+)
+
+// forceOwnershipRWX is a passthrough archive.Filter that sets a tar header's
+// Uid and Gid to the current process's Uid and Gid and ensures its permissions
+// give the owner full read/write/execute permission. The process Uid and Gid
+// are determined when forceOwnershipRWX is called, not when the filter function
+// is called.
+func forceOwnershipRWX() archive.Filter {
+	uid := os.Getuid()
+	gid := os.Getgid()
+	return func(h *tar.Header) (bool, error) {
+		h.Uid = uid
+		h.Gid = gid
+		h.Mode |= 0700
+		return true, nil
+	}
+}
+
+// onlyPath is an archive.Filter that keeps only files and directories that match p, or
+// (if p is a directory) are present under p. onlyPath does not remap files to a new location.
+// If an error occurs while comparing the desired path prefix with the tar header's name, the
+// filter will return false with that error.
+func onlyPath(p string) archive.Filter {
+	wantPath := path.Clean(strings.TrimPrefix(p, "/"))
+	return func(h *tar.Header) (bool, error) {
+		headerPath := path.Clean(strings.TrimPrefix(h.Name, "/"))
+		relPath, err := filepath.Rel(wantPath, headerPath)
+		if err != nil {
+			return false, fmt.Errorf("error getting relative path: %w", err)
+		}
+		if relPath == ".." || strings.HasPrefix(relPath, "../") {
+			return false, nil
+		}
+		return true, nil
+	}
+}
+
+// allFilters is a composite archive.Filter that executes each filter in the order
+// they are given. If any filter returns false or an error, the composite filter will immediately
+// return that result to the caller, and no further filters are executed.
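+//
+// For example, allFilters(onlyPath("configs"), forceOwnershipRWX()) keeps only the
+// entries at or under "configs" (a hypothetical directory) and, for each entry it
+// keeps, rewrites ownership and owner permissions.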
+func allFilters(filters ...archive.Filter) archive.Filter { + return func(h *tar.Header) (bool, error) { + for _, filter := range filters { + keep, err := filter(h) + if err != nil { + return false, err + } + if !keep { + return false, nil + } + } + return true, nil + } +} diff --git a/internal/shared/util/image/layers_test.go b/internal/shared/util/image/filters_test.go similarity index 98% rename from internal/shared/util/image/layers_test.go rename to internal/shared/util/image/filters_test.go index 0369eb364..10d97455d 100644 --- a/internal/shared/util/image/layers_test.go +++ b/internal/shared/util/image/filters_test.go @@ -119,7 +119,7 @@ func TestOnlyPath(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { for _, srcPath := range tc.srcPaths { - f := OnlyPath(srcPath) + f := onlyPath(srcPath) for _, tarHeader := range tc.tarHeaders { keep, err := f(&tarHeader) tc.assertion(&tarHeader, keep, err) diff --git a/internal/shared/util/image/layers.go b/internal/shared/util/image/layers.go deleted file mode 100644 index 7feef83ae..000000000 --- a/internal/shared/util/image/layers.go +++ /dev/null @@ -1,118 +0,0 @@ -package image - -import ( - "archive/tar" - "context" - "errors" - "fmt" - "os" - "path" - "path/filepath" - "strings" - - "github.com/containerd/containerd/archive" - "github.com/containers/image/v5/pkg/blobinfocache/none" - "github.com/containers/image/v5/pkg/compression" - "github.com/containers/image/v5/types" - "sigs.k8s.io/controller-runtime/pkg/log" - - fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs" -) - -// ForceOwnershipRWX is a passthrough archive.Filter that sets a tar header's -// Uid and Gid to the current process's Uid and Gid and ensures its permissions -// give the owner full read/write/execute permission. The process Uid and Gid -// are determined when ForceOwnershipRWX is called, not when the filter function -// is called. -func ForceOwnershipRWX() archive.Filter { - uid := os.Getuid() - gid := os.Getgid() - return func(h *tar.Header) (bool, error) { - h.Uid = uid - h.Gid = gid - h.Mode |= 0700 - return true, nil - } -} - -// OnlyPath is an archive.Filter that keeps only files and directories that match p, or -// (if p is a directory) are present under p. OnlyPath does not remap files to a new location. -// If an error occurs while comparing the desired path prefix with the tar header's name, the -// filter will return false with that error. -func OnlyPath(p string) archive.Filter { - wantPath := path.Clean(strings.TrimPrefix(p, "/")) - return func(h *tar.Header) (bool, error) { - headerPath := path.Clean(strings.TrimPrefix(h.Name, "/")) - relPath, err := filepath.Rel(wantPath, headerPath) - if err != nil { - return false, fmt.Errorf("error getting relative path: %w", err) - } - if relPath == ".." || strings.HasPrefix(relPath, "../") { - return false, nil - } - return true, nil - } -} - -// AllFilters is a composite archive.Filter that executes each filter in the order -// they are given. If any filter returns false or an error, the composite filter will immediately -// return that result to the caller, and no further filters are executed. -func AllFilters(filters ...archive.Filter) archive.Filter { - return func(h *tar.Header) (bool, error) { - for _, filter := range filters { - keep, err := filter(h) - if err != nil { - return false, err - } - if !keep { - return false, nil - } - } - return true, nil - } -} - -// ApplyLayersToDisk writes the layers from img and imgSrc to disk using the provided filter. 
-// The destination directory will be created, if necessary. If dest is already present, its -// contents will be deleted. If img and imgSrc do not represent the same image, an error will -// be returned due to a mismatch in the expected layers. Once complete, the dest and its contents -// are marked as read-only to provide a safeguard against unintended changes. -func ApplyLayersToDisk(ctx context.Context, dest string, img types.Image, imgSrc types.ImageSource, filter archive.Filter) error { - var applyOpts []archive.ApplyOpt - if filter != nil { - applyOpts = append(applyOpts, archive.WithFilter(filter)) - } - - if err := fsutil.EnsureEmptyDirectory(dest, 0700); err != nil { - return fmt.Errorf("error ensuring empty unpack directory: %w", err) - } - l := log.FromContext(ctx) - l.Info("unpacking image", "path", dest) - for i, layerInfo := range img.LayerInfos() { - if err := func() error { - layerReader, _, err := imgSrc.GetBlob(ctx, layerInfo, none.NoCache) - if err != nil { - return fmt.Errorf("error getting blob for layer[%d]: %w", i, err) - } - defer layerReader.Close() - - decompressed, _, err := compression.AutoDecompress(layerReader) - if err != nil { - return fmt.Errorf("auto-decompress failed: %w", err) - } - defer decompressed.Close() - - if _, err := archive.Apply(ctx, dest, decompressed, applyOpts...); err != nil { - return fmt.Errorf("error applying layer[%d]: %w", i, err) - } - l.Info("applied layer", "layer", i) - return nil - }(); err != nil { - return errors.Join(err, fsutil.DeleteReadOnlyRecursive(dest)) - } - } - if err := fsutil.SetReadOnlyRecursive(dest); err != nil { - return fmt.Errorf("error making unpack directory read-only: %w", err) - } - return nil -} diff --git a/internal/shared/util/image/mocks.go b/internal/shared/util/image/mocks.go new file mode 100644 index 000000000..903983181 --- /dev/null +++ b/internal/shared/util/image/mocks.go @@ -0,0 +1,61 @@ +package image + +import ( + "context" + "io/fs" + "iter" + "time" + + "github.com/containers/image/v5/docker/reference" + ocispecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +var _ Puller = (*MockPuller)(nil) + +// MockPuller is a utility for mocking out a Puller interface +type MockPuller struct { + ImageFS fs.FS + Ref reference.Canonical + ModTime time.Time + Error error +} + +func (ms *MockPuller) Pull(_ context.Context, _, _ string, _ Cache) (fs.FS, reference.Canonical, time.Time, error) { + if ms.Error != nil { + return nil, nil, time.Time{}, ms.Error + } + + return ms.ImageFS, ms.Ref, ms.ModTime, nil +} + +var _ Cache = (*MockCache)(nil) + +type MockCache struct { + FetchFS fs.FS + FetchModTime time.Time + FetchError error + + StoreFS fs.FS + StoreModTime time.Time + StoreError error + + DeleteErr error + + GarbageCollectError error +} + +func (m MockCache) Fetch(_ context.Context, _ string, _ reference.Canonical) (fs.FS, time.Time, error) { + return m.FetchFS, m.FetchModTime, m.FetchError +} + +func (m MockCache) Store(_ context.Context, _ string, _ reference.Named, _ reference.Canonical, _ ocispecv1.Image, _ iter.Seq[LayerData]) (fs.FS, time.Time, error) { + return m.StoreFS, m.StoreModTime, m.StoreError +} + +func (m MockCache) Delete(_ context.Context, _ string) error { + return m.DeleteErr +} + +func (m MockCache) GarbageCollect(_ context.Context, _ string, _ reference.Canonical) error { + return m.GarbageCollectError +} diff --git a/internal/shared/util/image/pull.go b/internal/shared/util/image/pull.go new file mode 100644 index 000000000..cbef0dcd7 --- /dev/null +++ 
b/internal/shared/util/image/pull.go
@@ -0,0 +1,275 @@
+package image
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/fs"
+	"iter"
+	"os"
+	"time"
+
+	"github.com/containers/image/v5/copy"
+	"github.com/containers/image/v5/docker"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/oci/layout"
+	"github.com/containers/image/v5/pkg/blobinfocache/none"
+	"github.com/containers/image/v5/pkg/compression"
+	"github.com/containers/image/v5/pkg/sysregistriesv2"
+	"github.com/containers/image/v5/signature"
+	"github.com/containers/image/v5/types"
+	"github.com/go-logr/logr"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"github.com/operator-framework/operator-controller/internal/shared/util/http"
+)
+
+type Puller interface {
+	Pull(context.Context, string, string, Cache) (fs.FS, reference.Canonical, time.Time, error)
+}
+
+var insecurePolicy = []byte(`{"default":[{"type":"insecureAcceptAnything"}]}`)
+
+type ContainersImagePuller struct {
+	SourceCtxFunc func(context.Context) (*types.SystemContext, error)
+}
+
+func (p *ContainersImagePuller) Pull(ctx context.Context, ownerID string, ref string, cache Cache) (fs.FS, reference.Canonical, time.Time, error) {
+	srcCtx, err := p.SourceCtxFunc(ctx)
+	if err != nil {
+		return nil, nil, time.Time{}, err
+	}
+
+	dockerRef, err := reference.ParseNamed(ref)
+	if err != nil {
+		return nil, nil, time.Time{}, reconcile.TerminalError(fmt.Errorf("error parsing image reference %q: %w", ref, err))
+	}
+
+	l := log.FromContext(ctx, "ref", dockerRef.String())
+	ctx = log.IntoContext(ctx, l)
+
+	fsys, canonicalRef, modTime, err := p.pull(ctx, ownerID, dockerRef, cache, srcCtx)
+	if err != nil {
+		// Log any CertificateVerificationErrors, and log Docker Certificates if necessary
+		if http.LogCertificateVerificationError(err, l) {
+			http.LogDockerCertificates(srcCtx.DockerCertPath, l)
+		}
+		return nil, nil, time.Time{}, err
+	}
+	return fsys, canonicalRef, modTime, nil
+}
+
+func (p *ContainersImagePuller) pull(ctx context.Context, ownerID string, dockerRef reference.Named, cache Cache, srcCtx *types.SystemContext) (fs.FS, reference.Canonical, time.Time, error) {
+	l := log.FromContext(ctx)
+
+	dockerImgRef, err := docker.NewReference(dockerRef)
+	if err != nil {
+		return nil, nil, time.Time{}, reconcile.TerminalError(fmt.Errorf("error creating reference: %w", err))
+	}
+
+	// Reload registries cache in case of configuration update
+	sysregistriesv2.InvalidateCache()
+
+	//////////////////////////////////////////////////////
+	//
+	// Resolve a canonical reference for the image.
+	//
+	//////////////////////////////////////////////////////
+	canonicalRef, err := resolveCanonicalRef(ctx, dockerImgRef, srcCtx)
+	if err != nil {
+		return nil, nil, time.Time{}, err
+	}
+
+	l = l.WithValues("digest", canonicalRef.Digest().String())
+	ctx = log.IntoContext(ctx, l)
+
+	///////////////////////////////////////////////////////
+	//
+	// Check if the cache has already applied the
+	// canonical ref. If so, we're done.
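+	// A cache hit means a previous pull already unpacked this exact digest for
+	// this owner, so the copy and unpack steps below can be skipped entirely.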
+	//
+	///////////////////////////////////////////////////////
+	fsys, modTime, err := cache.Fetch(ctx, ownerID, canonicalRef)
+	if err != nil {
+		return nil, nil, time.Time{}, fmt.Errorf("error checking cache for existing content: %w", err)
+	}
+	if fsys != nil {
+		return fsys, canonicalRef, modTime, nil
+	}
+
+	//////////////////////////////////////////////////////
+	//
+	// Create an OCI layout reference for the destination,
+	// where we will temporarily store the image in order
+	// to unpack it.
+	//
+	// We use the OCI layout as a temporary storage because
+	// copy.Image can concurrently pull all the layers.
+	//
+	//////////////////////////////////////////////////////
+	layoutDir, err := os.MkdirTemp("", fmt.Sprintf("oci-layout-%s-", ownerID))
+	if err != nil {
+		return nil, nil, time.Time{}, fmt.Errorf("error creating temporary directory: %w", err)
+	}
+	defer func() {
+		if err := os.RemoveAll(layoutDir); err != nil {
+			l.Error(err, "error removing temporary OCI layout directory")
+		}
+	}()
+
+	layoutImgRef, err := layout.NewReference(layoutDir, canonicalRef.String())
+	if err != nil {
+		return nil, nil, time.Time{}, fmt.Errorf("error creating reference: %w", err)
+	}
+
+	//////////////////////////////////////////////////////
+	//
+	// Load an image signature policy and build
+	// a policy context for the image pull.
+	//
+	//////////////////////////////////////////////////////
+	policyContext, err := loadPolicyContext(srcCtx, l)
+	if err != nil {
+		return nil, nil, time.Time{}, fmt.Errorf("error loading policy context: %w", err)
+	}
+	defer func() {
+		if err := policyContext.Destroy(); err != nil {
+			l.Error(err, "error destroying policy context")
+		}
+	}()
+
+	//////////////////////////////////////////////////////
+	//
+	// Pull the image from the source to the destination
+	//
+	//////////////////////////////////////////////////////
+	if _, err := copy.Image(ctx, policyContext, layoutImgRef, dockerImgRef, &copy.Options{
+		SourceCtx: srcCtx,
+		// We use the OCI layout as a temporary storage and
+		// pushing signatures for OCI images is not supported
+		// so we remove the source signatures when copying.
+		// Signature validation will still be performed
+		// according to a provided policy context.
+		RemoveSignatures: true,
+	}); err != nil {
+		return nil, nil, time.Time{}, fmt.Errorf("error copying image: %w", err)
+	}
+	l.Info("pulled image")
+
+	//////////////////////////////////////////////////////
+	//
+	// Mount the image we just pulled
+	//
+	//////////////////////////////////////////////////////
+	fsys, modTime, err = p.applyImage(ctx, ownerID, dockerRef, canonicalRef, layoutImgRef, cache, srcCtx)
+	if err != nil {
+		return nil, nil, time.Time{}, fmt.Errorf("error applying image: %w", err)
+	}
+
+	/////////////////////////////////////////////////////////////
+	//
+	// Clean up any images from the cache that we no longer need.
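+	// Everything stored under this owner except the digest we just unpacked
+	// is deleted.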
+	//
+	/////////////////////////////////////////////////////////////
+	if err := cache.GarbageCollect(ctx, ownerID, canonicalRef); err != nil {
+		return nil, nil, time.Time{}, fmt.Errorf("error deleting old images: %w", err)
+	}
+	return fsys, canonicalRef, modTime, nil
+}
+
+func resolveCanonicalRef(ctx context.Context, imgRef types.ImageReference, srcCtx *types.SystemContext) (reference.Canonical, error) {
+	if canonicalRef, ok := imgRef.DockerReference().(reference.Canonical); ok {
+		return canonicalRef, nil
+	}
+
+	imgSrc, err := imgRef.NewImageSource(ctx, srcCtx)
+	if err != nil {
+		return nil, fmt.Errorf("error creating image source: %w", err)
+	}
+	defer imgSrc.Close()
+
+	manifestBlob, _, err := imgSrc.GetManifest(ctx, nil)
+	if err != nil {
+		return nil, fmt.Errorf("error getting manifest: %w", err)
+	}
+	imgDigest, err := manifest.Digest(manifestBlob)
+	if err != nil {
+		return nil, fmt.Errorf("error getting digest of manifest: %w", err)
+	}
+	canonicalRef, err := reference.WithDigest(reference.TrimNamed(imgRef.DockerReference()), imgDigest)
+	if err != nil {
+		return nil, fmt.Errorf("error creating canonical reference: %w", err)
+	}
+	return canonicalRef, nil
+}
+
+func (p *ContainersImagePuller) applyImage(ctx context.Context, ownerID string, srcRef reference.Named, canonicalRef reference.Canonical, srcImgRef types.ImageReference, cache Cache, sourceContext *types.SystemContext) (fs.FS, time.Time, error) {
+	imgSrc, err := srcImgRef.NewImageSource(ctx, sourceContext)
+	if err != nil {
+		return nil, time.Time{}, fmt.Errorf("error creating image source: %w", err)
+	}
+	img, err := image.FromSource(ctx, sourceContext, imgSrc)
+	if err != nil {
+		return nil, time.Time{}, errors.Join(
+			fmt.Errorf("error reading image: %w", err),
+			imgSrc.Close(),
+		)
+	}
+	defer func() {
+		if err := img.Close(); err != nil {
+			panic(err)
+		}
+	}()
+
+	ociImg, err := img.OCIConfig(ctx)
+	if err != nil {
+		return nil, time.Time{}, err
+	}
+
+	layerIter := iter.Seq[LayerData](func(yield func(LayerData) bool) {
+		for i, layerInfo := range img.LayerInfos() {
+			ld := LayerData{Index: i}
+			layerReader, _, err := imgSrc.GetBlob(ctx, layerInfo, none.NoCache)
+			if err != nil {
+				// Yield the error and stop iterating: layerReader is nil here,
+				// so we must not fall through to the close/decompress path below.
+				ld.Err = fmt.Errorf("error getting layer blob reader: %w", err)
+				yield(ld)
+				return
+			}
+			defer layerReader.Close()
+
+			decompressed, _, err := compression.AutoDecompress(layerReader)
+			if err != nil {
+				ld.Err = fmt.Errorf("error decompressing layer: %w", err)
+				yield(ld)
+				return
+			}
+			defer decompressed.Close()
+
+			ld.Reader = decompressed
+			if !yield(ld) {
+				return
+			}
+		}
+	})
+
+	return cache.Store(ctx, ownerID, srcRef, canonicalRef, *ociImg, layerIter)
+}
+
+func loadPolicyContext(sourceContext *types.SystemContext, l logr.Logger) (*signature.PolicyContext, error) {
+	policy, err := signature.DefaultPolicy(sourceContext)
+	// TODO: there are security implications to silently moving to an insecure policy
+	// tracking issue: https://github.com/operator-framework/operator-controller/issues/1622
+	if err != nil {
+		l.Info("no default policy found, using insecure policy")
+		policy, err = signature.NewPolicyFromBytes(insecurePolicy)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("error loading signature policy: %w", err)
+	}
+	return signature.NewPolicyContext(policy)
+}
diff --git a/internal/shared/util/image/pull_test.go b/internal/shared/util/image/pull_test.go
new file mode 100644
index 000000000..5aca3d75e
--- /dev/null
+++ b/internal/shared/util/image/pull_test.go
@@ -0,0 +1,305 @@
+package image
+
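+// These tests exercise ContainersImagePuller against an in-process registry
+// (httptest.NewServer wrapping go-containerregistry's registry.New), so they
+// require no external network access or pre-existing image content.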
+import ( + "context" + "errors" + "fmt" + "io/fs" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "testing" + "testing/fstest" + "time" + + "github.com/BurntSushi/toml" + "github.com/containerd/containerd/archive" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" + "github.com/google/go-containerregistry/pkg/crane" + "github.com/google/go-containerregistry/pkg/registry" + "github.com/opencontainers/go-digest" + ocispecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + fsutil "github.com/operator-framework/operator-controller/internal/shared/util/fs" +) + +const ( + testFileName string = "test-file" + testFileContents string = "test-content" +) + +func TestContainersImagePuller_Pull(t *testing.T) { + const myOwner = "myOwner" + myTagRef, myCanonicalRef, shutdown := setupRegistry(t) + defer shutdown() + + myModTime := time.Date(1985, 10, 25, 7, 53, 0, 0, time.FixedZone("PDT", -8*60*60)) + defaultContextFunc := func(context.Context) (*types.SystemContext, error) { return &types.SystemContext{}, nil } + + testCases := []struct { + name string + ownerID string + srcRef string + cache Cache + contextFunc func(context.Context) (*types.SystemContext, error) + expect func(*testing.T, fs.FS, reference.Canonical, time.Time, error) + }{ + { + name: "returns terminal error for invalid reference", + ownerID: myOwner, + srcRef: "invalid-src-ref", + contextFunc: defaultContextFunc, + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + require.ErrorContains(t, err, "error parsing image reference") + require.ErrorIs(t, err, reconcile.TerminalError(nil)) + }, + }, + { + name: "returns terminal error if reference lacks tag or digest", + ownerID: myOwner, + srcRef: reference.TrimNamed(myTagRef).String(), + contextFunc: defaultContextFunc, + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + require.ErrorContains(t, err, "error creating reference") + require.ErrorIs(t, err, reconcile.TerminalError(nil)) + }, + }, + { + name: "returns error if failure getting SystemContext", + ownerID: myOwner, + srcRef: myCanonicalRef.String(), + contextFunc: func(ctx context.Context) (*types.SystemContext, error) { + return nil, errors.New("sourcecontextfunc error") + }, + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + require.ErrorContains(t, err, "sourcecontextfunc error") + }, + }, + { + name: "returns error if failure connecting to reference's registry", + ownerID: myOwner, + srcRef: myTagRef.String() + "-non-existent", + contextFunc: defaultContextFunc, + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + require.ErrorContains(t, err, "pinging container registry") + }, + }, + { + name: "returns error if tag ref is not found", + ownerID: myOwner, + srcRef: myTagRef.String() + "-non-existent", + contextFunc: buildSourceContextFunc(t, myTagRef), + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + require.ErrorContains(t, err, "manifest unknown") + }, + }, + { + name: "return error if cache fetch fails", + ownerID: myOwner, + srcRef: myCanonicalRef.String(), + cache: MockCache{FetchError: errors.New("fetch error")}, + 
contextFunc: defaultContextFunc, + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + require.ErrorContains(t, err, "fetch error") + }, + }, + { + name: "return canonical ref's data from cache, if present", + ownerID: myOwner, + srcRef: myCanonicalRef.String(), + cache: MockCache{ + FetchFS: fstest.MapFS{ + testFileName: &fstest.MapFile{Data: []byte(testFileContents)}, + }, + FetchModTime: myModTime, + }, + contextFunc: buildSourceContextFunc(t, myCanonicalRef), + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + require.NoError(t, err) + actualFileData, err := fs.ReadFile(fsys, testFileName) + require.NoError(t, err) + assert.Equal(t, testFileContents, string(actualFileData)) + + assert.Equal(t, myCanonicalRef.String(), canonical.String()) + assert.Equal(t, myModTime, modTime) + }, + }, + { + name: "return tag ref's data from cache, if present", + ownerID: myOwner, + srcRef: myTagRef.String(), + cache: MockCache{ + FetchFS: fstest.MapFS{ + testFileName: &fstest.MapFile{Data: []byte(testFileContents)}, + }, + FetchModTime: myModTime, + }, + contextFunc: buildSourceContextFunc(t, myTagRef), + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + require.NoError(t, err) + actualFileData, err := fs.ReadFile(fsys, testFileName) + require.NoError(t, err) + assert.Equal(t, testFileContents, string(actualFileData)) + + assert.Equal(t, myCanonicalRef.String(), canonical.String()) + assert.Equal(t, myModTime, modTime) + }, + }, + { + name: "returns error if failure storing content in cache", + ownerID: myOwner, + srcRef: myCanonicalRef.String(), + cache: MockCache{ + StoreError: errors.New("store error"), + }, + contextFunc: buildSourceContextFunc(t, myCanonicalRef), + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + require.ErrorContains(t, err, "store error") + }, + }, + { + name: "returns stored data upon pull success", + ownerID: myOwner, + srcRef: myTagRef.String(), + cache: MockCache{ + StoreFS: fstest.MapFS{ + testFileName: &fstest.MapFile{Data: []byte(testFileContents)}, + }, + StoreModTime: myModTime, + }, + contextFunc: buildSourceContextFunc(t, myTagRef), + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + require.NoError(t, err) + + actualFileData, err := fs.ReadFile(fsys, testFileName) + require.NoError(t, err) + assert.Equal(t, testFileContents, string(actualFileData)) + + assert.Equal(t, myCanonicalRef.String(), canonical.String()) + assert.Equal(t, myModTime, modTime) + }, + }, + { + name: "returns error if cache garbage collection fails", + ownerID: myOwner, + srcRef: myTagRef.String(), + cache: MockCache{ + StoreFS: fstest.MapFS{ + testFileName: &fstest.MapFile{Data: []byte(testFileContents)}, + }, + StoreModTime: myModTime, + GarbageCollectError: errors.New("garbage collect error"), + }, + contextFunc: buildSourceContextFunc(t, myTagRef), + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + assert.Nil(t, fsys) + assert.Nil(t, canonical) + assert.Zero(t, modTime) + assert.ErrorContains(t, err, "garbage collect error") + }, + }, + { + name: "succeeds storing actual image contents using real cache", + ownerID: myOwner, + srcRef: myTagRef.String(), + cache: &diskCache{ + basePath: t.TempDir(), + filterFunc: func(ctx context.Context, named reference.Named, image 
ocispecv1.Image) (archive.Filter, error) { + return forceOwnershipRWX(), nil + }, + }, + contextFunc: buildSourceContextFunc(t, myTagRef), + expect: func(t *testing.T, fsys fs.FS, canonical reference.Canonical, modTime time.Time, err error) { + require.NoError(t, err) + actualFileData, err := fs.ReadFile(fsys, testFileName) + require.NoError(t, err) + assert.Equal(t, testFileContents, string(actualFileData)) + assert.Equal(t, myCanonicalRef.String(), canonical.String()) + + // Don't assert modTime since it is an implementation detail + // of the cache, which we are not testing here. + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + puller := ContainersImagePuller{ + SourceCtxFunc: tc.contextFunc, + } + fsys, canonicalRef, modTime, err := puller.Pull(context.Background(), tc.ownerID, tc.srcRef, tc.cache) + require.NotNil(t, tc.expect, "expect function must be defined") + tc.expect(t, fsys, canonicalRef, modTime, err) + + if dc, ok := tc.cache.(*diskCache); ok && dc.basePath != "" { + require.NoError(t, fsutil.DeleteReadOnlyRecursive(dc.basePath)) + } + }) + } +} + +func setupRegistry(t *testing.T) (reference.NamedTagged, reference.Canonical, func()) { + server := httptest.NewServer(registry.New()) + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + + // Generate an image with file contents + img, err := crane.Image(map[string][]byte{testFileName: []byte(testFileContents)}) + require.NoError(t, err) + + imageTagRef, err := newReference(serverURL.Host, "test-repo/test-image", "test-tag") + require.NoError(t, err) + + imgDigest, err := img.Digest() + require.NoError(t, err) + + imageDigestRef, err := reference.WithDigest(reference.TrimNamed(imageTagRef), digest.Digest(imgDigest.String())) + require.NoError(t, err) + + require.NoError(t, crane.Push(img, imageTagRef.String())) + + cleanup := func() { + server.Close() + } + return imageTagRef, imageDigestRef, cleanup +} + +func newReference(host, repo, tag string) (reference.NamedTagged, error) { + ref, err := reference.ParseNamed(fmt.Sprintf("%s/%s", host, repo)) + if err != nil { + return nil, err + } + return reference.WithTag(ref, tag) +} + +func buildSourceContextFunc(t *testing.T, ref reference.Named) func(context.Context) (*types.SystemContext, error) { + return func(ctx context.Context) (*types.SystemContext, error) { + // Build a containers/image context that allows pulling from the test registry insecurely + registriesConf := sysregistriesv2.V2RegistriesConf{Registries: []sysregistriesv2.Registry{ + { + Prefix: reference.Domain(ref), + Endpoint: sysregistriesv2.Endpoint{ + Location: reference.Domain(ref), + Insecure: true, + }, + }, + }} + configDir := t.TempDir() + registriesConfPath := filepath.Join(configDir, "registries.conf") + f, err := os.Create(registriesConfPath) + require.NoError(t, err) + + enc := toml.NewEncoder(f) + require.NoError(t, enc.Encode(registriesConf)) + require.NoError(t, f.Close()) + + return &types.SystemContext{ + SystemRegistriesConfPath: registriesConfPath, + }, nil + } +}