From 6c4be889280504fd208c1f06cb2f77f65a124da0 Mon Sep 17 00:00:00 2001 From: Jordan Keister Date: Thu, 6 Feb 2025 11:11:59 -0600 Subject: [PATCH] linter feature code - errcheck - stylecheck - nestif - prealloc - errorlint - nonamedreturns Signed-off-by: Jordan Keister --- alpha/action/render.go | 13 +++- alpha/declcfg/load.go | 6 +- alpha/declcfg/model_to_declcfg.go | 1 + alpha/declcfg/write.go | 34 +++++---- alpha/model/error.go | 7 +- alpha/model/model.go | 1 + alpha/template/semver/semver.go | 3 + cmd/opm/alpha/bundle/extract.go | 2 +- cmd/opm/alpha/bundle/unpack.go | 1 + cmd/opm/alpha/list/cmd.go | 12 ++- cmd/opm/alpha/template/basic.go | 4 +- cmd/opm/alpha/template/semver.go | 4 +- cmd/opm/generate/cmd.go | 2 +- cmd/opm/internal/util/util.go | 4 +- cmd/opm/main.go | 11 ++- cmd/opm/render/cmd.go | 4 +- cmd/opm/serve/serve.go | 16 ++-- pkg/api/api_to_model.go | 1 + pkg/api/model_to_api.go | 12 +-- pkg/cache/json.go | 4 +- pkg/cache/pkgs.go | 2 + pkg/cache/pogrebv1.go | 10 +-- pkg/cache/tar.go | 2 +- pkg/client/client.go | 3 +- pkg/client/errors.go | 1 + pkg/client/kubeclient.go | 10 ++- pkg/configmap/configmap.go | 17 +++-- pkg/containertools/containertool.go | 10 ++- pkg/containertools/dockerfilegenerator.go | 8 +- pkg/containertools/labelreader.go | 1 + pkg/containertools/runner.go | 10 ++- pkg/image/containerdregistry/options.go | 14 ++-- pkg/image/containerdregistry/registry.go | 2 +- pkg/image/execregistry/registry.go | 2 +- pkg/lib/bundle/build.go | 1 + pkg/lib/bundle/errors.go | 1 + pkg/lib/bundle/generate.go | 10 ++- pkg/lib/bundle/interpreter.go | 5 +- pkg/lib/bundle/validate.go | 16 +++- pkg/lib/image/registry.go | 3 +- pkg/lib/indexer/indexer.go | 18 +++-- pkg/lib/registry/registry.go | 7 +- pkg/lib/semver/semver.go | 2 + pkg/lib/tmp/copy.go | 2 +- pkg/lib/unstructured/unstructured.go | 2 +- pkg/lib/validation/bundle.go | 9 ++- pkg/mirror/options.go | 28 +++---- pkg/prettyunmarshaler/prettyunmarshaler.go | 14 ++-- pkg/registry/bundle.go | 15 +++- pkg/registry/bundlegraphloader.go | 3 + pkg/registry/channelupdateoptions.go | 1 + pkg/registry/csv.go | 39 +++++----- pkg/registry/decode.go | 22 +++--- pkg/registry/directoryGraphLoader.go | 1 + pkg/registry/empty.go | 12 +-- pkg/registry/parse.go | 3 + pkg/registry/populator.go | 3 + pkg/registry/registry_to_model.go | 3 +- pkg/registry/types.go | 8 ++ pkg/sqlite/configmap.go | 16 ++-- pkg/sqlite/conversion.go | 1 + pkg/sqlite/db_options.go | 2 + pkg/sqlite/deprecate.go | 1 + pkg/sqlite/deprecationmessage.go | 2 +- pkg/sqlite/directory.go | 4 +- pkg/sqlite/load.go | 60 ++++++++------- pkg/sqlite/loadprocs.go | 3 + pkg/sqlite/migrations/001_related_images.go | 12 +-- pkg/sqlite/migrations/003_required_apis.go | 40 +++++----- .../migrations/005_version_skiprange.go | 4 +- .../006_associate_apis_with_bundle.go | 4 +- pkg/sqlite/migrations/007_replaces_skips.go | 14 ++-- pkg/sqlite/migrations/009_properties.go | 4 +- .../010_set_bundlepath_pkg_property.go | 8 +- pkg/sqlite/migrations/migrations.go | 1 + pkg/sqlite/migrator.go | 14 ++-- pkg/sqlite/migrator_test.go | 18 +++-- pkg/sqlite/query.go | 74 ++++++++++--------- test/e2e/ctx/ctx.go | 1 + test/e2e/ctx/provisioner_kubeconfig.go | 1 + 80 files changed, 445 insertions(+), 301 deletions(-) diff --git a/alpha/action/render.go b/alpha/action/render.go index 07631b7c4..6bb64bef2 100644 --- a/alpha/action/render.go +++ b/alpha/action/render.go @@ -70,10 +70,13 @@ func (r Render) Run(ctx context.Context) (*declcfg.DeclarativeConfig, error) { if err != nil { return nil, fmt.Errorf("create 
registry: %v", err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() r.Registry = reg } + // nolint:prealloc var cfgs []declcfg.DeclarativeConfig for _, ref := range r.Refs { cfg, err := r.renderReference(ctx, ref) @@ -123,6 +126,7 @@ func (r Render) renderReference(ctx context.Context, ref string) (*declcfg.Decla if err != nil { return r.imageToDeclcfg(ctx, ref) } + // nolint:nestif if stat.IsDir() { dirEntries, err := os.ReadDir(ref) if err != nil { @@ -178,6 +182,7 @@ func (r Render) imageToDeclcfg(ctx context.Context, imageRef string) (*declcfg.D } var cfg *declcfg.DeclarativeConfig + // nolint:nestif if dbFile, ok := labels[containertools.DbLocationLabel]; ok { if !r.AllowedRefMask.Allowed(RefSqliteImage) { return nil, fmt.Errorf("cannot render sqlite image: %w", ErrNotAllowed) @@ -326,10 +331,10 @@ func bundleToDeclcfg(bundle *registry.Bundle) (*declcfg.Bundle, error) { return nil, fmt.Errorf("get related images for bundle %q: %v", bundle.Name, err) } - var csvJson []byte + var csvJSON []byte for _, obj := range bundle.Objects { if obj.GetKind() == "ClusterServiceVersion" { - csvJson, err = json.Marshal(obj) + csvJSON, err = json.Marshal(obj) if err != nil { return nil, fmt.Errorf("marshal CSV JSON for bundle %q: %v", bundle.Name, err) } @@ -344,7 +349,7 @@ func bundleToDeclcfg(bundle *registry.Bundle) (*declcfg.Bundle, error) { Properties: props, RelatedImages: relatedImages, Objects: objs, - CsvJSON: string(csvJson), + CsvJSON: string(csvJSON), }, nil } diff --git a/alpha/declcfg/load.go b/alpha/declcfg/load.go index f811b3145..7cf43ccfe 100644 --- a/alpha/declcfg/load.go +++ b/alpha/declcfg/load.go @@ -174,7 +174,7 @@ func sendPaths(ctx context.Context, root fs.FS, pathChan chan<- string) error { }) } -func parseMetaPaths(ctx context.Context, root fs.FS, pathChan <-chan string, walkFn WalkMetasFSFunc, options LoadOptions) error { +func parseMetaPaths(ctx context.Context, root fs.FS, pathChan <-chan string, walkFn WalkMetasFSFunc, _ LoadOptions) error { for { select { case <-ctx.Done(): // don't block on receiving from pathChan @@ -205,11 +205,11 @@ func readBundleObjects(b *Bundle) error { if err := json.Unmarshal(props.Value, &obj); err != nil { return fmt.Errorf("package %q, bundle %q: parse property at index %d as bundle object: %v", b.Package, b.Name, i, err) } - objJson, err := yaml.ToJSON(obj.Data) + objJSON, err := yaml.ToJSON(obj.Data) if err != nil { return fmt.Errorf("package %q, bundle %q: convert bundle object property at index %d to JSON: %v", b.Package, b.Name, i, err) } - b.Objects = append(b.Objects, string(objJson)) + b.Objects = append(b.Objects, string(objJSON)) } b.CsvJSON = extractCSV(b.Objects) return nil diff --git a/alpha/declcfg/model_to_declcfg.go b/alpha/declcfg/model_to_declcfg.go index 14424d9f0..23f23c482 100644 --- a/alpha/declcfg/model_to_declcfg.go +++ b/alpha/declcfg/model_to_declcfg.go @@ -103,6 +103,7 @@ func traverseModelChannels(mpkg model.Package) ([]Channel, []Bundle) { channels = append(channels, c) } + // nolint:prealloc var bundles []Bundle for _, b := range bundleMap { b.Properties = property.Deduplicate(b.Properties) diff --git a/alpha/declcfg/write.go b/alpha/declcfg/write.go index 9856c2e1e..686d5534b 100644 --- a/alpha/declcfg/write.go +++ b/alpha/declcfg/write.go @@ -128,6 +128,7 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) for _, c := range cfg.Channels { filteredChannel := writer.filterChannel(&c, versionMap, minVersion, minEdgePackage) + // nolint:nestif if filteredChannel 
!= nil { pkgBuilder, ok := pkgs[c.Package] if !ok { @@ -154,17 +155,17 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) bundleDeprecation = ":::deprecated" } - entryId := fmt.Sprintf("%s-%s", channelID, ce.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]%s\n", entryId, ce.Name, bundleDeprecation)) + entryID := fmt.Sprintf("%s-%s", channelID, ce.Name) + pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]%s\n", entryID, ce.Name, bundleDeprecation)) if len(ce.Replaces) > 0 { - replacesId := fmt.Sprintf("%s-%s", channelID, ce.Replaces) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", replacesId, ce.Replaces, "replace", entryId, ce.Name)) + replacesID := fmt.Sprintf("%s-%s", channelID, ce.Replaces) + pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", replacesID, ce.Replaces, "replace", entryID, ce.Name)) } if len(ce.Skips) > 0 { for _, s := range ce.Skips { - skipsId := fmt.Sprintf("%s-%s", channelID, s) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", skipsId, s, "skip", entryId, ce.Name)) + skipsID := fmt.Sprintf("%s-%s", channelID, s) + pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", skipsID, s, "skip", entryID, ce.Name)) } } if len(ce.SkipRange) > 0 { @@ -172,8 +173,8 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) if err == nil { for _, edgeName := range filteredChannel.Entries { if skipRange(versionMap[edgeName.Name]) { - skipRangeId := fmt.Sprintf("%s-%s", channelID, edgeName.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- \"%s(%s)\" --> %s[%q]\n", skipRangeId, edgeName.Name, "skipRange", ce.SkipRange, entryId, ce.Name)) + skipRangeID := fmt.Sprintf("%s-%s", channelID, edgeName.Name) + pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- \"%s(%s)\" --> %s[%q]\n", skipRangeID, edgeName.Name, "skipRange", ce.SkipRange, entryID, ce.Name)) } } } else { @@ -186,8 +187,8 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) } } - out.Write([]byte("graph LR\n")) - out.Write([]byte(fmt.Sprintf(" classDef deprecated fill:#E8960F\n"))) + _, _ = out.Write([]byte("graph LR\n")) + _, _ = out.Write([]byte(" classDef deprecated fill:#E8960F\n")) pkgNames := []string{} for pname := range pkgs { pkgNames = append(pkgNames, pname) @@ -196,19 +197,19 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) return pkgNames[i] < pkgNames[j] }) for _, pkgName := range pkgNames { - out.Write([]byte(fmt.Sprintf(" %%%% package %q\n", pkgName))) - out.Write([]byte(fmt.Sprintf(" subgraph %q\n", pkgName))) - out.Write([]byte(pkgs[pkgName].String())) - out.Write([]byte(" end\n")) + _, _ = out.Write([]byte(fmt.Sprintf(" %%%% package %q\n", pkgName))) + _, _ = out.Write([]byte(fmt.Sprintf(" subgraph %q\n", pkgName))) + _, _ = out.Write([]byte(pkgs[pkgName].String())) + _, _ = out.Write([]byte(" end\n")) } if deprecatedPackage != "" { - out.Write([]byte(fmt.Sprintf("style %s fill:#989695\n", deprecatedPackage))) + _, _ = out.Write([]byte(fmt.Sprintf("style %s fill:#989695\n", deprecatedPackage))) } if len(deprecatedChannels) > 0 { for _, deprecatedChannel := range deprecatedChannels { - out.Write([]byte(fmt.Sprintf("style %s fill:#DCD0FF\n", deprecatedChannel))) + _, _ = out.Write([]byte(fmt.Sprintf("style %s fill:#DCD0FF\n", deprecatedChannel))) } } @@ -236,6 +237,7 @@ func (writer *MermaidWriter) filterChannel(c *Channel, versionMap map[string]sem out := &Channel{Name: c.Name, Package: c.Package, Properties: c.Properties, Entries: 
[]ChannelEntry{}} for _, ce := range c.Entries { filteredCe := ChannelEntry{Name: ce.Name} + // nolint:nestif if writer.MinEdgeName == "" { // no minimum-edge specified filteredCe.SkipRange = ce.SkipRange diff --git a/alpha/model/error.go b/alpha/model/error.go index 0ad0f7adb..e99cb2ca8 100644 --- a/alpha/model/error.go +++ b/alpha/model/error.go @@ -2,6 +2,7 @@ package model import ( "bytes" + "errors" "fmt" "strings" ) @@ -31,7 +32,7 @@ func (v *validationError) Error() string { func (v *validationError) errorPrefix(prefix []rune, last bool, seen []error) string { for _, s := range seen { - if v == s { + if errors.Is(v, s) { return "" } } @@ -56,7 +57,9 @@ func (v *validationError) errorPrefix(prefix []rune, last bool, seen []error) st } else { subPrefix = append(subPrefix, []rune("├── ")...) } - if verr, ok := serr.(*validationError); ok { + + var verr *validationError + if errors.As(serr, &verr) { errMsg.WriteString(verr.errorPrefix(subPrefix, subLast, seen)) } else { errMsg.WriteString(fmt.Sprintf("%s%s\n", string(subPrefix), serr)) diff --git a/alpha/model/model.go b/alpha/model/model.go index d570f93c3..9b4e3ae85 100644 --- a/alpha/model/model.go +++ b/alpha/model/model.go @@ -161,6 +161,7 @@ func (i *Icon) Validate() error { return result.orNil() } +// nolint:unused func (i *Icon) validateData() error { if !filetype.IsImage(i.Data) { return errors.New("icon data is not an image") diff --git a/alpha/template/semver/semver.go b/alpha/template/semver/semver.go index d44e1c9d9..2f0a8676b 100644 --- a/alpha/template/semver/semver.go +++ b/alpha/template/semver/semver.go @@ -22,6 +22,7 @@ func (t Template) Render(ctx context.Context) (*declcfg.DeclarativeConfig, error return nil, fmt.Errorf("render: unable to read file: %v", err) } + // nolint:prealloc var cfgs []declcfg.DeclarativeConfig bundleDict := buildBundleList(*sv) @@ -211,6 +212,7 @@ func (sv *semverTemplate) generateChannels(semverChannels *bundleVersions) []dec // sort the channel archetypes in ascending order so we can traverse the bundles in order of // their source channel's priority + // nolint:prealloc var archetypesByPriority []channelArchetype for k := range channelPriorities { archetypesByPriority = append(archetypesByPriority, k) @@ -391,6 +393,7 @@ func getMinorVersion(v semver.Version) semver.Version { } } +// nolint:unused func getMajorVersion(v semver.Version) semver.Version { return semver.Version{ Major: v.Major, diff --git a/cmd/opm/alpha/bundle/extract.go b/cmd/opm/alpha/bundle/extract.go index 3952a52b8..3cc7e968f 100644 --- a/cmd/opm/alpha/bundle/extract.go +++ b/cmd/opm/alpha/bundle/extract.go @@ -33,7 +33,7 @@ func init() { extractCmd.Flags().StringP("namespace", "n", "openshift-operator-lifecycle-manager", "namespace to write configmap data") extractCmd.Flags().Uint64P("datalimit", "l", 1<<20, "maximum limit in bytes for total bundle data") extractCmd.Flags().BoolP("gzip", "z", false, "enable gzip compression of configmap data") - extractCmd.MarkPersistentFlagRequired("configmapname") + _ = extractCmd.MarkPersistentFlagRequired("configmapname") } func runExtractCmd(cmd *cobra.Command, _ []string) error { diff --git a/cmd/opm/alpha/bundle/unpack.go b/cmd/opm/alpha/bundle/unpack.go index 82bddff77..369442eb0 100644 --- a/cmd/opm/alpha/bundle/unpack.go +++ b/cmd/opm/alpha/bundle/unpack.go @@ -58,6 +58,7 @@ func unpackBundle(cmd *cobra.Command, args []string) error { return err } + // nolint:nestif if info, err := os.Stat(out); err != nil { if os.IsNotExist(err) { err = os.MkdirAll(out, 0755) diff --git 
a/cmd/opm/alpha/list/cmd.go b/cmd/opm/alpha/list/cmd.go index 0f234e39b..79f9fd9c8 100644 --- a/cmd/opm/alpha/list/cmd.go +++ b/cmd/opm/alpha/list/cmd.go @@ -43,7 +43,9 @@ func newPackagesCmd() *cobra.Command { if err != nil { logger.Fatal(err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() lp := action.ListPackages{IndexReference: args[0], Registry: reg} res, err := lp.Run(cmd.Context()) if err != nil { @@ -72,7 +74,9 @@ func newChannelsCmd() *cobra.Command { if err != nil { logger.Fatal(err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() lc := action.ListChannels{IndexReference: args[0], Registry: reg} if len(args) > 1 { lc.PackageName = args[1] @@ -106,7 +110,9 @@ for each channel in which the bundle is present). if err != nil { logger.Fatal(err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() lb := action.ListBundles{IndexReference: args[0], Registry: reg} if len(args) > 1 { lb.PackageName = args[1] diff --git a/cmd/opm/alpha/template/basic.go b/cmd/opm/alpha/template/basic.go index 4195bd0fe..de6aed367 100644 --- a/cmd/opm/alpha/template/basic.go +++ b/cmd/opm/alpha/template/basic.go @@ -62,7 +62,9 @@ When FILE is '-' or not provided, the template is read from standard input`, if err != nil { log.Fatalf("creating containerd registry: %v", err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() var m *migrations.Migrations if migrateLevel != "" { diff --git a/cmd/opm/alpha/template/semver.go b/cmd/opm/alpha/template/semver.go index 97dccbc6c..eb07ab568 100644 --- a/cmd/opm/alpha/template/semver.go +++ b/cmd/opm/alpha/template/semver.go @@ -68,7 +68,9 @@ When FILE is '-' or not provided, the template is read from standard input`, if err != nil { log.Fatalf("creating containerd registry: %v", err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() var m *migrations.Migrations if migrateLevel != "" { diff --git a/cmd/opm/generate/cmd.go b/cmd/opm/generate/cmd.go index 0bf5b6c9b..7eb2315d8 100644 --- a/cmd/opm/generate/cmd.go +++ b/cmd/opm/generate/cmd.go @@ -99,7 +99,7 @@ A separate builder and base image can be specified. The builder image may not be cmd.Flags().StringVarP(&baseImage, "base-image", "i", containertools.DefaultBinarySourceImage, "Image base to use to build catalog.") cmd.Flags().StringVarP(&builderImage, "builder-image", "b", containertools.DefaultBinarySourceImage, "Image to use as a build stage.") cmd.Flags().StringSliceVarP(&extraLabelStrs, "extra-labels", "l", []string{}, "Extra labels to include in the generated Dockerfile. 
Labels should be of the form 'key=value'.") - cmd.Flags().MarkDeprecated("binary-image", "use --base-image instead") + _ = cmd.Flags().MarkDeprecated("binary-image", "use --base-image instead") cmd.MarkFlagsMutuallyExclusive("binary-image", "base-image") return cmd } diff --git a/cmd/opm/internal/util/util.go b/cmd/opm/internal/util/util.go index 9e0e006be..e007caa48 100644 --- a/cmd/opm/internal/util/util.go +++ b/cmd/opm/internal/util/util.go @@ -46,7 +46,7 @@ func GetTLSOptions(cmd *cobra.Command) (bool, bool, error) { // This works in tandem with opm/index/cmd, which adds the relevant flags as persistent // as part of the root command (cmd/root/cmd) initialization func CreateCLIRegistry(cmd *cobra.Command) (*containerdregistry.Registry, error) { - skipTlsVerify, useHTTP, err := GetTLSOptions(cmd) + skipTLSVerify, useHTTP, err := GetTLSOptions(cmd) if err != nil { return nil, err } @@ -58,7 +58,7 @@ func CreateCLIRegistry(cmd *cobra.Command) (*containerdregistry.Registry, error) reg, err := containerdregistry.NewRegistry( containerdregistry.WithCacheDir(cacheDir), - containerdregistry.SkipTLSVerify(skipTlsVerify), + containerdregistry.SkipTLSVerify(skipTLSVerify), containerdregistry.WithPlainHTTP(useHTTP), containerdregistry.WithLog(log.Null()), ) diff --git a/cmd/opm/main.go b/cmd/opm/main.go index 2359df458..ce734a1a0 100644 --- a/cmd/opm/main.go +++ b/cmd/opm/main.go @@ -2,6 +2,7 @@ package main import ( "context" + "errors" "os" "os/signal" "syscall" @@ -20,15 +21,17 @@ func main() { defer cancel() if err := cmd.ExecuteContext(ctx); err != nil { - agg, ok := err.(utilerrors.Aggregate) - if !ok { + var agg utilerrors.Aggregate + if !errors.As(err, &agg) { os.Exit(1) } for _, e := range agg.Errors() { - if _, ok := e.(registrylib.BundleImageAlreadyAddedErr); ok { + var bundleAlreadyAddedErr registrylib.BundleImageAlreadyAddedErr + if errors.As(e, &bundleAlreadyAddedErr) { os.Exit(2) } - if _, ok := e.(registrylib.PackageVersionAlreadyAddedErr); ok { + var packageVersionAlreadyAddedErr registrylib.PackageVersionAlreadyAddedErr + if errors.As(e, &packageVersionAlreadyAddedErr) { os.Exit(3) } } diff --git a/cmd/opm/render/cmd.go b/cmd/opm/render/cmd.go index 683d11a1c..f1923406c 100644 --- a/cmd/opm/render/cmd.go +++ b/cmd/opm/render/cmd.go @@ -55,7 +55,9 @@ database files. 
if err != nil { log.Fatal(err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() render.Registry = reg diff --git a/cmd/opm/serve/serve.go b/cmd/opm/serve/serve.go index 011286c6f..2d5e913cc 100644 --- a/cmd/opm/serve/serve.go +++ b/cmd/opm/serve/serve.go @@ -44,7 +44,7 @@ type serve struct { } const ( - defaultCpuStartupPath string = "/debug/pprof/startup/cpu" + defaultCPUStartupPath string = "/debug/pprof/startup/cpu" ) func NewCmd() *cobra.Command { @@ -99,7 +99,7 @@ func (s *serve) run(ctx context.Context) error { return fmt.Errorf("could not start pprof endpoint: %v", err) } if s.captureProfiles { - if err := p.startCpuProfileCache(); err != nil { + if err := p.startCPUProfileCache(); err != nil { return fmt.Errorf("could not start CPU profile: %v", err) } } @@ -169,7 +169,7 @@ func (s *serve) run(ctx context.Context) error { health.RegisterHealthServer(grpcServer, server.NewHealthServer()) reflection.Register(grpcServer) mainLogger.Info("serving registry") - p.stopCpuProfileCache() + p.stopCPUProfileCache() go func() { <-ctx.Done() @@ -224,7 +224,7 @@ func (p *profilerInterface) startEndpoint() error { mux.HandleFunc("/debug/pprof/profile", endpoint.Profile) mux.HandleFunc("/debug/pprof/symbol", endpoint.Symbol) mux.HandleFunc("/debug/pprof/trace", endpoint.Trace) - mux.HandleFunc(defaultCpuStartupPath, p.httpHandler) + mux.HandleFunc(defaultCPUStartupPath, p.httpHandler) p.server = http.Server{ Addr: p.addr, @@ -249,13 +249,13 @@ func (p *profilerInterface) startEndpoint() error { return nil } -func (p *profilerInterface) startCpuProfileCache() error { +func (p *profilerInterface) startCPUProfileCache() error { // short-circuit if not enabled if !p.isEnabled() { return nil } - p.logger.Infof("start caching cpu profile data at %q", defaultCpuStartupPath) + p.logger.Infof("start caching cpu profile data at %q", defaultCPUStartupPath) if err := pprof.StartCPUProfile(&p.cache); err != nil { return err } @@ -263,7 +263,7 @@ func (p *profilerInterface) startCpuProfileCache() error { return nil } -func (p *profilerInterface) stopCpuProfileCache() { +func (p *profilerInterface) stopCPUProfileCache() { // short-circuit if not enabled if !p.isEnabled() { return @@ -277,7 +277,7 @@ func (p *profilerInterface) httpHandler(w http.ResponseWriter, r *http.Request) if !p.isCacheReady() { http.Error(w, "cpu profile cache is not yet ready", http.StatusServiceUnavailable) } - w.Write(p.cache.Bytes()) + _, _ = w.Write(p.cache.Bytes()) } func (p *profilerInterface) stopEndpoint(ctx context.Context) error { diff --git a/pkg/api/api_to_model.go b/pkg/api/api_to_model.go index 5c0cb603a..50088ab4f 100644 --- a/pkg/api/api_to_model.go +++ b/pkg/api/api_to_model.go @@ -42,6 +42,7 @@ func ConvertAPIBundleToModelBundle(b *Bundle) (*model.Bundle, error) { } func convertAPIBundleToModelProperties(b *Bundle) ([]property.Property, error) { + // nolint:prealloc var out []property.Property providedGVKs := map[property.GVK]struct{}{} diff --git a/pkg/api/model_to_api.go b/pkg/api/model_to_api.go index 40ccdefee..73162d255 100644 --- a/pkg/api/model_to_api.go +++ b/pkg/api/model_to_api.go @@ -21,8 +21,8 @@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { return nil, fmt.Errorf("parse properties: %v", err) } - csvJson := b.CsvJSON - if csvJson == "" && len(props.CSVMetadatas) == 1 { + csvJSON := b.CsvJSON + if csvJSON == "" && len(props.CSVMetadatas) == 1 { var icons []v1alpha1.Icon if b.Package.Icon != nil { icons = []v1alpha1.Icon{{ @@ -47,9 +47,9 @@ func 
ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { if err != nil { return nil, err } - csvJson = string(csvData) + csvJSON = string(csvData) if len(b.Objects) == 0 { - b.Objects = []string{csvJson} + b.Objects = []string{csvJSON} } } @@ -77,7 +77,7 @@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { Properties: convertModelPropertiesToAPIProperties(b.Properties), Replaces: b.Replaces, Skips: b.Skips, - CsvJson: csvJson, + CsvJson: csvJSON, Object: b.Objects, Deprecation: deprecation, }, nil @@ -128,6 +128,7 @@ func csvMetadataToCsv(m property.CSVMetadata) v1alpha1.ClusterServiceVersion { } func gvksProvidedtoAPIGVKs(in []property.GVK) []*GroupVersionKind { + // nolint:prealloc var out []*GroupVersionKind for _, gvk := range in { out = append(out, &GroupVersionKind{ @@ -139,6 +140,7 @@ func gvksProvidedtoAPIGVKs(in []property.GVK) []*GroupVersionKind { return out } func gvksRequirestoAPIGVKs(in []property.GVKRequired) []*GroupVersionKind { + // nolint:prealloc var out []*GroupVersionKind for _, gvk := range in { out = append(out, &GroupVersionKind{ diff --git a/pkg/cache/json.go b/pkg/cache/json.go index fc73431e9..92b17f752 100644 --- a/pkg/cache/json.go +++ b/pkg/cache/json.go @@ -102,11 +102,11 @@ func (q *jsonBackend) GetPackageIndex(_ context.Context) (packageIndex, error) { } func (q *jsonBackend) PutPackageIndex(_ context.Context, pi packageIndex) error { - packageJson, err := json.Marshal(pi) + packageJSON, err := json.Marshal(pi) if err != nil { return err } - if err := os.WriteFile(filepath.Join(q.baseDir, jsonPackagesFile), packageJson, jsonCacheModeFile); err != nil { + if err := os.WriteFile(filepath.Join(q.baseDir, jsonPackagesFile), packageJSON, jsonCacheModeFile); err != nil { return err } return nil diff --git a/pkg/cache/pkgs.go b/pkg/cache/pkgs.go index e590823b4..c89a9aab4 100644 --- a/pkg/cache/pkgs.go +++ b/pkg/cache/pkgs.go @@ -14,6 +14,7 @@ import ( type packageIndex map[string]cPkg func (pkgs packageIndex) ListPackages(_ context.Context) ([]string, error) { + // nolint:prealloc var packages []string for pkgName := range pkgs { packages = append(packages, pkgName) @@ -27,6 +28,7 @@ func (pkgs packageIndex) GetPackage(_ context.Context, name string) (*registry.P return nil, fmt.Errorf("package %q not found", name) } + // nolint:prealloc var channels []registry.PackageChannel for _, ch := range pkg.Channels { var deprecation *registry.Deprecation diff --git a/pkg/cache/pogrebv1.go b/pkg/cache/pogrebv1.go index a340b4458..78eb13fd8 100644 --- a/pkg/cache/pogrebv1.go +++ b/pkg/cache/pogrebv1.go @@ -38,7 +38,7 @@ const ( pograbV1CacheDir = FormatPogrebV1 pogrebDigestFile = pograbV1CacheDir + "/digest" - pogrebDbDir = pograbV1CacheDir + "/db" + pogrebDBDir = pograbV1CacheDir + "/db" ) type pogrebV1Backend struct { @@ -76,7 +76,7 @@ func (q *pogrebV1Backend) Init() error { } func (q *pogrebV1Backend) Open() error { - db, err := pogreb.Open(filepath.Join(q.baseDir, pogrebDbDir), &pogreb.Options{FileSystem: pogrebfs.OSMMap}) + db, err := pogreb.Open(filepath.Join(q.baseDir, pogrebDBDir), &pogreb.Options{FileSystem: pogrebfs.OSMMap}) if err != nil { return err } @@ -93,7 +93,7 @@ func (q *pogrebV1Backend) Close() error { } // Recursively fixup permissions on the DB directory. 
- return filepath.Walk(filepath.Join(q.baseDir, pogrebDbDir), func(path string, info os.FileInfo, err error) error { + return filepath.Walk(filepath.Join(q.baseDir, pogrebDBDir), func(path string, info os.FileInfo, err error) error { if err != nil { return err } @@ -128,11 +128,11 @@ func (q *pogrebV1Backend) GetPackageIndex(_ context.Context) (packageIndex, erro } func (q *pogrebV1Backend) PutPackageIndex(_ context.Context, index packageIndex) error { - packageJson, err := json.Marshal(index) + packageJSON, err := json.Marshal(index) if err != nil { return err } - return q.db.Put([]byte("packages.json"), packageJson) + return q.db.Put([]byte("packages.json"), packageJSON) } func (q *pogrebV1Backend) dbKey(in bundleKey) []byte { diff --git a/pkg/cache/tar.go b/pkg/cache/tar.go index 92e83c181..2c00c55b2 100644 --- a/pkg/cache/tar.go +++ b/pkg/cache/tar.go @@ -14,7 +14,7 @@ import ( // of archives produced by this function do not need to account for differences in // permissions between source and destination filesystems. func fsToTar(w io.Writer, fsys fs.FS, buf []byte) error { - if buf == nil || len(buf) == 0 { + if len(buf) == 0 { // We are not sensitive to the size of this buffer, we just need it to be shared. // For simplicity, do the same as io.Copy() would. buf = make([]byte, 32*1024) diff --git a/pkg/client/client.go b/pkg/client/client.go index ed3637dae..552d0beff 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -2,6 +2,7 @@ package client import ( "context" + "errors" "io" "time" @@ -49,7 +50,7 @@ func (it *BundleIterator) Next() *api.Bundle { return nil } next, err := it.stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } if err != nil { diff --git a/pkg/client/errors.go b/pkg/client/errors.go index 948012c9f..b9320501d 100644 --- a/pkg/client/errors.go +++ b/pkg/client/errors.go @@ -51,6 +51,7 @@ func IsErrorUnrecoverable(err error) bool { } func reasonForError(err error) HealthErrorReason { + // nolint:errorlint switch t := err.(type) { case HealthError: return t.Reason diff --git a/pkg/client/kubeclient.go b/pkg/client/kubeclient.go index 17a6532f8..7b63e1c55 100644 --- a/pkg/client/kubeclient.go +++ b/pkg/client/kubeclient.go @@ -10,13 +10,14 @@ import ( "k8s.io/client-go/tools/clientcmd" ) -func NewKubeClient(kubeconfig string, logger *logrus.Logger) (clientset *kubernetes.Clientset, err error) { +func NewKubeClient(kubeconfig string, logger *logrus.Logger) (*kubernetes.Clientset, error) { var config *rest.Config if overrideConfig := os.Getenv(clientcmd.RecommendedConfigPathEnvVar); overrideConfig != "" { kubeconfig = overrideConfig } + var err error if kubeconfig != "" { logger.Infof("Loading kube client config from path %q", kubeconfig) config, err = clientcmd.BuildConfigFromFlags("", kubeconfig) @@ -26,10 +27,11 @@ func NewKubeClient(kubeconfig string, logger *logrus.Logger) (clientset *kuberne } if err != nil { + // nolint:stylecheck err = fmt.Errorf("Cannot load config for REST client: %v", err) - return + return nil, err } - clientset, err = kubernetes.NewForConfig(config) - return + clientset, err := kubernetes.NewForConfig(config) + return clientset, err } diff --git a/pkg/configmap/configmap.go b/pkg/configmap/configmap.go index 0c95407e2..2b310371f 100644 --- a/pkg/configmap/configmap.go +++ b/pkg/configmap/configmap.go @@ -32,10 +32,11 @@ type BundleLoader struct { // creates an operator registry Bundle object. // If the Data section has a PackageManifest resource then it is also // deserialized and included in the result. 
-func (l *BundleLoader) Load(cm *corev1.ConfigMap) (bundle *api.Bundle, err error) { +func (l *BundleLoader) Load(cm *corev1.ConfigMap) (*api.Bundle, error) { + var err error if cm == nil { err = errors.New("ConfigMap must not be <nil>") - return + return nil, err } logger := l.logger.WithFields(logrus.Fields{ @@ -45,15 +46,15 @@ bundle, skipped, bundleErr := loadBundle(logger, cm) if bundleErr != nil { err = fmt.Errorf("failed to extract bundle from configmap - %v", bundleErr) - return + return nil, err } l.logger.Debugf("couldn't unpack skipped: %#v", skipped) - return + return bundle, nil } -func loadBundle(entry *logrus.Entry, cm *corev1.ConfigMap) (bundle *api.Bundle, skipped map[string]string, err error) { - bundle = &api.Bundle{Object: []string{}} - skipped = map[string]string{} +func loadBundle(entry *logrus.Entry, cm *corev1.ConfigMap) (*api.Bundle, map[string]string, error) { + bundle := &api.Bundle{Object: []string{}} + skipped := map[string]string{} data := cm.Data if hasGzipEncodingAnnotation(cm) { @@ -95,7 +96,7 @@ func loadBundle(entry *logrus.Entry, cm *corev1.ConfigMap) (bundle *api.Bundle, logger.Infof("added to bundle, Kind=%s", resource.GetKind()) } - return + return bundle, skipped, nil } func decodeGzipBinaryData(cm *corev1.ConfigMap) (map[string]string, error) { diff --git a/pkg/containertools/containertool.go b/pkg/containertools/containertool.go index ea38c21d1..4e87894b0 100644 --- a/pkg/containertools/containertool.go +++ b/pkg/containertools/containertool.go @@ -8,7 +8,8 @@ const ( DockerTool ) -func (t ContainerTool) String() (s string) { +func (t ContainerTool) String() string { + var s string switch t { case NoneTool: s = "none" @@ -17,7 +18,7 @@ func (t ContainerTool) String() (s string) { case DockerTool: s = "docker" } - return + return s } func (t ContainerTool) CommandFactory() CommandFactory { @@ -30,7 +31,8 @@ func (t ContainerTool) CommandFactory() CommandFactory { return &StubCommandFactory{} } -func NewContainerTool(s string, defaultTool ContainerTool) (t ContainerTool) { +func NewContainerTool(s string, defaultTool ContainerTool) ContainerTool { + var t ContainerTool switch s { case "podman": t = PodmanTool @@ -41,7 +43,7 @@ func NewContainerTool(s string, defaultTool ContainerTool) (t ContainerTool) { default: t = defaultTool } - return + return t } // NewCommandContainerTool returns a tool that can be used in `exec` statements.
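For reference, a minimal sketch of the errorlint-style rewrites this patch applies in places like pkg/client/client.go, cmd/opm/main.go, and pkg/containertools/runner.go. The classify function and its inputs below are hypothetical stand-ins, not code from this repository:

package main

import (
	"errors"
	"fmt"
	"io"
	"os/exec"
)

// classify shows the two rewrites: sentinel errors are matched with
// errors.Is instead of ==, and concrete error types are extracted with
// errors.As instead of a type assertion, so both keep working once
// errors are wrapped with fmt.Errorf("...: %w", err).
func classify(err error) string {
	if errors.Is(err, io.EOF) {
		return "eof"
	}
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		return fmt.Sprintf("exec failed: %s", exitErr.Stderr)
	}
	return "other"
}

func main() {
	fmt.Println(classify(fmt.Errorf("recv: %w", io.EOF))) // prints "eof"
}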
diff --git a/pkg/containertools/dockerfilegenerator.go b/pkg/containertools/dockerfilegenerator.go index 79059b9ee..9a2e4a363 100644 --- a/pkg/containertools/dockerfilegenerator.go +++ b/pkg/containertools/dockerfilegenerator.go @@ -9,7 +9,7 @@ import ( const ( DefaultBinarySourceImage = "quay.io/operator-framework/opm:latest" - DefaultDbLocation = "/database/index.db" + DefaultDBLocation = "/database/index.db" DbLocationLabel = "operators.operatorframework.io.index.database.v1" ConfigsLocationLabel = "operators.operatorframework.io.index.configs.v1" ) @@ -46,13 +46,13 @@ func (g *IndexDockerfileGenerator) GenerateIndexDockerfile(binarySourceImage, da dockerfile += fmt.Sprintf("FROM %s\n", binarySourceImage) // Labels - dockerfile += fmt.Sprintf("LABEL %s=%s\n", DbLocationLabel, DefaultDbLocation) + dockerfile += fmt.Sprintf("LABEL %s=%s\n", DbLocationLabel, DefaultDBLocation) // Content - dockerfile += fmt.Sprintf("ADD %s %s\n", databasePath, DefaultDbLocation) + dockerfile += fmt.Sprintf("ADD %s %s\n", databasePath, DefaultDBLocation) dockerfile += "EXPOSE 50051\n" dockerfile += "ENTRYPOINT [\"/bin/opm\"]\n" - dockerfile += fmt.Sprintf("CMD [\"registry\", \"serve\", \"--database\", \"%s\"]\n", DefaultDbLocation) + dockerfile += fmt.Sprintf("CMD [\"registry\", \"serve\", \"--database\", \"%s\"]\n", DefaultDBLocation) return dockerfile } diff --git a/pkg/containertools/labelreader.go b/pkg/containertools/labelreader.go index 57de73829..18ad46d98 100644 --- a/pkg/containertools/labelreader.go +++ b/pkg/containertools/labelreader.go @@ -71,5 +71,6 @@ func (r ImageLabelReader) GetLabelsFromImage(image string) (map[string]string, e return data[0].Labels, nil } + // nolint:stylecheck return nil, fmt.Errorf("Unable to parse label data from container") } diff --git a/pkg/containertools/runner.go b/pkg/containertools/runner.go index 660c92c6a..79171bcb7 100644 --- a/pkg/containertools/runner.go +++ b/pkg/containertools/runner.go @@ -2,6 +2,7 @@ package containertools import ( + "errors" "fmt" "os/exec" "strings" @@ -133,7 +134,8 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { out, err := command.Output() if err != nil { msg := err.Error() - if exitErr, ok := err.(*exec.ExitError); ok { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { msg = fmt.Sprintf("%s: %s", err, exitErr.Stderr) } return fmt.Errorf("error creating container %s: %s", string(out), msg) @@ -148,7 +150,7 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { out, err = command.CombinedOutput() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return fmt.Errorf("error copying container directory %s: %v", string(out), err) } @@ -160,7 +162,7 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { out, err = command.CombinedOutput() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return fmt.Errorf("error removing container %s: %v", string(out), err) } @@ -179,7 +181,7 @@ func (r *ContainerCommandRunner) Inspect(image string) ([]byte, error) { out, err := command.Output() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return nil, err } diff --git a/pkg/image/containerdregistry/options.go b/pkg/image/containerdregistry/options.go index d447dc155..bfe96e68d 100644 --- a/pkg/image/containerdregistry/options.go +++ b/pkg/image/containerdregistry/options.go @@ -60,22 +60,24 @@ func defaultConfig() *RegistryConfig { // NewRegistry returns a new containerd Registry and a function 
to destroy it after use. // The destroy function is safe to call more than once, but is a no-op after the first call. -func NewRegistry(options ...RegistryOption) (registry *Registry, err error) { +func NewRegistry(options ...RegistryOption) (*Registry, error) { + var registry *Registry + config := defaultConfig() config.apply(options) - if err = config.complete(); err != nil { - return + if err := config.complete(); err != nil { + return nil, err } cs, err := contentlocal.NewStore(config.CacheDir) if err != nil { - return + return nil, err } var bdb *bolt.DB bdb, err = bolt.Open(config.DBPath, 0644, nil) if err != nil { - return + return nil, err } var once sync.Once @@ -107,7 +109,7 @@ func NewRegistry(options ...RegistryOption) (registry *Registry, err error) { Architecture: "amd64", }), } - return + return registry, nil } type RegistryOption func(config *RegistryConfig) diff --git a/pkg/image/containerdregistry/registry.go b/pkg/image/containerdregistry/registry.go index 61fb5c73d..776b0fe83 100644 --- a/pkg/image/containerdregistry/registry.go +++ b/pkg/image/containerdregistry/registry.go @@ -143,7 +143,7 @@ func (r *Registry) Labels(ctx context.Context, ref image.Reference) (map[string] } // Destroy cleans up the on-disk boltdb file and other cache files, unless preserve cache is true -func (r *Registry) Destroy() (err error) { +func (r *Registry) Destroy() error { return r.destroy() } diff --git a/pkg/image/execregistry/registry.go b/pkg/image/execregistry/registry.go index 40769d23e..0d299b66d 100644 --- a/pkg/image/execregistry/registry.go +++ b/pkg/image/execregistry/registry.go @@ -26,7 +26,7 @@ type Registry struct { var _ image.Registry = &Registry{} // NewRegistry instantiates and returns a new registry which manipulates images via exec podman/docker commands. -func NewRegistry(tool containertools.ContainerTool, logger *logrus.Entry, opts ...containertools.RunnerOption) (registry *Registry, err error) { +func NewRegistry(tool containertools.ContainerTool, logger *logrus.Entry, opts ...containertools.RunnerOption) (*Registry, error) { return &Registry{ log: logger, cmd: containertools.NewCommandRunner(tool, logger, opts...), diff --git a/pkg/lib/bundle/build.go b/pkg/lib/bundle/build.go index 08b0fa808..5bfb517fc 100644 --- a/pkg/lib/bundle/build.go +++ b/pkg/lib/bundle/build.go @@ -31,6 +31,7 @@ func ExecuteCommand(cmd *exec.Cmd) error { log.Debugf("Running %#v", cmd.Args) if err := cmd.Run(); err != nil { + // nolint:stylecheck return fmt.Errorf("Failed to exec %#v: %v", cmd.Args, err) } diff --git a/pkg/lib/bundle/errors.go b/pkg/lib/bundle/errors.go index 5e0735adf..869cf061c 100644 --- a/pkg/lib/bundle/errors.go +++ b/pkg/lib/bundle/errors.go @@ -12,6 +12,7 @@ type ValidationError struct { } func (v ValidationError) Error() string { + // nolint:prealloc var errs []string for _, err := range v.Errors { errs = append(errs, err.Error()) diff --git a/pkg/lib/bundle/generate.go b/pkg/lib/bundle/generate.go index 7fc1fbabc..72e781e0b 100644 --- a/pkg/lib/bundle/generate.go +++ b/pkg/lib/bundle/generate.go @@ -79,6 +79,7 @@ func GenerateFunc(directory, outputDir, packageName, channels, channelDefault st // Channels and packageName are required fields where as default channel is automatically filled if unspecified // and that either of the required field is missing. We are interpreting the bundle information through // bundle directory embedded in the package folder. 
+ // nolint:nestif if channels == "" || packageName == "" { var notProvided []string if channels == "" { @@ -157,7 +158,8 @@ func GenerateFunc(directory, outputDir, packageName, channels, channelDefault st // It returns two strings. resultMetadata is the path to the output metadata/ folder. // resultManifests is the path to the output manifests/ folder -- if no copy occurred, // it just returns the input manifestDir -func CopyYamlOutput(annotationsContent []byte, manifestDir, outputDir, workingDir string, overwrite bool) (resultManifests, resultMetadata string, err error) { +func CopyYamlOutput(annotationsContent []byte, manifestDir, outputDir, workingDir string, overwrite bool) (string, string, error) { + var resultManifests, resultMetadata string // First, determine the parent directory of the metadata and manifest directories copyDir := "" @@ -204,6 +206,7 @@ func CopyYamlOutput(annotationsContent []byte, manifestDir, outputDir, workingDi // Currently able to detect helm chart, registry+v1 (CSV) and plain k8s resources // such as CRD. func GetMediaType(directory string) (string, error) { + // nolint:prealloc var files []string k8sFiles := make(map[string]*unstructured.Unstructured) @@ -219,6 +222,7 @@ func GetMediaType(directory string) (string, error) { fileWithPath := filepath.Join(directory, item.Name()) fileBlob, err := os.ReadFile(fileWithPath) if err != nil { + // nolint:stylecheck return "", fmt.Errorf("Unable to read file %s in bundle", fileWithPath) } @@ -230,6 +234,7 @@ func GetMediaType(directory string) (string, error) { } if len(files) == 0 { + // nolint:stylecheck return "", fmt.Errorf("The directory %s contains no yaml files", directory) } @@ -276,11 +281,13 @@ func ValidateAnnotations(existing, expected []byte) error { for label, item := range expectedAnnotations.Annotations { value, hasAnnotation := fileAnnotations.Annotations[label] if !hasAnnotation { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Missing field: %s", label)) continue } if item != value { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Expect field %q to have value %q instead of %q", label, item, value)) } @@ -443,6 +450,7 @@ func copyManifestDir(from, to string, overwrite bool) error { return nil } +// nolint:unused func containsString(slice []string, s string) bool { for _, item := range slice { if item == s { diff --git a/pkg/lib/bundle/interpreter.go b/pkg/lib/bundle/interpreter.go index 364fa4890..7be06990f 100644 --- a/pkg/lib/bundle/interpreter.go +++ b/pkg/lib/bundle/interpreter.go @@ -32,7 +32,8 @@ func NewBundleDirInterperter(bundleDir string) (*bundleDirInterpreter, error) { return &bundleDirInterpreter{bundleCsvName: csv.GetName(), pkg: p}, nil } -func (b *bundleDirInterpreter) GetBundleChannels() (channelNames []string) { +func (b *bundleDirInterpreter) GetBundleChannels() ([]string) { + var channelNames []string for channelName, channel := range b.pkg.Channels { for bundle := range channel.Nodes { if bundle.CsvName == b.bundleCsvName { @@ -42,7 +43,7 @@ func (b *bundleDirInterpreter) GetBundleChannels() (channelNames []string) { } } sort.Strings(channelNames) - return + return channelNames } func (b *bundleDirInterpreter) GetDefaultChannel() string { diff --git a/pkg/lib/bundle/validate.go b/pkg/lib/bundle/validate.go index 18708186a..c74401026 100644 --- a/pkg/lib/bundle/validate.go +++ b/pkg/lib/bundle/validate.go @@ -100,10 +100,12 @@ func (i imageValidator) ValidateBundleFormat(directory string) error { } } - if manifestsFound == false { + if !manifestsFound { + // 
nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unable to locate manifests directory")) } - if metadataFound == false { + if !metadataFound { + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unable to locate metadata directory")) } @@ -145,6 +147,7 @@ func (i imageValidator) ValidateBundleFormat(directory string) error { } if !annotationsFound { + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Could not find annotations file")) } else { i.logger.Debug("Found annotations file") @@ -186,6 +189,7 @@ func validateAnnotations(mediaType string, fileAnnotations *AnnotationMetadata) for label, item := range annotations { val, ok := fileAnnotations.Annotations[label] if !ok && label != ChannelDefaultLabel { + // nolint:stylecheck aErr := fmt.Errorf("Missing annotation %q", label) validationErrors = append(validationErrors, aErr) } @@ -193,26 +197,31 @@ func validateAnnotations(mediaType string, fileAnnotations *AnnotationMetadata) switch label { case MediatypeLabel: if item != val { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have value %q instead of %q", label, item, val) validationErrors = append(validationErrors, aErr) } case ManifestsLabel: if item != ManifestsDir { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have value %q instead of %q", label, ManifestsDir, val) validationErrors = append(validationErrors, aErr) } case MetadataDir: if item != MetadataLabel { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have value %q instead of %q", label, MetadataDir, val) validationErrors = append(validationErrors, aErr) } case ChannelsLabel: if val == "" { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have non-empty value", label) validationErrors = append(validationErrors, aErr) } case ChannelDefaultLabel: if ok && val == "" { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have non-empty value", label) validationErrors = append(validationErrors, aErr) } @@ -292,6 +301,7 @@ func (i imageValidator) ValidateBundleContent(manifestDir string) error { fileWithPath := filepath.Join(manifestDir, item.Name()) data, err := os.ReadFile(fileWithPath) if err != nil { + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unable to read file %s in supported types", fileWithPath)) continue } @@ -314,6 +324,7 @@ func (i imageValidator) ValidateBundleContent(manifestDir string) error { continue } + // nolint:nestif if gvk.Kind == CSVKind { err := runtime.DefaultUnstructuredConverter.FromUnstructured(k8sFile.Object, csv) if err != nil { @@ -362,6 +373,7 @@ func (i imageValidator) ValidateBundleContent(manifestDir string) error { } } default: + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unsupported api version of CRD: %s", gv)) } } else { diff --git a/pkg/lib/image/registry.go b/pkg/lib/image/registry.go index 2c5c7f07a..4d117c3cd 100644 --- a/pkg/lib/image/registry.go +++ b/pkg/lib/image/registry.go @@ -99,7 +99,7 @@ func RunDockerRegistry(ctx context.Context, rootDir string, configOpts ...Config } }() - err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) { + err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) { tr := &http.Transport{TLSClientConfig: &tls.Config{ InsecureSkipVerify: false, RootCAs: certPool, @@ -109,6 +109,7 @@ func RunDockerRegistry(ctx context.Context, rootDir string, configOpts ...Config 
if err != nil { return false, nil } + defer r.Body.Close() if r.StatusCode == http.StatusOK { return true, nil } diff --git a/pkg/lib/indexer/indexer.go b/pkg/lib/indexer/indexer.go index 6e53a9f20..5f0b6bd7a 100644 --- a/pkg/lib/indexer/indexer.go +++ b/pkg/lib/indexer/indexer.go @@ -37,6 +37,7 @@ const ( concurrencyLimitForExport = 10 ) +// nolint:stylecheck var ErrFileBasedCatalogPrune = errors.New("`opm index prune` only supports sqlite-based catalogs. See https://github.com/redhat-openshift-ecosystem/community-operators-prod/issues/793 for instructions on pruning a plaintext files backed catalog.") // ImageIndexer is a struct implementation of the Indexer interface @@ -409,9 +410,10 @@ func copyDatabaseTo(databaseFile, targetDir string) (string, error) { return to.Name(), err } -func buildContext(generate bool, requestedDockerfile string) (buildDir, outDockerfile string, cleanup func(), err error) { +func buildContext(generate bool, requestedDockerfile string) (string, string, func(), error) { + var buildDir, outDockerfile string // set cleanup to a no-op until explicitly set - cleanup = func() {} + cleanup := func() {} if generate { buildDir = "./" @@ -421,13 +423,13 @@ func buildContext(generate bool, requestedDockerfile string) (buildDir, outDocke outDockerfile = requestedDockerfile } cleanup = func() {} - return + return buildDir, outDockerfile, cleanup, nil } // set a temp directory for building the new image - buildDir, err = os.MkdirTemp(".", tmpBuildDirPrefix) + buildDir, err := os.MkdirTemp(".", tmpBuildDirPrefix) if err != nil { - return + return "", "", cleanup, err } cleanup = func() { os.RemoveAll(buildDir) @@ -435,14 +437,14 @@ func buildContext(generate bool, requestedDockerfile string) (buildDir, outDocke if len(requestedDockerfile) > 0 { outDockerfile = requestedDockerfile - return + return buildDir, outDockerfile, cleanup, nil } // generate a temp dockerfile if needed tempDockerfile, err := os.CreateTemp(".", defaultDockerfileName) if err != nil { defer cleanup() - return + return "", "", cleanup, err } outDockerfile = tempDockerfile.Name() cleanup = func() { @@ -450,7 +452,7 @@ func buildContext(generate bool, requestedDockerfile string) (buildDir, outDocke os.Remove(outDockerfile) } - return + return buildDir, outDockerfile, cleanup, nil } func build(dockerfilePath, imageTag string, commandRunner containertools.CommandRunner, logger *logrus.Entry) error { diff --git a/pkg/lib/registry/registry.go b/pkg/lib/registry/registry.go index dc0bec859..d6be014e1 100644 --- a/pkg/lib/registry/registry.go +++ b/pkg/lib/registry/registry.go @@ -2,6 +2,7 @@ package registry import ( "context" + "errors" "fmt" "os" @@ -133,6 +134,7 @@ func unpackImage(ctx context.Context, reg image.Registry, ref image.Reference) ( func populate(ctx context.Context, loader registry.Load, graphLoader registry.GraphLoader, querier registry.Query, reg image.Registry, refs []image.Reference, mode registry.Mode, overwrite bool) error { unpackedImageMap := make(map[image.Reference]string, 0) overwrittenBundles := map[string][]string{} + // nolint:prealloc var imagesToAdd []*registry.Bundle for _, ref := range refs { to, from, cleanup, err := unpackImage(ctx, reg, ref) @@ -151,7 +153,7 @@ func populate(ctx context.Context, loader registry.Load, graphLoader registry.Gr if overwrite { overwritten, err := querier.GetBundlePathIfExists(ctx, img.Bundle.Name) if err != nil { - if err == registry.ErrBundleImageNotInDatabase { + if errors.Is(err, registry.ErrBundleImageNotInDatabase) { continue } return err @@ 
-391,6 +393,7 @@ func checkForBundlePaths(querier registry.GRPCQuery, bundlePaths []string) ([]st registryBundlePaths[b.BundlePath] = struct{}{} } + // nolint:prealloc var found, missing []string for _, b := range bundlePaths { if _, ok := registryBundlePaths[b]; ok { @@ -408,7 +411,7 @@ func checkForBundlePaths(querier registry.GRPCQuery, bundlePaths []string) ([]st // replaces mode selects highest version as channel head and // prunes any bundles in the upgrade chain after the channel head. // check for the presence of newly added bundles after a replaces-mode add. -func checkForBundles(ctx context.Context, q *sqlite.SQLQuerier, g registry.GraphLoader, required []*registry.Bundle) error { +func checkForBundles(_ context.Context, _ *sqlite.SQLQuerier, g registry.GraphLoader, required []*registry.Bundle) error { var errs []error for _, bundle := range required { graph, err := g.Generate(bundle.Package) diff --git a/pkg/lib/semver/semver.go b/pkg/lib/semver/semver.go index 6875566d0..60721cdaf 100644 --- a/pkg/lib/semver/semver.go +++ b/pkg/lib/semver/semver.go @@ -8,6 +8,7 @@ import ( // BuildIdCompare compares two versions and returns negative one if the first arg is less than the second arg, positive one if it is larger, and zero if they are equal. // This comparison follows typical semver precedence rules, with one addition: whenever two versions are equal with the exception of their build-ids, the build-ids are compared using prerelease precedence rules. Further, versions with no build-id are always less than versions with build-ids; e.g. 1.0.0 < 1.0.0+1. +// nolint:stylecheck func BuildIdCompare(b semver.Version, v semver.Version) (int, error) { if c := b.Compare(v); c != 0 { return c, nil @@ -27,6 +28,7 @@ func BuildIdCompare(b semver.Version, v semver.Version) (int, error) { } func buildAsPrerelease(v semver.Version) (*semver.Version, error) { + // nolint:prealloc var pre []semver.PRVersion for _, b := range v.Build { p, err := semver.NewPRVersion(b) diff --git a/pkg/lib/tmp/copy.go b/pkg/lib/tmp/copy.go index a48a3e219..f72a59ad8 100644 --- a/pkg/lib/tmp/copy.go +++ b/pkg/lib/tmp/copy.go @@ -7,7 +7,7 @@ import ( ) // CopyTmpDB reads the file at the given path and copies it to a tmp directory, returning the copied file path or an err -func CopyTmpDB(original string) (path string, err error) { +func CopyTmpDB(original string) (string, error) { dst, err := os.CreateTemp("", "db-") if err != nil { return "", err diff --git a/pkg/lib/unstructured/unstructured.go b/pkg/lib/unstructured/unstructured.go index 11ff9cc89..bc81d227d 100644 --- a/pkg/lib/unstructured/unstructured.go +++ b/pkg/lib/unstructured/unstructured.go @@ -49,7 +49,7 @@ func FromDir(dirpath string) ([]*unstructured.Unstructured, error) { return nil, err } - unsts := make([]*unstructured.Unstructured, 0, 0) + unsts := make([]*unstructured.Unstructured, 0) for _, file := range files { unst, err := FromFile(path.Join(dirpath, file.Name())) if err != nil { diff --git a/pkg/lib/validation/bundle.go b/pkg/lib/validation/bundle.go index 190fbbcf1..a88b7b630 100644 --- a/pkg/lib/validation/bundle.go +++ b/pkg/lib/validation/bundle.go @@ -14,7 +14,8 @@ import ( var RegistryBundleValidator interfaces.Validator = interfaces.ValidatorFunc(validateBundles) -func validateBundles(objs ...interface{}) (results []errors.ManifestResult) { +func validateBundles(objs ...interface{}) []errors.ManifestResult { + var results []errors.ManifestResult for _, obj := range objs { switch v := obj.(type) { case *registry.Bundle: @@ -24,7 +25,8 @@ 
func validateBundles(objs ...interface{}) (results []errors.ManifestResult) { return results } -func validateBundle(bundle *registry.Bundle) (result errors.ManifestResult) { +func validateBundle(bundle *registry.Bundle) errors.ManifestResult { + var result errors.ManifestResult csv, err := bundle.ClusterServiceVersion() if err != nil { result.Add(errors.ErrInvalidParse("error getting bundle CSV", err)) @@ -40,7 +42,8 @@ func validateBundle(bundle *registry.Bundle) (result errors.ManifestResult) { return result } -func validateOwnedCRDs(bundle *registry.Bundle, csv *registry.ClusterServiceVersion) (result errors.ManifestResult) { +func validateOwnedCRDs(bundle *registry.Bundle, csv *registry.ClusterServiceVersion) errors.ManifestResult { + var result errors.ManifestResult ownedKeys, _, err := csv.GetCustomResourceDefintions() if err != nil { result.Add(errors.ErrInvalidParse("error getting CSV CRDs", err)) diff --git a/pkg/mirror/options.go b/pkg/mirror/options.go index c9d3b3d9e..51c004faa 100644 --- a/pkg/mirror/options.go +++ b/pkg/mirror/options.go @@ -44,30 +44,30 @@ func (o *IndexImageMirrorerOptions) Complete() error { } // Apply sequentially applies the given options to the config. -func (c *IndexImageMirrorerOptions) Apply(options []ImageIndexMirrorOption) { +func (o *IndexImageMirrorerOptions) Apply(options []ImageIndexMirrorOption) { for _, option := range options { - option(c) + option(o) } } // ToOption converts an IndexImageMirrorerOptions object into a function that applies // its current configuration to another IndexImageMirrorerOptions instance -func (c *IndexImageMirrorerOptions) ToOption() ImageIndexMirrorOption { - return func(o *IndexImageMirrorerOptions) { - if c.ImageMirrorer != nil { - o.ImageMirrorer = c.ImageMirrorer +func (o *IndexImageMirrorerOptions) ToOption() ImageIndexMirrorOption { + return func(io *IndexImageMirrorerOptions) { + if o.ImageMirrorer != nil { + io.ImageMirrorer = o.ImageMirrorer } - if c.DatabaseExtractor != nil { - o.DatabaseExtractor = c.DatabaseExtractor + if o.DatabaseExtractor != nil { + io.DatabaseExtractor = o.DatabaseExtractor } - if c.Source != "" { - o.Source = c.Source + if o.Source != "" { + io.Source = o.Source } - if c.Dest != "" { - o.Dest = c.Dest + if o.Dest != "" { + io.Dest = o.Dest } - if c.ManifestDir != "" { - o.ManifestDir = c.ManifestDir + if o.ManifestDir != "" { + io.ManifestDir = o.ManifestDir } } } diff --git a/pkg/prettyunmarshaler/prettyunmarshaler.go b/pkg/prettyunmarshaler/prettyunmarshaler.go index abbf586fd..788428440 100644 --- a/pkg/prettyunmarshaler/prettyunmarshaler.go +++ b/pkg/prettyunmarshaler/prettyunmarshaler.go @@ -8,29 +8,29 @@ import ( "strings" ) -type JsonUnmarshalError struct { +type JSONUnmarshalError struct { data []byte offset int64 err error } -func NewJSONUnmarshalError(data []byte, err error) *JsonUnmarshalError { +func NewJSONUnmarshalError(data []byte, err error) *JSONUnmarshalError { var te *json.UnmarshalTypeError if errors.As(err, &te) { - return &JsonUnmarshalError{data: data, offset: te.Offset, err: te} + return &JSONUnmarshalError{data: data, offset: te.Offset, err: te} } var se *json.SyntaxError if errors.As(err, &se) { - return &JsonUnmarshalError{data: data, offset: se.Offset, err: se} + return &JSONUnmarshalError{data: data, offset: se.Offset, err: se} } - return &JsonUnmarshalError{data: data, offset: -1, err: err} + return &JSONUnmarshalError{data: data, offset: -1, err: err} } -func (e *JsonUnmarshalError) Error() string { +func (e *JSONUnmarshalError) Error() string { 
return e.err.Error() } -func (e *JsonUnmarshalError) Pretty() string { +func (e *JSONUnmarshalError) Pretty() string { if len(e.data) == 0 || e.offset < 0 || e.offset > int64(len(e.data)) { return e.err.Error() } diff --git a/pkg/registry/bundle.go b/pkg/registry/bundle.go index e87260204..2da04d54a 100644 --- a/pkg/registry/bundle.go +++ b/pkg/registry/bundle.go @@ -167,6 +167,7 @@ func (b *Bundle) CustomResourceDefinitions() ([]runtime.Object, error) { if err := b.cache(); err != nil { return nil, err } + // nolint:prealloc var crds []runtime.Object for _, crd := range b.v1crds { crds = append(crds, crd) @@ -276,10 +277,18 @@ func (b *Bundle) AllProvidedAPIsInBundle() error { return nil } -func (b *Bundle) Serialize() (csvName, bundleImage string, csvBytes []byte, bundleBytes []byte, annotationBytes []byte, err error) { +// (csvName, bundleImage string, csvBytes []byte, bundleBytes []byte, annotationBytes []byte, err error) { +func (b *Bundle) Serialize() (string, string, []byte, []byte, []byte, error) { + var bundleBytes []byte + var csvName string + var csvBytes []byte + var annotationBytes []byte + var err error + csvCount := 0 for _, obj := range b.Objects { - objBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + var objBytes []byte + objBytes, err = runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { return "", "", nil, nil, nil, err } @@ -299,7 +308,7 @@ func (b *Bundle) Serialize() (csvName, bundleImage string, csvBytes []byte, bund } if b.Annotations != nil { - annotationBytes, err = json.Marshal(b.Annotations) + annotationBytes, _ = json.Marshal(b.Annotations) } return csvName, b.BundleImage, csvBytes, bundleBytes, annotationBytes, nil diff --git a/pkg/registry/bundlegraphloader.go b/pkg/registry/bundlegraphloader.go index 3995a88c9..2854003a2 100644 --- a/pkg/registry/bundlegraphloader.go +++ b/pkg/registry/bundlegraphloader.go @@ -16,6 +16,7 @@ type BundleGraphLoader struct { func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, annotations *AnnotationsFile, skippatch bool) (*Package, error) { bundleVersion, err := bundle.Version() if err != nil { + // nolint:stylecheck return nil, fmt.Errorf("Unable to extract bundle version from bundle %s, can't insert in semver mode", bundle.BundleImage) } @@ -43,6 +44,7 @@ func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, ann if graph.DefaultChannel == "" { // Infer default channel from channel list if annotations.SelectDefaultChannel() == "" { + // nolint:stylecheck return nil, fmt.Errorf("Default channel is missing and can't be inferred") } graph.DefaultChannel = annotations.SelectDefaultChannel() @@ -83,6 +85,7 @@ func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, ann for node := range channelGraph.Nodes { nodeVersion, err := semver.Make(node.Version) if err != nil { + // nolint:stylecheck return nil, fmt.Errorf("Unable to parse existing bundle version stored in index %s %s %s", node.CsvName, node.Version, node.BundlePath) } diff --git a/pkg/registry/channelupdateoptions.go b/pkg/registry/channelupdateoptions.go index 85f5acb40..d45bd414e 100644 --- a/pkg/registry/channelupdateoptions.go +++ b/pkg/registry/channelupdateoptions.go @@ -22,6 +22,7 @@ func GetModeFromString(mode string) (Mode, error) { case "semver-skippatch": return SkipPatchMode, nil default: + // nolint:stylecheck return -1, fmt.Errorf("Invalid channel update mode %s specified", mode) } } diff --git a/pkg/registry/csv.go b/pkg/registry/csv.go index 
69abb61f6..20a791185 100644 --- a/pkg/registry/csv.go +++ b/pkg/registry/csv.go @@ -7,7 +7,7 @@ import ( "os" "path" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -20,6 +20,7 @@ import ( const ( // Name of the CSV's kind + // nolint:unused clusterServiceVersionKind = "ClusterServiceVersion" // Name of the section under which the list of owned and required list of @@ -44,9 +45,11 @@ const ( icon = "icon" // The yaml attribute that points to the icon.base64data for the ClusterServiceVersion + // nolint:unused base64data = "base64data" // The yaml attribute that points to the icon.mediatype for the ClusterServiceVersion + // nolint:unused mediatype = "mediatype" // The yaml attribute that points to the description for the ClusterServiceVersion description = "description" @@ -223,16 +226,16 @@ func (csv *ClusterServiceVersion) GetSkips() ([]string, error) { // // If owned or required is not defined in the spec then an empty list is // returned respectively. -func (csv *ClusterServiceVersion) GetCustomResourceDefintions() (owned []*DefinitionKey, required []*DefinitionKey, err error) { +func (csv *ClusterServiceVersion) GetCustomResourceDefintions() ([]*DefinitionKey, []*DefinitionKey, error) { var objmap map[string]*json.RawMessage - if err = json.Unmarshal(csv.Spec, &objmap); err != nil { - return + if err := json.Unmarshal(csv.Spec, &objmap); err != nil { + return nil, nil, err } rawValue, ok := objmap[customResourceDefinitions] if !ok || rawValue == nil { - return + return nil, nil, nil } var definitions struct { @@ -240,13 +243,11 @@ func (csv *ClusterServiceVersion) GetCustomResourceDefintions() (owned []*Defini Required []*DefinitionKey `json:"required"` } - if err = json.Unmarshal(*rawValue, &definitions); err != nil { - return + if err := json.Unmarshal(*rawValue, &definitions); err != nil { + return nil, nil, err } - owned = definitions.Owned - required = definitions.Required - return + return definitions.Owned, definitions.Required, nil } // GetApiServiceDefinitions returns a list of owned and required @@ -287,17 +288,17 @@ func (csv *ClusterServiceVersion) GetApiServiceDefinitions() (owned []*Definitio } // GetRelatedImage returns the list of associated images for the operator -func (csv *ClusterServiceVersion) GetRelatedImages() (imageSet map[string]struct{}, err error) { +func (csv *ClusterServiceVersion) GetRelatedImages() (map[string]struct{}, error) { var objmap map[string]*json.RawMessage - imageSet = make(map[string]struct{}) + imageSet := make(map[string]struct{}) - if err = json.Unmarshal(csv.Spec, &objmap); err != nil { - return + if err := json.Unmarshal(csv.Spec, &objmap); err != nil { + return nil, err } rawValue, ok := objmap[relatedImages] if !ok || rawValue == nil { - return + return imageSet, nil } type relatedImage struct { @@ -305,15 +306,15 @@ func (csv *ClusterServiceVersion) GetRelatedImages() (imageSet map[string]struct Ref string `json:"image"` } var relatedImages []relatedImage - if err = json.Unmarshal(*rawValue, &relatedImages); err != nil { - return + if err := json.Unmarshal(*rawValue, &relatedImages); err != nil { + return nil, err } for _, img := range relatedImages { imageSet[img.Ref] = struct{}{} } - return + return imageSet, nil } // GetOperatorImages returns a list of any images used to run the operator. 
@@ -321,7 +322,7 @@ func (csv *ClusterServiceVersion) GetRelatedImages() (imageSet map[string]struct func (csv *ClusterServiceVersion) GetOperatorImages() (map[string]struct{}, error) { type dep struct { Name string - Spec v1.DeploymentSpec + Spec appsv1.DeploymentSpec } type strategySpec struct { Deployments []dep diff --git a/pkg/registry/decode.go b/pkg/registry/decode.go index 0a9587d09..1818cc305 100644 --- a/pkg/registry/decode.go +++ b/pkg/registry/decode.go @@ -13,36 +13,34 @@ import ( // DecodeUnstructured decodes a raw stream into a an // unstructured.Unstructured instance. -func DecodeUnstructured(reader io.Reader) (obj *unstructured.Unstructured, err error) { +func DecodeUnstructured(reader io.Reader) (*unstructured.Unstructured, error) { decoder := yaml.NewYAMLOrJSONDecoder(reader, 30) t := &unstructured.Unstructured{} - if err = decoder.Decode(t); err != nil { - return + if err := decoder.Decode(t); err != nil { + return nil, err } - obj = t - return + return t, nil } // DecodePackageManifest decodes a raw stream into a a PackageManifest instance. // If a package name is empty we consider the object invalid! -func DecodePackageManifest(reader io.Reader) (manifest *PackageManifest, err error) { +func DecodePackageManifest(reader io.Reader) (*PackageManifest, error) { decoder := yaml.NewYAMLOrJSONDecoder(reader, 30) obj := &PackageManifest{} if decodeErr := decoder.Decode(obj); decodeErr != nil { - err = fmt.Errorf("could not decode contents into package manifest - %v", decodeErr) - return + err := fmt.Errorf("could not decode contents into package manifest - %v", decodeErr) + return nil, err } if obj.PackageName == "" { - err = errors.New("name of package (packageName) is missing") - return + err := errors.New("name of package (packageName) is missing") + return nil, err } - manifest = obj - return + return obj, nil } func decodeFileFS(root fs.FS, path string, into interface{}, log *logrus.Entry) error { diff --git a/pkg/registry/directoryGraphLoader.go b/pkg/registry/directoryGraphLoader.go index d0aeda74f..4b7209188 100644 --- a/pkg/registry/directoryGraphLoader.go +++ b/pkg/registry/directoryGraphLoader.go @@ -76,6 +76,7 @@ func (g *DirGraphLoader) loadBundleCsvPathMap() error { } CsvNameAndReplaceMap := make(map[string]csvReplaces) for _, bundlePath := range bundleDirs { + //nolint:nestif if bundlePath.IsDir() { csvStruct, err := ReadCSVFromBundleDirectory(filepath.Join(g.PackageDir, bundlePath.Name())) if err != nil { diff --git a/pkg/registry/empty.go b/pkg/registry/empty.go index 936f39cca..7b0d78046 100644 --- a/pkg/registry/empty.go +++ b/pkg/registry/empty.go @@ -40,7 +40,7 @@ func (EmptyQuery) GetBundleForChannel(ctx context.Context, pkgName string, chann return nil, errors.New("empty querier: cannot get bundle for channel") } -func (EmptyQuery) GetChannelEntriesThatReplace(ctx context.Context, name string) (entries []*ChannelEntry, err error) { +func (EmptyQuery) GetChannelEntriesThatReplace(ctx context.Context, name string) ([]*ChannelEntry, error) { return nil, errors.New("empty querier: cannot get channel entries that replace") } @@ -48,11 +48,11 @@ func (EmptyQuery) GetBundleThatReplaces(ctx context.Context, name, pkgName, chan return nil, errors.New("empty querier: cannot get bundle that replaces") } -func (EmptyQuery) GetChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*ChannelEntry, err error) { +func (EmptyQuery) GetChannelEntriesThatProvide(ctx context.Context, group, version, kind string) ([]*ChannelEntry, error) { 
return nil, errors.New("empty querier: cannot get channel entries that provide") } -func (EmptyQuery) GetLatestChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*ChannelEntry, err error) { +func (EmptyQuery) GetLatestChannelEntriesThatProvide(ctx context.Context, group, version, kind string) ([]*ChannelEntry, error) { return nil, errors.New("empty querier: cannot get latest channel entries that provide") } @@ -68,7 +68,7 @@ func (EmptyQuery) GetImagesForBundle(ctx context.Context, bundleName string) ([] return nil, errors.New("empty querier: cannot get image list") } -func (EmptyQuery) GetApisForEntry(ctx context.Context, entryId int64) (provided []*api.GroupVersionKind, required []*api.GroupVersionKind, err error) { +func (EmptyQuery) GetApisForEntry(ctx context.Context, entryId int64) ([]*api.GroupVersionKind, []*api.GroupVersionKind, error) { return nil, nil, errors.New("empty querier: cannot apis") } @@ -104,11 +104,11 @@ func (EmptyQuery) SendBundles(ctx context.Context, stream BundleSender) error { return errors.New("empty querier: cannot stream bundles") } -func (EmptyQuery) GetDependenciesForBundle(ctx context.Context, name, version, path string) (dependencies []*api.Dependency, err error) { +func (EmptyQuery) GetDependenciesForBundle(ctx context.Context, name, version, path string) ([]*api.Dependency, error) { return nil, errors.New("empty querier: cannot get dependencies for bundle") } -func (EmptyQuery) GetBundlePathIfExists(ctx context.Context, csvName string) (bundlePath string, err error) { +func (EmptyQuery) GetBundlePathIfExists(ctx context.Context, csvName string) (string, error) { return "", errors.New("empty querier: cannot get bundle path for bundle") } diff --git a/pkg/registry/parse.go b/pkg/registry/parse.go index 4725dbcf9..6af05bf39 100644 --- a/pkg/registry/parse.go +++ b/pkg/registry/parse.go @@ -157,6 +157,7 @@ func (b *bundleParser) addMetadata(metadata fs.FS, bundle *Bundle) error { bundle.Package = af.Annotations.PackageName bundle.Channels = af.GetChannels() } else { + // nolint:stylecheck return fmt.Errorf("Could not find annotations file") } @@ -185,6 +186,7 @@ func (b *bundleParser) derivedProperties(bundle *Bundle) ([]Property, error) { return nil, fmt.Errorf("bundle missing csv") } + // nolint:prealloc var derived []Property if len(csv.GetAnnotations()) > 0 { properties, ok := csv.GetAnnotations()[PropertyKey] @@ -237,6 +239,7 @@ func (b *bundleParser) derivedProperties(bundle *Bundle) ([]Property, error) { // propertySet returns the deduplicated set of a property list. 
func propertySet(properties []Property) []Property { var ( + // nolint:prealloc set []Property visited = map[string]struct{}{} ) diff --git a/pkg/registry/populator.go b/pkg/registry/populator.go index 730d27fb9..7747effc1 100644 --- a/pkg/registry/populator.go +++ b/pkg/registry/populator.go @@ -151,6 +151,7 @@ func (i *DirectoryPopulator) loadManifests(imagesToAdd []*ImageInput, mode Mode) // globalSanityCheck should have verified this to be a head without anything replacing it // and that we have a single overwrite per package + // nolint:nestif if len(i.overwrittenImages) > 0 { if overwriter, ok := i.loader.(HeadOverwriter); ok { // Assume loader has some way to handle overwritten heads if HeadOverwriter isn't implemented explicitly @@ -180,6 +181,7 @@ func (i *DirectoryPopulator) loadManifests(imagesToAdd []*ImageInput, mode Mode) } } default: + // nolint:stylecheck return fmt.Errorf("Unsupported update mode") } @@ -262,6 +264,7 @@ func (i *DirectoryPopulator) loadManifestsSemver(bundle *Bundle, skippatch bool) } // loadOperatorBundle adds the package information to the loader's store +// nolint:unused func (i *DirectoryPopulator) loadOperatorBundle(manifest PackageManifest, bundle *Bundle) error { if manifest.PackageName == "" { return nil diff --git a/pkg/registry/registry_to_model.go b/pkg/registry/registry_to_model.go index 0ba64c72d..cb80fca46 100644 --- a/pkg/registry/registry_to_model.go +++ b/pkg/registry/registry_to_model.go @@ -47,7 +47,7 @@ func ObjectsAndPropertiesFromBundle(b *Bundle) ([]string, []property.Property, e if err := json.Unmarshal(p.Value, &v); err != nil { return nil, nil, property.ParseError{Idx: i, Typ: p.Type, Err: err} } - k := property.GVKRequired{Group: v.Group, Kind: v.Kind, Version: v.Version} + k := property.GVKRequired(v) requiredGVKs[k] = struct{}{} case property.TypePackage: var v property.Package @@ -91,6 +91,7 @@ func ObjectsAndPropertiesFromBundle(b *Bundle) ([]string, []property.Property, e } var ( + // nolint:prealloc props []property.Property objects []string ) diff --git a/pkg/registry/types.go b/pkg/registry/types.go index 9b18b0661..4105aaa3d 100644 --- a/pkg/registry/types.go +++ b/pkg/registry/types.go @@ -286,6 +286,7 @@ func (gd *GVKDependency) Validate() []error { func (ld *LabelDependency) Validate() []error { errs := []error{} if *ld == (LabelDependency{}) { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Label information is missing")) } return errs @@ -295,13 +296,16 @@ func (ld *LabelDependency) Validate() []error { func (pd *PackageDependency) Validate() []error { errs := []error{} if pd.PackageName == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Package name is empty")) } if pd.Version == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Package version is empty")) } else { _, err := semver.ParseRange(pd.Version) if err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid semver format version")) } } @@ -312,15 +316,18 @@ func (pd *PackageDependency) Validate() []error { func (cc *CelConstraint) Validate() []error { errs := []error{} if cc.Cel == nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("The CEL field is missing")) } else { if cc.Cel.Rule == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("The CEL expression is missing")) return errs } validator := constraints.NewCelEnvironment() _, err := validator.Validate(cc.Cel.Rule) if err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid CEL expression: %s", err.Error())) } } @@ -329,6 
+336,7 @@ func (cc *CelConstraint) Validate() []error { // GetDependencies returns the list of dependency func (d *DependenciesFile) GetDependencies() []*Dependency { + // nolint:prealloc var dependencies []*Dependency for _, item := range d.Dependencies { dep := item diff --git a/pkg/sqlite/configmap.go b/pkg/sqlite/configmap.go index 44e2302cc..a1ce927f8 100644 --- a/pkg/sqlite/configmap.go +++ b/pkg/sqlite/configmap.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -47,7 +47,7 @@ func NewSQLLoaderForConfigMapData(logger *logrus.Entry, store registry.Load, con } } -func NewSQLLoaderForConfigMap(store registry.Load, configMap v1.ConfigMap) *ConfigMapLoader { +func NewSQLLoaderForConfigMap(store registry.Load, configMap corev1.ConfigMap) *ConfigMapLoader { logger := logrus.WithFields(logrus.Fields{"configmap": configMap.GetName(), "ns": configMap.GetNamespace()}) return &ConfigMapLoader{ log: logger, @@ -66,14 +66,14 @@ func (c *ConfigMapLoader) Populate() error { return fmt.Errorf("couldn't find expected key %s in configmap", ConfigMapCRDName) } - crdListJson, err := yaml.YAMLToJSON([]byte(crdListYaml)) + crdListJSON, err := yaml.YAMLToJSON([]byte(crdListYaml)) if err != nil { c.log.WithError(err).Debug("error loading CRD list") return err } var parsedCRDList []v1beta1.CustomResourceDefinition - if err := json.Unmarshal(crdListJson, &parsedCRDList); err != nil { + if err := json.Unmarshal(crdListJSON, &parsedCRDList); err != nil { c.log.WithError(err).Debug("error parsing CRD list") return err } @@ -106,14 +106,14 @@ func (c *ConfigMapLoader) Populate() error { errs = append(errs, fmt.Errorf("couldn't find expected key %s in configmap", ConfigMapCSVName)) return utilerrors.NewAggregate(errs) } - csvListJson, err := yaml.YAMLToJSON([]byte(csvListYaml)) + csvListJSON, err := yaml.YAMLToJSON([]byte(csvListYaml)) if err != nil { errs = append(errs, fmt.Errorf("error loading CSV list: %s", err)) return utilerrors.NewAggregate(errs) } var parsedCSVList []registry.ClusterServiceVersion - err = json.Unmarshal(csvListJson, &parsedCSVList) + err = json.Unmarshal(csvListJSON, &parsedCSVList) if err != nil { errs = append(errs, fmt.Errorf("error parsing CSV list: %s", err)) return utilerrors.NewAggregate(errs) @@ -164,14 +164,14 @@ func (c *ConfigMapLoader) Populate() error { return utilerrors.NewAggregate(errs) } - packageListJson, err := yaml.YAMLToJSON([]byte(packageListYaml)) + packageListJSON, err := yaml.YAMLToJSON([]byte(packageListYaml)) if err != nil { errs = append(errs, fmt.Errorf("error loading package list: %s", err)) return utilerrors.NewAggregate(errs) } var parsedPackageManifests []registry.PackageManifest - err = json.Unmarshal(packageListJson, &parsedPackageManifests) + err = json.Unmarshal(packageListJSON, &parsedPackageManifests) if err != nil { errs = append(errs, fmt.Errorf("error parsing package list: %s", err)) return utilerrors.NewAggregate(errs) diff --git a/pkg/sqlite/conversion.go b/pkg/sqlite/conversion.go index a5cd9bb6c..47d2257f7 100644 --- a/pkg/sqlite/conversion.go +++ b/pkg/sqlite/conversion.go @@ -40,6 +40,7 @@ func initializeModelPackages(ctx context.Context, q *SQLQuerier) (model.Model, e return nil, err } + // nolint:prealloc var rPkgs []registry.PackageManifest for _, pkgName := range pkgNames { rPkg, err := q.GetPackage(ctx, pkgName) diff 
--git a/pkg/sqlite/db_options.go b/pkg/sqlite/db_options.go index e09bfbc03..5d43615f1 100644 --- a/pkg/sqlite/db_options.go +++ b/pkg/sqlite/db_options.go @@ -4,12 +4,14 @@ import ( "database/sql" ) +// nolint:stylecheck type DbOptions struct { // MigratorBuilder is a function that returns a migrator instance MigratorBuilder func(*sql.DB) (Migrator, error) EnableAlpha bool } +// nolint:stylecheck type DbOption func(*DbOptions) func defaultDBOptions() *DbOptions { diff --git a/pkg/sqlite/deprecate.go b/pkg/sqlite/deprecate.go index 4ac3d61eb..80e11fc91 100644 --- a/pkg/sqlite/deprecate.go +++ b/pkg/sqlite/deprecate.go @@ -72,6 +72,7 @@ func (d *PackageDeprecator) MaybeRemovePackages() error { var errs []error var removedBundlePaths []string + // nolint:prealloc var remainingBundlePaths []string // Iterate over bundles list - see if any bundle is the head of a default channel in a package diff --git a/pkg/sqlite/deprecationmessage.go b/pkg/sqlite/deprecationmessage.go index 20a1389b7..a0b4bc75f 100644 --- a/pkg/sqlite/deprecationmessage.go +++ b/pkg/sqlite/deprecationmessage.go @@ -10,7 +10,7 @@ const noticeColor = "\033[1;33m%s\033[0m" func LogSqliteDeprecation() { log := logrus.New() - log.Warnf(DeprecationMessage) + log.Warn(DeprecationMessage) } var DeprecationMessage = fmt.Sprintf(noticeColor, `DEPRECATION NOTICE: diff --git a/pkg/sqlite/directory.go b/pkg/sqlite/directory.go index dfa7b6067..aec1f9225 100644 --- a/pkg/sqlite/directory.go +++ b/pkg/sqlite/directory.go @@ -54,7 +54,9 @@ func (d *DirectoryLoader) Populate() error { // collectWalkErrs calls the given walk func and appends any non-nil, non skip dir error returned to the given errors slice. func collectWalkErrs(walk filepath.WalkFunc, errs *[]error) filepath.WalkFunc { - return func(path string, f os.FileInfo, err error) (walkErr error) { + return func(path string, f os.FileInfo, err error) error { + var walkErr error + // nolint: errorlint if walkErr = walk(path, f, err); walkErr != nil && walkErr != filepath.SkipDir { *errs = append(*errs, walkErr) return nil diff --git a/pkg/sqlite/load.go b/pkg/sqlite/load.go index 86ef768eb..a7d9d5c77 100644 --- a/pkg/sqlite/load.go +++ b/pkg/sqlite/load.go @@ -69,7 +69,7 @@ func (s *sqlLoader) AddOperatorBundle(bundle *registry.Bundle) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.addOperatorBundle(tx, bundle); err != nil { @@ -123,6 +123,7 @@ func (s *sqlLoader) addOperatorBundle(tx *sql.Tx, bundle *registry.Bundle) error } if substitutesFor != "" && !s.enableAlpha { + // nolint:stylecheck return fmt.Errorf("SubstitutesFor is an alpha-only feature. 
You must enable alpha features with the flag --enable-alpha in order to use this feature.") } @@ -204,6 +205,7 @@ func (s *sqlLoader) addSubstitutesFor(tx *sql.Tx, bundle *registry.Bundle) error if err != nil { return fmt.Errorf("failed to obtain substitutes : %s", err) } + // nolint:nestif if substitutesFor != "" { // Update any replaces that reference the substituted-for bundle _, err = updateBundleReplaces.Exec(csvName, substitutesFor) @@ -406,7 +408,7 @@ func (s *sqlLoader) AddPackageChannelsFromGraph(graph *registry.Package) error { return fmt.Errorf("unable to start a transaction: %s", err) } defer func() { - tx.Rollback() + _ = tx.Rollback() }() var errs []error @@ -506,6 +508,7 @@ func (s *sqlLoader) AddPackageChannelsFromGraph(graph *registry.Package) error { // If the number of nodes is 5 and the startDepth is 3, the expected depth is 7 (3, 4, 5, 6, 7) expectedDepth := len(channel.Nodes) + startDepth - 1 if expectedDepth != depth { + // nolint:stylecheck err := fmt.Errorf("Invalid graph: some (non-bottom) nodes defined in the graph were not mentioned as replacements of any node (%d != %d)", expectedDepth, depth) errs = append(errs, err) } @@ -532,7 +535,7 @@ func (s *sqlLoader) AddPackageChannels(manifest registry.PackageManifest) error return fmt.Errorf("unable to start a transaction: %s", err) } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.rmPackage(tx, manifest.PackageName); err != nil { @@ -591,7 +594,8 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani } var ( - errs []error + errs []error + // nolint:prealloc channels []registry.PackageChannel hasDefault bool ) @@ -716,6 +720,7 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani // If we find 'replaces' in the circuit list then we've seen it already, break out if _, ok := replaceCycle[replaces]; ok { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Cycle detected, %s replaces %s", channelEntryCSVName, replaces)) break } @@ -731,6 +736,7 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani break } if _, _, _, err := s.getBundleSkipsReplacesVersion(tx, replaces); err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid bundle %s, replaces nonexistent bundle %s", c.CurrentCSVName, replaces)) break } @@ -749,7 +755,7 @@ func (s *sqlLoader) ClearNonHeadBundles() error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() removeNonHeadBundles, err := tx.Prepare(` @@ -772,34 +778,37 @@ func (s *sqlLoader) ClearNonHeadBundles() error { return tx.Commit() } -func (s *sqlLoader) getBundleSkipsReplacesVersion(tx *sql.Tx, bundleName string) (replaces string, skips []string, version string, err error) { +func (s *sqlLoader) getBundleSkipsReplacesVersion(tx *sql.Tx, bundleName string) (string, []string, string, error) { getReplacesSkipsAndVersions, err := tx.Prepare(` SELECT replaces, skips, version FROM operatorbundle WHERE operatorbundle.name=? 
LIMIT 1`) if err != nil { - return + return "", nil, "", err } defer getReplacesSkipsAndVersions.Close() rows, rerr := getReplacesSkipsAndVersions.Query(bundleName) if err != nil { err = rerr - return + return "", nil, "", err } defer rows.Close() if !rows.Next() { err = fmt.Errorf("no bundle found for bundlename %s", bundleName) - return + return "", nil, "", err } var replacesStringSQL sql.NullString var skipsStringSQL sql.NullString var versionStringSQL sql.NullString if err = rows.Scan(&replacesStringSQL, &skipsStringSQL, &versionStringSQL); err != nil { - return + return "", nil, "", err } + var replaces string + var skips []string + var version string if replacesStringSQL.Valid { replaces = replacesStringSQL.String } @@ -810,40 +819,41 @@ func (s *sqlLoader) getBundleSkipsReplacesVersion(tx *sql.Tx, bundleName string) version = versionStringSQL.String } - return + return replaces, skips, version, nil } -func (s *sqlLoader) getBundlePathIfExists(tx *sql.Tx, bundleName string) (bundlePath string, err error) { +func (s *sqlLoader) getBundlePathIfExists(tx *sql.Tx, bundleName string) (string, error) { getBundlePath, err := tx.Prepare(` SELECT bundlepath FROM operatorbundle WHERE operatorbundle.name=? LIMIT 1`) if err != nil { - return + return "", err } defer getBundlePath.Close() rows, rerr := getBundlePath.Query(bundleName) if err != nil { err = rerr - return + return "", err } defer rows.Close() if !rows.Next() { // no bundlepath set - return + return "", nil } var bundlePathSQL sql.NullString if err = rows.Scan(&bundlePathSQL); err != nil { - return + return "", err } + var bundlePath string if bundlePathSQL.Valid { bundlePath = bundlePathSQL.String } - return + return bundlePath, nil } func (s *sqlLoader) addAPIs(tx *sql.Tx, bundle *registry.Bundle) error { @@ -949,7 +959,7 @@ func (s *sqlLoader) RemovePackage(packageName string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() csvNames, err := s.getCSVNames(tx, packageName) @@ -1058,7 +1068,7 @@ func (s *sqlLoader) AddBundlePackageChannels(manifest registry.PackageManifest, return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.addOperatorBundle(tx, bundle); err != nil { @@ -1342,7 +1352,7 @@ type tailBundle struct { replacedBy []string // to handle any chain where a skipped entry may be a part of another channel that should not be truncated } -func getTailFromBundle(tx *sql.Tx, head string) (bundles map[string]tailBundle, err error) { +func getTailFromBundle(tx *sql.Tx, head string) (map[string]tailBundle, error) { // traverse replaces chain and collect channel list for each bundle. // This assumes that replaces chain for a bundle is the same across channels. // only real bundles with entries in the operator_bundle table are returned. 
@@ -1391,7 +1401,7 @@ func getTailFromBundle(tx *sql.Tx, head string) (bundles map[string]tailBundle, return nil, fmt.Errorf("could not find default channel head for %s", head) } var defaultChannelHead sql.NullString - err = row.Scan(&defaultChannelHead) + err := row.Scan(&defaultChannelHead) if err != nil { return nil, fmt.Errorf("error getting default channel head for %s: %v", head, err) } @@ -1480,7 +1490,7 @@ func (s *sqlLoader) DeprecateBundle(path string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() name, version, err := getBundleNameAndVersionForImage(tx, path) @@ -1590,7 +1600,7 @@ func (s *sqlLoader) RemoveStrandedBundles() error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.rmStrandedBundles(tx); err != nil { @@ -1740,7 +1750,7 @@ func (d *DeprecationAwareLoader) clearLastDeprecatedInPackage(pkg string) error return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() // The last deprecated bundles for a package will still have "tombstone" records in channel_entry (among other tables). @@ -1768,7 +1778,7 @@ func (s sqlLoader) RemoveOverwrittenChannelHead(pkg, bundle string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() // check if bundle has anything that replaces it getBundlesThatReplaceHeadQuery := `SELECT DISTINCT operatorbundle.name AS replaces, channel_entry.channel_name diff --git a/pkg/sqlite/loadprocs.go b/pkg/sqlite/loadprocs.go index 2ce93c605..218f2cda1 100644 --- a/pkg/sqlite/loadprocs.go +++ b/pkg/sqlite/loadprocs.go @@ -41,6 +41,7 @@ func addReplaces(tx *sql.Tx, replacesID, entryID int64) error { return nil } +// nolint:unused func addPackage(tx *sql.Tx, packageName string) error { addPackage, err := tx.Prepare("insert into package(name) values(?)") if err != nil { @@ -71,6 +72,7 @@ func addPackageIfNotExists(tx *sql.Tx, packageName string) error { return nil } +// nolint:unused func addChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error { addChannel, err := tx.Prepare("insert into channel(name, package_name, head_operatorbundle_name) values(?, ?, ?)") if err != nil { @@ -86,6 +88,7 @@ func addChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error return nil } +// nolint:unused func updateChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error { updateChannel, err := tx.Prepare("update channel set head_operatorbundle_name = ? where name = ? 
and package_name = ?") if err != nil { diff --git a/pkg/sqlite/migrations/001_related_images.go b/pkg/sqlite/migrations/001_related_images.go index 3b3c8c36b..22045e136 100644 --- a/pkg/sqlite/migrations/001_related_images.go +++ b/pkg/sqlite/migrations/001_related_images.go @@ -45,25 +45,25 @@ func getCSV(ctx context.Context, tx *sql.Tx, name string) (*registry.ClusterServ return nil, err } - var csvJson sql.NullString + var csvJSON sql.NullString if !rows.Next() { return nil, fmt.Errorf("bundle %s not found", name) } - if err := rows.Scan(&csvJson); err != nil { + if err := rows.Scan(&csvJSON); err != nil { return nil, err } - if !csvJson.Valid { + if !csvJSON.Valid { return nil, fmt.Errorf("bad value for csv") } csv := &registry.ClusterServiceVersion{} - if err := json.Unmarshal([]byte(csvJson.String), csv); err != nil { + if err := json.Unmarshal([]byte(csvJSON.String), csv); err != nil { return nil, err } return csv, nil } func extractRelatedImages(ctx context.Context, tx *sql.Tx, name string) error { - addSql := `insert into related_image(image, operatorbundle_name) values(?,?)` + addSQL := `insert into related_image(image, operatorbundle_name) values(?,?)` csv, err := getCSV(ctx, tx, name) if err != nil { logrus.Warnf("error backfilling related images: %v", err) @@ -83,7 +83,7 @@ func extractRelatedImages(ctx context.Context, tx *sql.Tx, name string) error { images[k] = struct{}{} } for img := range images { - if _, err := tx.ExecContext(ctx, addSql, img, name); err != nil { + if _, err := tx.ExecContext(ctx, addSQL, img, name); err != nil { logrus.Warnf("error backfilling related images: %v", err) continue } diff --git a/pkg/sqlite/migrations/003_required_apis.go b/pkg/sqlite/migrations/003_required_apis.go index 0253c5119..f25d285ab 100644 --- a/pkg/sqlite/migrations/003_required_apis.go +++ b/pkg/sqlite/migrations/003_required_apis.go @@ -9,14 +9,15 @@ import ( "github.com/sirupsen/logrus" ) +// nolint:stylecheck const RequiredApiMigrationKey = 3 // Register this migration func init() { - registerMigration(RequiredApiMigrationKey, requiredApiMigration) + registerMigration(RequiredApiMigrationKey, requiredAPIMigration) } -var requiredApiMigration = &Migration{ +var requiredAPIMigration = &Migration{ Id: RequiredApiMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { sql := ` @@ -37,8 +38,8 @@ var requiredApiMigration = &Migration{ if err != nil { return err } - for entryId, bundle := range bundles { - if err := extractRequiredApis(ctx, tx, entryId, bundle); err != nil { + for entryID, bundle := range bundles { + if err := extractRequiredApis(ctx, tx, entryID, bundle); err != nil { logrus.Warnf("error backfilling required apis: %v", err) continue } @@ -67,20 +68,20 @@ func getChannelEntryBundles(ctx context.Context, tx *sql.Tx) (map[int64]string, entries := map[int64]string{} for rows.Next() { - var entryId sql.NullInt64 + var entryID sql.NullInt64 var name sql.NullString - if err = rows.Scan(&entryId, &name); err != nil { + if err = rows.Scan(&entryID, &name); err != nil { return nil, err } - if !entryId.Valid || !name.Valid { + if !entryID.Valid || !name.Valid { continue } - entries[entryId.Int64] = name.String + entries[entryID.Int64] = name.String } return entries, nil } -func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name string) error { +func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryID int64, name string) error { addAPI, err := tx.Prepare("insert or replace into api(group_name, version, kind, plural) values(?, ?, ?, ?)") if
err != nil { return err @@ -91,12 +92,12 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st } }() - addApiRequirer, err := tx.Prepare("insert into api_requirer(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)") + addAPIRequirer, err := tx.Prepare("insert into api_requirer(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)") if err != nil { return err } defer func() { - if err := addApiRequirer.Close(); err != nil { + if err := addAPIRequirer.Close(); err != nil { logrus.WithError(err).Warningf("error closing prepared statement") } }() @@ -107,7 +108,7 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st return err } - _, requiredCRDs, err := csv.GetCustomResourceDefintions() + _, requiredCRDs, _ := csv.GetCustomResourceDefintions() for _, crd := range requiredCRDs { plural, group, err := SplitCRDName(crd.Name) if err != nil { @@ -116,17 +117,17 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st if _, err := addAPI.Exec(group, crd.Version, crd.Kind, plural); err != nil { return err } - if _, err := addApiRequirer.Exec(group, crd.Version, crd.Kind, entryId); err != nil { + if _, err := addAPIRequirer.Exec(group, crd.Version, crd.Kind, entryID); err != nil { return err } } - _, requiredAPIs, err := csv.GetApiServiceDefinitions() + _, requiredAPIs, _ := csv.GetApiServiceDefinitions() for _, api := range requiredAPIs { if _, err := addAPI.Exec(api.Group, api.Version, api.Kind, api.Name); err != nil { return err } - if _, err := addApiRequirer.Exec(api.Group, api.Version, api.Kind, entryId); err != nil { + if _, err := addAPIRequirer.Exec(api.Group, api.Version, api.Kind, entryID); err != nil { return err } } @@ -134,14 +135,13 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st return nil } -func SplitCRDName(crdName string) (plural, group string, err error) { +func SplitCRDName(crdName string) (string, string, error) { + var err error pluralGroup := strings.SplitN(crdName, ".", 2) if len(pluralGroup) != 2 { err = fmt.Errorf("can't split bad CRD name %s", crdName) - return + return "", "", err } - plural = pluralGroup[0] - group = pluralGroup[1] - return + return pluralGroup[0], pluralGroup[1], nil } diff --git a/pkg/sqlite/migrations/005_version_skiprange.go b/pkg/sqlite/migrations/005_version_skiprange.go index 60b3c87ad..6a825debc 100644 --- a/pkg/sqlite/migrations/005_version_skiprange.go +++ b/pkg/sqlite/migrations/005_version_skiprange.go @@ -75,7 +75,7 @@ var versionSkipRangeMigration = &Migration{ } func extractVersioning(ctx context.Context, tx *sql.Tx, name string) error { - addSql := `insert into operatorbundle(version, skiprange) values(?,?)` + addSQL := `insert into operatorbundle(version, skiprange) values(?,?)` csv, err := getCSV(ctx, tx, name) if err != nil { logrus.Warnf("error backfilling versioning: %v", err) @@ -89,6 +89,6 @@ func extractVersioning(ctx context.Context, tx *sql.Tx, name string) error { if err != nil { version = "" } - _, err = tx.ExecContext(ctx, addSql, version, skiprange) + _, err = tx.ExecContext(ctx, addSQL, version, skiprange) return err } diff --git a/pkg/sqlite/migrations/006_associate_apis_with_bundle.go b/pkg/sqlite/migrations/006_associate_apis_with_bundle.go index f70436f1d..0e57e67fc 100644 --- a/pkg/sqlite/migrations/006_associate_apis_with_bundle.go +++ b/pkg/sqlite/migrations/006_associate_apis_with_bundle.go @@ -11,7 +11,7 @@ const AssociateApisWithBundleMigrationKey = 6 // Register this 
migration func init() { - registerMigration(AssociateApisWithBundleMigrationKey, bundleApiMigration) + registerMigration(AssociateApisWithBundleMigrationKey, bundleAPIMigration) } // This migration moves the link between the provided and required apis table from the channel_entry to the @@ -24,7 +24,7 @@ func init() { // api_provider: FOREIGN KEY(operatorbundle_name, operatorbundle_version, operatorbundle_path) REFERENCES operatorbundle(name, version, bundlepath), // api_requirer: FOREIGN KEY(operatorbundle_name, operatorbundle_version, operatorbundle_path) REFERENCES operatorbundle(name, version, bundlepath), -var bundleApiMigration = &Migration{ +var bundleAPIMigration = &Migration{ Id: AssociateApisWithBundleMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { createNew := ` diff --git a/pkg/sqlite/migrations/007_replaces_skips.go b/pkg/sqlite/migrations/007_replaces_skips.go index 7825e89fe..2340634be 100644 --- a/pkg/sqlite/migrations/007_replaces_skips.go +++ b/pkg/sqlite/migrations/007_replaces_skips.go @@ -97,12 +97,12 @@ func extractReplaces(ctx context.Context, tx *sql.Tx, name string) error { if err != nil { return err } - updateSql := `update operatorbundle SET replaces = ?, skips = ? WHERE name = ?;` - _, err = tx.ExecContext(ctx, updateSql, replaces, strings.Join(skips, ","), name) + updateSQL := `update operatorbundle SET replaces = ?, skips = ? WHERE name = ?;` + _, err = tx.ExecContext(ctx, updateSQL, replaces, strings.Join(skips, ","), name) return err } -func getReplacesAndSkips(ctx context.Context, tx *sql.Tx, name string) (replaces string, skips []string, err error) { +func getReplacesAndSkips(ctx context.Context, tx *sql.Tx, name string) (string, []string, error) { getReplacees := ` SELECT DISTINCT replaces.operatorbundle_name FROM channel_entry @@ -117,26 +117,28 @@ func getReplacesAndSkips(ctx context.Context, tx *sql.Tx, name string) (replaces } defer rows.Close() + var replaces string if rows.Next() { var replaceeName sql.NullString if err = rows.Scan(&replaceeName); err != nil { - return + return "", nil, err } if replaceeName.Valid { replaces = replaceeName.String } } + var skips []string skips = []string{} for rows.Next() { var skipName sql.NullString if err = rows.Scan(&skipName); err != nil { - return + return "", nil, err } if !skipName.Valid { continue } skips = append(skips, skipName.String) } - return + return replaces, skips, nil } diff --git a/pkg/sqlite/migrations/009_properties.go b/pkg/sqlite/migrations/009_properties.go index 046675611..7a89859b8 100644 --- a/pkg/sqlite/migrations/009_properties.go +++ b/pkg/sqlite/migrations/009_properties.go @@ -75,12 +75,12 @@ var propertiesMigration = &Migration{ } // update the serialized value to omit the dependency type - updateDependencySql := ` + updateDependencySQL := ` UPDATE dependencies SET value = (SELECT json_remove(value, "$.type") FROM dependencies WHERE operatorbundle_name=dependencies.operatorbundle_name)` - _, err = tx.ExecContext(ctx, updateDependencySql) + _, err = tx.ExecContext(ctx, updateDependencySQL) if err != nil { return err } diff --git a/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go b/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go index bee961621..d488775b0 100644 --- a/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go +++ b/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go @@ -15,12 +15,12 @@ func init() { var bundlePathPkgPropertyMigration = &Migration{ Id: BundlePathPkgMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { - 
updatePropertiesSql := ` + updatePropertiesSQL := ` UPDATE properties SET operatorbundle_path = (SELECT bundlepath FROM operatorbundle WHERE operatorbundle_name = operatorbundle.name AND operatorbundle_version = operatorbundle.version)` - _, err := tx.ExecContext(ctx, updatePropertiesSql) + _, err := tx.ExecContext(ctx, updatePropertiesSQL) if err != nil { return err } @@ -28,11 +28,11 @@ var bundlePathPkgPropertyMigration = &Migration{ return nil }, Down: func(ctx context.Context, tx *sql.Tx) error { - updatePropertiesSql := ` + updatePropertiesSQL := ` UPDATE properties SET operatorbundle_path = null WHERE type = "olm.package"` - _, err := tx.ExecContext(ctx, updatePropertiesSql) + _, err := tx.ExecContext(ctx, updatePropertiesSQL) if err != nil { return err } diff --git a/pkg/sqlite/migrations/migrations.go b/pkg/sqlite/migrations/migrations.go index b9bb60fba..475bb7cd6 100644 --- a/pkg/sqlite/migrations/migrations.go +++ b/pkg/sqlite/migrations/migrations.go @@ -8,6 +8,7 @@ import ( ) type Migration struct { + // nolint:stylecheck Id int Up func(context.Context, *sql.Tx) error Down func(context.Context, *sql.Tx) error diff --git a/pkg/sqlite/migrator.go b/pkg/sqlite/migrator.go index 82bacc834..4e0196a12 100644 --- a/pkg/sqlite/migrator.go +++ b/pkg/sqlite/migrator.go @@ -3,6 +3,7 @@ package sqlite import ( "context" "database/sql" + "errors" "fmt" "strings" @@ -86,12 +87,12 @@ func (m *SQLLiteMigrator) Up(ctx context.Context, migrations migrations.Migratio } for _, migration := range migrations { - current_version, err := m.version(ctx, tx) + currentVersion, err := m.version(ctx, tx) if err != nil { return err } - if migration.Id != current_version+1 { + if migration.Id != currentVersion+1 { return fmt.Errorf("migration applied out of order") } @@ -127,12 +128,12 @@ func (m *SQLLiteMigrator) Down(ctx context.Context, migrations migrations.Migrat } for _, migration := range migrations { - current_version, err := m.version(ctx, tx) + currentVersion, err := m.version(ctx, tx) if err != nil { return err } - if migration.Id != current_version { + if migration.Id != currentVersion { return fmt.Errorf("migration applied out of order") } @@ -175,7 +176,7 @@ func (m *SQLLiteMigrator) tableExists(tx *sql.Tx, table string) (bool, error) { return exists, nil } -func (m *SQLLiteMigrator) version(ctx context.Context, tx *sql.Tx) (version int, err error) { +func (m *SQLLiteMigrator) version(ctx context.Context, tx *sql.Tx) (int, error) { tableExists, err := m.tableExists(tx, m.migrationsTable) if err != nil { return NilVersion, err @@ -185,9 +186,10 @@ func (m *SQLLiteMigrator) version(ctx context.Context, tx *sql.Tx) (version int, } query := `SELECT version FROM ` + m.migrationsTable + ` LIMIT 1` + var version int err = tx.QueryRowContext(ctx, query).Scan(&version) switch { - case err == sql.ErrNoRows: + case errors.Is(err, sql.ErrNoRows): return NilVersion, nil case err != nil: return NilVersion, err diff --git a/pkg/sqlite/migrator_test.go b/pkg/sqlite/migrator_test.go index 1fcd7e2e5..8fd1aee2e 100644 --- a/pkg/sqlite/migrator_test.go +++ b/pkg/sqlite/migrator_test.go @@ -134,7 +134,11 @@ func TestSQLLiteMigrator_Down(t *testing.T) { require.NoError(t, m.setVersion(context.TODO(), tx, 0)) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + // TODO: this shouldn't be unconditionally rolled back + // run_test_migration, run_migration_out_of_order, and run_error_migration each have at least one scenario + // where rollback is no longer possible (committed or rolled back 
already) + // In the interest of retaining function and a good lint bright line, we'll just ignore the error here + _ = tx.Rollback() } if err := m.Down(tt.args.ctx, tt.args.migrations); (err != nil) != tt.wantErr { t.Errorf("Down() error = %v, wantErr %v", err, tt.wantErr) @@ -150,7 +154,7 @@ func TestSQLLiteMigrator_Down(t *testing.T) { version, err = m.version(context.TODO(), tx) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } require.Equal(t, tt.wantVersion, version) }) @@ -292,7 +296,7 @@ func TestSQLLiteMigrator_Up(t *testing.T) { require.NoError(t, m.setVersion(context.TODO(), tx, -1)) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } if err := m.Up(tt.args.ctx, tt.args.migrations); (err != nil) != tt.wantErr { t.Errorf("Up() error = %v, wantErr %v", err, tt.wantErr) @@ -308,7 +312,7 @@ func TestSQLLiteMigrator_Up(t *testing.T) { version, err = m.version(context.TODO(), tx) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } require.Equal(t, tt.wantVersion, version) @@ -326,7 +330,7 @@ func TestSQLLiteMigrator_Up(t *testing.T) { version, err = m.version(context.TODO(), tx) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } require.Equal(t, NilVersion, version) }) @@ -409,7 +413,7 @@ func TestSQLLiteMigrator_Migrate(t *testing.T) { require.NoError(t, m.setVersion(context.TODO(), tx, tt.startVersion)) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } if err := m.Migrate(context.TODO()); (err != nil) != tt.wantErr { t.Errorf("Migrate() error = %v, wantErr %v", err, tt.wantErr) @@ -425,7 +429,7 @@ func TestSQLLiteMigrator_Migrate(t *testing.T) { version, err = m.version(context.TODO(), tx) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } require.Equal(t, tt.wantVersion, version) }) diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index 4a1521a7f..e0a413827 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -63,6 +63,7 @@ func NewSQLLiteQuerier(dbFilename string, opts ...SQLiteQuerierOption) (*SQLQuer return NewSQLLiteQuerierFromDb(db, opts...), nil } +// nolint:stylecheck func NewSQLLiteQuerierFromDb(db *sql.DB, opts ...SQLiteQuerierOption) *SQLQuerier { return NewSQLLiteQuerierFromDBQuerier(dbQuerierAdapter{db}, opts...) 
} @@ -241,13 +242,13 @@ func (s *SQLQuerier) GetBundle(ctx context.Context, pkgName, channelName, csvNam if !rows.Next() { return nil, fmt.Errorf("no entry found for %s %s %s", pkgName, channelName, csvName) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var name sql.NullString var bundle sql.NullString var bundlePath sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &name, &bundle, &bundlePath, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &name, &bundle, &bundlePath, &version, &skipRange); err != nil { return nil, err } @@ -265,7 +266,7 @@ func (s *SQLQuerier) GetBundle(ctx context.Context, pkgName, channelName, csvNam out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != nil { return nil, err } @@ -315,18 +316,18 @@ WHERE channel.name = :channel AND channel.package_name = :package` }, nil } -func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name string) (entries []*registry.ChannelEntry, err error) { +func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name string) ([]*registry.ChannelEntry, error) { query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name FROM channel_entry LEFT OUTER JOIN channel_entry replaces ON channel_entry.replaces = replaces.entry_id WHERE replaces.operatorbundle_name = ?` rows, err := s.db.QueryContext(ctx, query, name) if err != nil { - return + return nil, err } defer rows.Close() - entries = []*registry.ChannelEntry{} + var entries []*registry.ChannelEntry for rows.Next() { var pkgNameSQL sql.NullString @@ -334,7 +335,7 @@ func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name stri var bundleNameSQL sql.NullString if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL); err != nil { - return + return nil, err } entries = append(entries, &registry.ChannelEntry{ PackageName: pkgNameSQL.String, @@ -345,9 +346,9 @@ func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name stri } if len(entries) == 0 { err = fmt.Errorf("no channel entries found that replace %s", name) - return + return nil, err } - return + return entries, nil } func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, channelName string) (*api.Bundle, error) { @@ -365,13 +366,13 @@ func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, c if !rows.Next() { return nil, fmt.Errorf("no entry found for %s %s", pkgName, channelName) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var outName sql.NullString var bundle sql.NullString var bundlePath sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &outName, &bundle, &bundlePath, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &outName, &bundle, &bundlePath, &version, &skipRange); err != nil { return nil, err } @@ -389,7 +390,7 @@ func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, c out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != nil { return nil, err } @@ -489,8 +490,8 @@ func (s *SQLQuerier) GetLatestChannelEntriesThatProvide(ctx context.Context, gro var channelNameSQL
sql.NullString var bundleNameSQL sql.NullString var replacesSQL sql.NullString - var min_depth sql.NullInt64 - if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL, &min_depth); err != nil { + var minDepth sql.NullInt64 + if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL, &minDepth); err != nil { return nil, err } @@ -532,17 +533,17 @@ func (s *SQLQuerier) GetBundleThatProvides(ctx context.Context, group, apiVersio if !rows.Next() { return nil, fmt.Errorf("no entry found that provides %s %s %s", group, apiVersion, kind) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var bundle sql.NullString var bundlePath sql.NullString - var min_depth sql.NullInt64 + var minDepth sql.NullInt64 var bundleName sql.NullString var pkgName sql.NullString var channelName sql.NullString var replaces sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &bundle, &bundlePath, &min_depth, &bundleName, &pkgName, &channelName, &replaces, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &bundle, &bundlePath, &minDepth, &bundleName, &pkgName, &channelName, &replaces, &version, &skipRange); err != nil { return nil, err } @@ -564,7 +565,7 @@ func (s *SQLQuerier) GetBundleThatProvides(ctx context.Context, group, apiVersio out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != nil { return nil, err } @@ -627,7 +628,7 @@ func (s *SQLQuerier) GetImagesForBundle(ctx context.Context, csvName string) ([] return images, nil } -func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provided []*api.GroupVersionKind, required []*api.GroupVersionKind, err error) { +func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) ([]*api.GroupVersionKind, []*api.GroupVersionKind, error) { groups := map[string]struct{}{} kinds := map[string]struct{}{} versions := map[string]struct{}{} @@ -642,7 +643,7 @@ func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provid } defer providedRows.Close() - provided = []*api.GroupVersionKind{} + var provided []*api.GroupVersionKind for providedRows.Next() { var value sql.NullString @@ -678,7 +679,7 @@ func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provid } defer requiredRows.Close() - required = []*api.GroupVersionKind{} + var required []*api.GroupVersionKind for requiredRows.Next() { var value sql.NullString @@ -770,7 +771,7 @@ func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provid } required[i].Plural = plural } - return + return provided, required, nil } func (s *SQLQuerier) GetBundleVersion(ctx context.Context, image string) (string, error) { @@ -809,6 +810,7 @@ func (s *SQLQuerier) GetBundlePathsForPackage(ctx context.Context, pkgName strin return nil, err } if imgName.Valid && imgName.String == "" { + // nolint: stylecheck return nil, fmt.Errorf("Index malformed: cannot find paths to bundle images") } images = append(images, imgName.String) @@ -844,6 +846,7 @@ func (s *SQLQuerier) GetBundlesForPackage(ctx context.Context, pkgName string) ( key.Version = version.String } if key.IsEmpty() { + // nolint: stylecheck return nil, fmt.Errorf("Index malformed: cannot find identifier for bundle in package %s", pkgName) } bundles[key] = struct{}{} @@ -1047,7 +1050,7 @@ func (s *SQLQuerier) SendBundles(ctx context.Context, 
stream registry.BundleSend return err } } - buildLegacyRequiredAPIs(out.Dependencies, &out.RequiredApis) + _ = buildLegacyRequiredAPIs(out.Dependencies, &out.RequiredApis) out.Dependencies = uniqueDeps(out.Dependencies) if props.Valid { @@ -1055,7 +1058,7 @@ func (s *SQLQuerier) SendBundles(ctx context.Context, stream registry.BundleSend return err } } - buildLegacyProvidedAPIs(out.Properties, &out.ProvidedApis) + _ = buildLegacyProvidedAPIs(out.Properties, &out.ProvidedApis) out.Properties = uniqueProps(out.Properties) if err := stream.Send(out); err != nil { return err @@ -1149,7 +1152,7 @@ func uniqueProps(props []*api.Property) []*api.Property { return list } -func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version, path string) (dependencies []*api.Dependency, err error) { +func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version, path string) ([]*api.Dependency, error) { depQuery := `SELECT DISTINCT type, value FROM dependencies WHERE operatorbundle_name=? AND (operatorbundle_version=? OR operatorbundle_version is NULL) @@ -1161,7 +1164,7 @@ func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version } defer rows.Close() - dependencies = []*api.Dependency{} + var dependencies []*api.Dependency for rows.Next() { var typeName sql.NullString var value sql.NullString @@ -1178,10 +1181,10 @@ func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version }) } - return + return dependencies, nil } -func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, path string) (properties []*api.Property, err error) { +func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, path string) ([]*api.Property, error) { propQuery := `SELECT DISTINCT type, value FROM properties WHERE operatorbundle_name=? AND (operatorbundle_version=? OR operatorbundle_version is NULL) @@ -1193,7 +1196,7 @@ func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, } defer rows.Close() - properties = []*api.Property{} + var properties []*api.Property for rows.Next() { var typeName sql.NullString var value sql.NullString @@ -1210,10 +1213,10 @@ func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, }) } - return + return properties, nil } -func (s *SQLQuerier) GetBundlePathIfExists(ctx context.Context, bundleName string) (bundlePath string, err error) { +func (s *SQLQuerier) GetBundlePathIfExists(ctx context.Context, bundleName string) (string, error) { getBundlePathQuery := ` SELECT bundlepath FROM operatorbundle @@ -1221,26 +1224,27 @@ func (s *SQLQuerier) GetBundlePathIfExists(ctx context.Context, bundleName strin rows, err := s.db.QueryContext(ctx, getBundlePathQuery, bundleName) if err != nil { - return + return "", err } defer rows.Close() if !rows.Next() { // no bundlepath set err = registry.ErrBundleImageNotInDatabase - return + return "", err } var bundlePathSQL sql.NullString if err = rows.Scan(&bundlePathSQL); err != nil { - return + return "", err } + var bundlePath string if bundlePathSQL.Valid { bundlePath = bundlePathSQL.String } - return + return bundlePath, nil } // ListRegistryBundles returns a set of registry bundles. diff --git a/test/e2e/ctx/ctx.go b/test/e2e/ctx/ctx.go index 1feb90fdd..ca54c7426 100644 --- a/test/e2e/ctx/ctx.go +++ b/test/e2e/ctx/ctx.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + // nolint: stylecheck . 
"github.com/onsi/ginkgo/v2" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" diff --git a/test/e2e/ctx/provisioner_kubeconfig.go b/test/e2e/ctx/provisioner_kubeconfig.go index 1ce1980d7..05a16f10f 100644 --- a/test/e2e/ctx/provisioner_kubeconfig.go +++ b/test/e2e/ctx/provisioner_kubeconfig.go @@ -26,6 +26,7 @@ func Provision(ctx *TestContext) (func(), error) { } f, err := os.Open(path) + // nolint:nestif if os.IsNotExist(err) { // try in-cluster config // see https://github.com/coreos/etcd-operator/issues/731#issuecomment-283804819