diff --git a/alpha/action/render.go b/alpha/action/render.go
index 07631b7c4..6bb64bef2 100644
--- a/alpha/action/render.go
+++ b/alpha/action/render.go
@@ -70,10 +70,13 @@ func (r Render) Run(ctx context.Context) (*declcfg.DeclarativeConfig, error) {
 		if err != nil {
 			return nil, fmt.Errorf("create registry: %v", err)
 		}
-		defer reg.Destroy()
+		defer func() {
+			_ = reg.Destroy()
+		}()
 		r.Registry = reg
 	}
 
+	// nolint:prealloc
 	var cfgs []declcfg.DeclarativeConfig
 	for _, ref := range r.Refs {
 		cfg, err := r.renderReference(ctx, ref)
@@ -123,6 +126,7 @@ func (r Render) renderReference(ctx context.Context, ref string) (*declcfg.Decla
 	if err != nil {
 		return r.imageToDeclcfg(ctx, ref)
 	}
+	// nolint:nestif
 	if stat.IsDir() {
 		dirEntries, err := os.ReadDir(ref)
 		if err != nil {
@@ -178,6 +182,7 @@ func (r Render) imageToDeclcfg(ctx context.Context, imageRef string) (*declcfg.D
 	}
 	var cfg *declcfg.DeclarativeConfig
+	// nolint:nestif
 	if dbFile, ok := labels[containertools.DbLocationLabel]; ok {
 		if !r.AllowedRefMask.Allowed(RefSqliteImage) {
 			return nil, fmt.Errorf("cannot render sqlite image: %w", ErrNotAllowed)
 		}
@@ -326,10 +331,10 @@ func bundleToDeclcfg(bundle *registry.Bundle) (*declcfg.Bundle, error) {
 		return nil, fmt.Errorf("get related images for bundle %q: %v", bundle.Name, err)
 	}
 
-	var csvJson []byte
+	var csvJSON []byte
 	for _, obj := range bundle.Objects {
 		if obj.GetKind() == "ClusterServiceVersion" {
-			csvJson, err = json.Marshal(obj)
+			csvJSON, err = json.Marshal(obj)
 			if err != nil {
 				return nil, fmt.Errorf("marshal CSV JSON for bundle %q: %v", bundle.Name, err)
 			}
@@ -344,7 +349,7 @@ func bundleToDeclcfg(bundle *registry.Bundle) (*declcfg.Bundle, error) {
 		Properties: props,
 		RelatedImages: relatedImages,
 		Objects: objs,
-		CsvJSON: string(csvJson),
+		CsvJSON: string(csvJSON),
 	}, nil
 }
 
diff --git a/alpha/declcfg/load.go b/alpha/declcfg/load.go
index f811b3145..7cf43ccfe 100644
--- a/alpha/declcfg/load.go
+++ b/alpha/declcfg/load.go
@@ -174,7 +174,7 @@ func sendPaths(ctx context.Context, root fs.FS, pathChan chan<- string) error {
 	})
 }
 
-func parseMetaPaths(ctx context.Context, root fs.FS, pathChan <-chan string, walkFn WalkMetasFSFunc, options LoadOptions) error {
+func parseMetaPaths(ctx context.Context, root fs.FS, pathChan <-chan string, walkFn WalkMetasFSFunc, _ LoadOptions) error {
 	for {
 		select {
 		case <-ctx.Done(): // don't block on receiving from pathChan
@@ -205,11 +205,11 @@ func readBundleObjects(b *Bundle) error {
 		if err := json.Unmarshal(props.Value, &obj); err != nil {
 			return fmt.Errorf("package %q, bundle %q: parse property at index %d as bundle object: %v", b.Package, b.Name, i, err)
 		}
-		objJson, err := yaml.ToJSON(obj.Data)
+		objJSON, err := yaml.ToJSON(obj.Data)
 		if err != nil {
 			return fmt.Errorf("package %q, bundle %q: convert bundle object property at index %d to JSON: %v", b.Package, b.Name, i, err)
 		}
-		b.Objects = append(b.Objects, string(objJson))
+		b.Objects = append(b.Objects, string(objJSON))
 	}
 	b.CsvJSON = extractCSV(b.Objects)
 	return nil
diff --git a/alpha/declcfg/model_to_declcfg.go b/alpha/declcfg/model_to_declcfg.go
index 14424d9f0..23f23c482 100644
--- a/alpha/declcfg/model_to_declcfg.go
+++ b/alpha/declcfg/model_to_declcfg.go
@@ -103,6 +103,7 @@ func traverseModelChannels(mpkg model.Package) ([]Channel, []Bundle) {
 		channels = append(channels, c)
 	}
 
+	// nolint:prealloc
 	var bundles []Bundle
 	for _, b := range bundleMap {
 		b.Properties = property.Deduplicate(b.Properties)
diff --git a/alpha/declcfg/write.go b/alpha/declcfg/write.go
index 9856c2e1e..686d5534b 100644
--- 
a/alpha/declcfg/write.go +++ b/alpha/declcfg/write.go @@ -128,6 +128,7 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) for _, c := range cfg.Channels { filteredChannel := writer.filterChannel(&c, versionMap, minVersion, minEdgePackage) + // nolint:nestif if filteredChannel != nil { pkgBuilder, ok := pkgs[c.Package] if !ok { @@ -154,17 +155,17 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) bundleDeprecation = ":::deprecated" } - entryId := fmt.Sprintf("%s-%s", channelID, ce.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]%s\n", entryId, ce.Name, bundleDeprecation)) + entryID := fmt.Sprintf("%s-%s", channelID, ce.Name) + pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]%s\n", entryID, ce.Name, bundleDeprecation)) if len(ce.Replaces) > 0 { - replacesId := fmt.Sprintf("%s-%s", channelID, ce.Replaces) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", replacesId, ce.Replaces, "replace", entryId, ce.Name)) + replacesID := fmt.Sprintf("%s-%s", channelID, ce.Replaces) + pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", replacesID, ce.Replaces, "replace", entryID, ce.Name)) } if len(ce.Skips) > 0 { for _, s := range ce.Skips { - skipsId := fmt.Sprintf("%s-%s", channelID, s) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", skipsId, s, "skip", entryId, ce.Name)) + skipsID := fmt.Sprintf("%s-%s", channelID, s) + pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", skipsID, s, "skip", entryID, ce.Name)) } } if len(ce.SkipRange) > 0 { @@ -172,8 +173,8 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) if err == nil { for _, edgeName := range filteredChannel.Entries { if skipRange(versionMap[edgeName.Name]) { - skipRangeId := fmt.Sprintf("%s-%s", channelID, edgeName.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- \"%s(%s)\" --> %s[%q]\n", skipRangeId, edgeName.Name, "skipRange", ce.SkipRange, entryId, ce.Name)) + skipRangeID := fmt.Sprintf("%s-%s", channelID, edgeName.Name) + pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- \"%s(%s)\" --> %s[%q]\n", skipRangeID, edgeName.Name, "skipRange", ce.SkipRange, entryID, ce.Name)) } } } else { @@ -186,8 +187,8 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) } } - out.Write([]byte("graph LR\n")) - out.Write([]byte(fmt.Sprintf(" classDef deprecated fill:#E8960F\n"))) + _, _ = out.Write([]byte("graph LR\n")) + _, _ = out.Write([]byte(" classDef deprecated fill:#E8960F\n")) pkgNames := []string{} for pname := range pkgs { pkgNames = append(pkgNames, pname) @@ -196,19 +197,19 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) return pkgNames[i] < pkgNames[j] }) for _, pkgName := range pkgNames { - out.Write([]byte(fmt.Sprintf(" %%%% package %q\n", pkgName))) - out.Write([]byte(fmt.Sprintf(" subgraph %q\n", pkgName))) - out.Write([]byte(pkgs[pkgName].String())) - out.Write([]byte(" end\n")) + _, _ = out.Write([]byte(fmt.Sprintf(" %%%% package %q\n", pkgName))) + _, _ = out.Write([]byte(fmt.Sprintf(" subgraph %q\n", pkgName))) + _, _ = out.Write([]byte(pkgs[pkgName].String())) + _, _ = out.Write([]byte(" end\n")) } if deprecatedPackage != "" { - out.Write([]byte(fmt.Sprintf("style %s fill:#989695\n", deprecatedPackage))) + _, _ = out.Write([]byte(fmt.Sprintf("style %s fill:#989695\n", deprecatedPackage))) } if len(deprecatedChannels) > 0 { for _, deprecatedChannel := range deprecatedChannels { - out.Write([]byte(fmt.Sprintf("style %s 
fill:#DCD0FF\n", deprecatedChannel))) + _, _ = out.Write([]byte(fmt.Sprintf("style %s fill:#DCD0FF\n", deprecatedChannel))) } } @@ -236,6 +237,7 @@ func (writer *MermaidWriter) filterChannel(c *Channel, versionMap map[string]sem out := &Channel{Name: c.Name, Package: c.Package, Properties: c.Properties, Entries: []ChannelEntry{}} for _, ce := range c.Entries { filteredCe := ChannelEntry{Name: ce.Name} + // nolint:nestif if writer.MinEdgeName == "" { // no minimum-edge specified filteredCe.SkipRange = ce.SkipRange diff --git a/alpha/model/model.go b/alpha/model/model.go index d570f93c3..9b4e3ae85 100644 --- a/alpha/model/model.go +++ b/alpha/model/model.go @@ -161,6 +161,7 @@ func (i *Icon) Validate() error { return result.orNil() } +// nolint:unused func (i *Icon) validateData() error { if !filetype.IsImage(i.Data) { return errors.New("icon data is not an image") diff --git a/alpha/template/semver/semver.go b/alpha/template/semver/semver.go index d44e1c9d9..2f0a8676b 100644 --- a/alpha/template/semver/semver.go +++ b/alpha/template/semver/semver.go @@ -22,6 +22,7 @@ func (t Template) Render(ctx context.Context) (*declcfg.DeclarativeConfig, error return nil, fmt.Errorf("render: unable to read file: %v", err) } + // nolint:prealloc var cfgs []declcfg.DeclarativeConfig bundleDict := buildBundleList(*sv) @@ -211,6 +212,7 @@ func (sv *semverTemplate) generateChannels(semverChannels *bundleVersions) []dec // sort the channel archetypes in ascending order so we can traverse the bundles in order of // their source channel's priority + // nolint:prealloc var archetypesByPriority []channelArchetype for k := range channelPriorities { archetypesByPriority = append(archetypesByPriority, k) @@ -391,6 +393,7 @@ func getMinorVersion(v semver.Version) semver.Version { } } +// nolint:unused func getMajorVersion(v semver.Version) semver.Version { return semver.Version{ Major: v.Major, diff --git a/cmd/opm/alpha/bundle/extract.go b/cmd/opm/alpha/bundle/extract.go index 3952a52b8..3cc7e968f 100644 --- a/cmd/opm/alpha/bundle/extract.go +++ b/cmd/opm/alpha/bundle/extract.go @@ -33,7 +33,7 @@ func init() { extractCmd.Flags().StringP("namespace", "n", "openshift-operator-lifecycle-manager", "namespace to write configmap data") extractCmd.Flags().Uint64P("datalimit", "l", 1<<20, "maximum limit in bytes for total bundle data") extractCmd.Flags().BoolP("gzip", "z", false, "enable gzip compression of configmap data") - extractCmd.MarkPersistentFlagRequired("configmapname") + _ = extractCmd.MarkPersistentFlagRequired("configmapname") } func runExtractCmd(cmd *cobra.Command, _ []string) error { diff --git a/cmd/opm/alpha/bundle/unpack.go b/cmd/opm/alpha/bundle/unpack.go index 82bddff77..369442eb0 100644 --- a/cmd/opm/alpha/bundle/unpack.go +++ b/cmd/opm/alpha/bundle/unpack.go @@ -58,6 +58,7 @@ func unpackBundle(cmd *cobra.Command, args []string) error { return err } + // nolint:nestif if info, err := os.Stat(out); err != nil { if os.IsNotExist(err) { err = os.MkdirAll(out, 0755) diff --git a/cmd/opm/alpha/list/cmd.go b/cmd/opm/alpha/list/cmd.go index 0f234e39b..79f9fd9c8 100644 --- a/cmd/opm/alpha/list/cmd.go +++ b/cmd/opm/alpha/list/cmd.go @@ -43,7 +43,9 @@ func newPackagesCmd() *cobra.Command { if err != nil { logger.Fatal(err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() lp := action.ListPackages{IndexReference: args[0], Registry: reg} res, err := lp.Run(cmd.Context()) if err != nil { @@ -72,7 +74,9 @@ func newChannelsCmd() *cobra.Command { if err != nil { logger.Fatal(err) } - defer 
reg.Destroy() + defer func() { + _ = reg.Destroy() + }() lc := action.ListChannels{IndexReference: args[0], Registry: reg} if len(args) > 1 { lc.PackageName = args[1] @@ -106,7 +110,9 @@ for each channel in which the bundle is present). if err != nil { logger.Fatal(err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() lb := action.ListBundles{IndexReference: args[0], Registry: reg} if len(args) > 1 { lb.PackageName = args[1] diff --git a/cmd/opm/alpha/template/basic.go b/cmd/opm/alpha/template/basic.go index 4195bd0fe..de6aed367 100644 --- a/cmd/opm/alpha/template/basic.go +++ b/cmd/opm/alpha/template/basic.go @@ -62,7 +62,9 @@ When FILE is '-' or not provided, the template is read from standard input`, if err != nil { log.Fatalf("creating containerd registry: %v", err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() var m *migrations.Migrations if migrateLevel != "" { diff --git a/cmd/opm/alpha/template/semver.go b/cmd/opm/alpha/template/semver.go index 97dccbc6c..eb07ab568 100644 --- a/cmd/opm/alpha/template/semver.go +++ b/cmd/opm/alpha/template/semver.go @@ -68,7 +68,9 @@ When FILE is '-' or not provided, the template is read from standard input`, if err != nil { log.Fatalf("creating containerd registry: %v", err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() var m *migrations.Migrations if migrateLevel != "" { diff --git a/cmd/opm/generate/cmd.go b/cmd/opm/generate/cmd.go index 0bf5b6c9b..7eb2315d8 100644 --- a/cmd/opm/generate/cmd.go +++ b/cmd/opm/generate/cmd.go @@ -99,7 +99,7 @@ A separate builder and base image can be specified. The builder image may not be cmd.Flags().StringVarP(&baseImage, "base-image", "i", containertools.DefaultBinarySourceImage, "Image base to use to build catalog.") cmd.Flags().StringVarP(&builderImage, "builder-image", "b", containertools.DefaultBinarySourceImage, "Image to use as a build stage.") cmd.Flags().StringSliceVarP(&extraLabelStrs, "extra-labels", "l", []string{}, "Extra labels to include in the generated Dockerfile. Labels should be of the form 'key=value'.") - cmd.Flags().MarkDeprecated("binary-image", "use --base-image instead") + _ = cmd.Flags().MarkDeprecated("binary-image", "use --base-image instead") cmd.MarkFlagsMutuallyExclusive("binary-image", "base-image") return cmd } diff --git a/cmd/opm/internal/util/util.go b/cmd/opm/internal/util/util.go index 9e0e006be..e007caa48 100644 --- a/cmd/opm/internal/util/util.go +++ b/cmd/opm/internal/util/util.go @@ -46,7 +46,7 @@ func GetTLSOptions(cmd *cobra.Command) (bool, bool, error) { // This works in tandem with opm/index/cmd, which adds the relevant flags as persistent // as part of the root command (cmd/root/cmd) initialization func CreateCLIRegistry(cmd *cobra.Command) (*containerdregistry.Registry, error) { - skipTlsVerify, useHTTP, err := GetTLSOptions(cmd) + skipTLSVerify, useHTTP, err := GetTLSOptions(cmd) if err != nil { return nil, err } @@ -58,7 +58,7 @@ func CreateCLIRegistry(cmd *cobra.Command) (*containerdregistry.Registry, error) reg, err := containerdregistry.NewRegistry( containerdregistry.WithCacheDir(cacheDir), - containerdregistry.SkipTLSVerify(skipTlsVerify), + containerdregistry.SkipTLSVerify(skipTLSVerify), containerdregistry.WithPlainHTTP(useHTTP), containerdregistry.WithLog(log.Null()), ) diff --git a/cmd/opm/render/cmd.go b/cmd/opm/render/cmd.go index 683d11a1c..f1923406c 100644 --- a/cmd/opm/render/cmd.go +++ b/cmd/opm/render/cmd.go @@ -55,7 +55,9 @@ database files. 
if err != nil { log.Fatal(err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() render.Registry = reg diff --git a/cmd/opm/serve/serve.go b/cmd/opm/serve/serve.go index 011286c6f..2d5e913cc 100644 --- a/cmd/opm/serve/serve.go +++ b/cmd/opm/serve/serve.go @@ -44,7 +44,7 @@ type serve struct { } const ( - defaultCpuStartupPath string = "/debug/pprof/startup/cpu" + defaultCPUStartupPath string = "/debug/pprof/startup/cpu" ) func NewCmd() *cobra.Command { @@ -99,7 +99,7 @@ func (s *serve) run(ctx context.Context) error { return fmt.Errorf("could not start pprof endpoint: %v", err) } if s.captureProfiles { - if err := p.startCpuProfileCache(); err != nil { + if err := p.startCPUProfileCache(); err != nil { return fmt.Errorf("could not start CPU profile: %v", err) } } @@ -169,7 +169,7 @@ func (s *serve) run(ctx context.Context) error { health.RegisterHealthServer(grpcServer, server.NewHealthServer()) reflection.Register(grpcServer) mainLogger.Info("serving registry") - p.stopCpuProfileCache() + p.stopCPUProfileCache() go func() { <-ctx.Done() @@ -224,7 +224,7 @@ func (p *profilerInterface) startEndpoint() error { mux.HandleFunc("/debug/pprof/profile", endpoint.Profile) mux.HandleFunc("/debug/pprof/symbol", endpoint.Symbol) mux.HandleFunc("/debug/pprof/trace", endpoint.Trace) - mux.HandleFunc(defaultCpuStartupPath, p.httpHandler) + mux.HandleFunc(defaultCPUStartupPath, p.httpHandler) p.server = http.Server{ Addr: p.addr, @@ -249,13 +249,13 @@ func (p *profilerInterface) startEndpoint() error { return nil } -func (p *profilerInterface) startCpuProfileCache() error { +func (p *profilerInterface) startCPUProfileCache() error { // short-circuit if not enabled if !p.isEnabled() { return nil } - p.logger.Infof("start caching cpu profile data at %q", defaultCpuStartupPath) + p.logger.Infof("start caching cpu profile data at %q", defaultCPUStartupPath) if err := pprof.StartCPUProfile(&p.cache); err != nil { return err } @@ -263,7 +263,7 @@ func (p *profilerInterface) startCpuProfileCache() error { return nil } -func (p *profilerInterface) stopCpuProfileCache() { +func (p *profilerInterface) stopCPUProfileCache() { // short-circuit if not enabled if !p.isEnabled() { return @@ -277,7 +277,7 @@ func (p *profilerInterface) httpHandler(w http.ResponseWriter, r *http.Request) if !p.isCacheReady() { http.Error(w, "cpu profile cache is not yet ready", http.StatusServiceUnavailable) } - w.Write(p.cache.Bytes()) + _, _ = w.Write(p.cache.Bytes()) } func (p *profilerInterface) stopEndpoint(ctx context.Context) error { diff --git a/pkg/api/api_to_model.go b/pkg/api/api_to_model.go index 5c0cb603a..50088ab4f 100644 --- a/pkg/api/api_to_model.go +++ b/pkg/api/api_to_model.go @@ -42,6 +42,7 @@ func ConvertAPIBundleToModelBundle(b *Bundle) (*model.Bundle, error) { } func convertAPIBundleToModelProperties(b *Bundle) ([]property.Property, error) { + // nolint:prealloc var out []property.Property providedGVKs := map[property.GVK]struct{}{} diff --git a/pkg/api/model_to_api.go b/pkg/api/model_to_api.go index 40ccdefee..73162d255 100644 --- a/pkg/api/model_to_api.go +++ b/pkg/api/model_to_api.go @@ -21,8 +21,8 @@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { return nil, fmt.Errorf("parse properties: %v", err) } - csvJson := b.CsvJSON - if csvJson == "" && len(props.CSVMetadatas) == 1 { + csvJSON := b.CsvJSON + if csvJSON == "" && len(props.CSVMetadatas) == 1 { var icons []v1alpha1.Icon if b.Package.Icon != nil { icons = []v1alpha1.Icon{{ @@ -47,9 +47,9 @@ func 
ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { if err != nil { return nil, err } - csvJson = string(csvData) + csvJSON = string(csvData) if len(b.Objects) == 0 { - b.Objects = []string{csvJson} + b.Objects = []string{csvJSON} } } @@ -77,7 +77,7 @@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { Properties: convertModelPropertiesToAPIProperties(b.Properties), Replaces: b.Replaces, Skips: b.Skips, - CsvJson: csvJson, + CsvJson: csvJSON, Object: b.Objects, Deprecation: deprecation, }, nil @@ -128,6 +128,7 @@ func csvMetadataToCsv(m property.CSVMetadata) v1alpha1.ClusterServiceVersion { } func gvksProvidedtoAPIGVKs(in []property.GVK) []*GroupVersionKind { + // nolint:prealloc var out []*GroupVersionKind for _, gvk := range in { out = append(out, &GroupVersionKind{ @@ -139,6 +140,7 @@ func gvksProvidedtoAPIGVKs(in []property.GVK) []*GroupVersionKind { return out } func gvksRequirestoAPIGVKs(in []property.GVKRequired) []*GroupVersionKind { + // nolint:prealloc var out []*GroupVersionKind for _, gvk := range in { out = append(out, &GroupVersionKind{ diff --git a/pkg/cache/json.go b/pkg/cache/json.go index fc73431e9..92b17f752 100644 --- a/pkg/cache/json.go +++ b/pkg/cache/json.go @@ -102,11 +102,11 @@ func (q *jsonBackend) GetPackageIndex(_ context.Context) (packageIndex, error) { } func (q *jsonBackend) PutPackageIndex(_ context.Context, pi packageIndex) error { - packageJson, err := json.Marshal(pi) + packageJSON, err := json.Marshal(pi) if err != nil { return err } - if err := os.WriteFile(filepath.Join(q.baseDir, jsonPackagesFile), packageJson, jsonCacheModeFile); err != nil { + if err := os.WriteFile(filepath.Join(q.baseDir, jsonPackagesFile), packageJSON, jsonCacheModeFile); err != nil { return err } return nil diff --git a/pkg/cache/pkgs.go b/pkg/cache/pkgs.go index e590823b4..c89a9aab4 100644 --- a/pkg/cache/pkgs.go +++ b/pkg/cache/pkgs.go @@ -14,6 +14,7 @@ import ( type packageIndex map[string]cPkg func (pkgs packageIndex) ListPackages(_ context.Context) ([]string, error) { + // nolint:prealloc var packages []string for pkgName := range pkgs { packages = append(packages, pkgName) @@ -27,6 +28,7 @@ func (pkgs packageIndex) GetPackage(_ context.Context, name string) (*registry.P return nil, fmt.Errorf("package %q not found", name) } + // nolint:prealloc var channels []registry.PackageChannel for _, ch := range pkg.Channels { var deprecation *registry.Deprecation diff --git a/pkg/cache/pogrebv1.go b/pkg/cache/pogrebv1.go index a340b4458..78eb13fd8 100644 --- a/pkg/cache/pogrebv1.go +++ b/pkg/cache/pogrebv1.go @@ -38,7 +38,7 @@ const ( pograbV1CacheDir = FormatPogrebV1 pogrebDigestFile = pograbV1CacheDir + "/digest" - pogrebDbDir = pograbV1CacheDir + "/db" + pogrebDBDir = pograbV1CacheDir + "/db" ) type pogrebV1Backend struct { @@ -76,7 +76,7 @@ func (q *pogrebV1Backend) Init() error { } func (q *pogrebV1Backend) Open() error { - db, err := pogreb.Open(filepath.Join(q.baseDir, pogrebDbDir), &pogreb.Options{FileSystem: pogrebfs.OSMMap}) + db, err := pogreb.Open(filepath.Join(q.baseDir, pogrebDBDir), &pogreb.Options{FileSystem: pogrebfs.OSMMap}) if err != nil { return err } @@ -93,7 +93,7 @@ func (q *pogrebV1Backend) Close() error { } // Recursively fixup permissions on the DB directory. 
- return filepath.Walk(filepath.Join(q.baseDir, pogrebDbDir), func(path string, info os.FileInfo, err error) error { + return filepath.Walk(filepath.Join(q.baseDir, pogrebDBDir), func(path string, info os.FileInfo, err error) error { if err != nil { return err } @@ -128,11 +128,11 @@ func (q *pogrebV1Backend) GetPackageIndex(_ context.Context) (packageIndex, erro } func (q *pogrebV1Backend) PutPackageIndex(_ context.Context, index packageIndex) error { - packageJson, err := json.Marshal(index) + packageJSON, err := json.Marshal(index) if err != nil { return err } - return q.db.Put([]byte("packages.json"), packageJson) + return q.db.Put([]byte("packages.json"), packageJSON) } func (q *pogrebV1Backend) dbKey(in bundleKey) []byte { diff --git a/pkg/client/kubeclient.go b/pkg/client/kubeclient.go index 17a6532f8..10de8ee26 100644 --- a/pkg/client/kubeclient.go +++ b/pkg/client/kubeclient.go @@ -26,6 +26,7 @@ func NewKubeClient(kubeconfig string, logger *logrus.Logger) (clientset *kuberne } if err != nil { + // nolint:stylecheck err = fmt.Errorf("Cannot load config for REST client: %v", err) return } diff --git a/pkg/containertools/dockerfilegenerator.go b/pkg/containertools/dockerfilegenerator.go index 79059b9ee..9a2e4a363 100644 --- a/pkg/containertools/dockerfilegenerator.go +++ b/pkg/containertools/dockerfilegenerator.go @@ -9,7 +9,7 @@ import ( const ( DefaultBinarySourceImage = "quay.io/operator-framework/opm:latest" - DefaultDbLocation = "/database/index.db" + DefaultDBLocation = "/database/index.db" DbLocationLabel = "operators.operatorframework.io.index.database.v1" ConfigsLocationLabel = "operators.operatorframework.io.index.configs.v1" ) @@ -46,13 +46,13 @@ func (g *IndexDockerfileGenerator) GenerateIndexDockerfile(binarySourceImage, da dockerfile += fmt.Sprintf("FROM %s\n", binarySourceImage) // Labels - dockerfile += fmt.Sprintf("LABEL %s=%s\n", DbLocationLabel, DefaultDbLocation) + dockerfile += fmt.Sprintf("LABEL %s=%s\n", DbLocationLabel, DefaultDBLocation) // Content - dockerfile += fmt.Sprintf("ADD %s %s\n", databasePath, DefaultDbLocation) + dockerfile += fmt.Sprintf("ADD %s %s\n", databasePath, DefaultDBLocation) dockerfile += "EXPOSE 50051\n" dockerfile += "ENTRYPOINT [\"/bin/opm\"]\n" - dockerfile += fmt.Sprintf("CMD [\"registry\", \"serve\", \"--database\", \"%s\"]\n", DefaultDbLocation) + dockerfile += fmt.Sprintf("CMD [\"registry\", \"serve\", \"--database\", \"%s\"]\n", DefaultDBLocation) return dockerfile } diff --git a/pkg/containertools/labelreader.go b/pkg/containertools/labelreader.go index 57de73829..18ad46d98 100644 --- a/pkg/containertools/labelreader.go +++ b/pkg/containertools/labelreader.go @@ -71,5 +71,6 @@ func (r ImageLabelReader) GetLabelsFromImage(image string) (map[string]string, e return data[0].Labels, nil } + // nolint:stylecheck return nil, fmt.Errorf("Unable to parse label data from container") } diff --git a/pkg/lib/bundle/build.go b/pkg/lib/bundle/build.go index 08b0fa808..5bfb517fc 100644 --- a/pkg/lib/bundle/build.go +++ b/pkg/lib/bundle/build.go @@ -31,6 +31,7 @@ func ExecuteCommand(cmd *exec.Cmd) error { log.Debugf("Running %#v", cmd.Args) if err := cmd.Run(); err != nil { + // nolint:stylecheck return fmt.Errorf("Failed to exec %#v: %v", cmd.Args, err) } diff --git a/pkg/lib/bundle/errors.go b/pkg/lib/bundle/errors.go index 5e0735adf..869cf061c 100644 --- a/pkg/lib/bundle/errors.go +++ b/pkg/lib/bundle/errors.go @@ -12,6 +12,7 @@ type ValidationError struct { } func (v ValidationError) Error() string { + // nolint:prealloc var errs []string 
for _, err := range v.Errors { errs = append(errs, err.Error()) diff --git a/pkg/lib/bundle/generate.go b/pkg/lib/bundle/generate.go index 7fc1fbabc..93d6e0746 100644 --- a/pkg/lib/bundle/generate.go +++ b/pkg/lib/bundle/generate.go @@ -79,6 +79,7 @@ func GenerateFunc(directory, outputDir, packageName, channels, channelDefault st // Channels and packageName are required fields where as default channel is automatically filled if unspecified // and that either of the required field is missing. We are interpreting the bundle information through // bundle directory embedded in the package folder. + // nolint:nestif if channels == "" || packageName == "" { var notProvided []string if channels == "" { @@ -204,6 +205,7 @@ func CopyYamlOutput(annotationsContent []byte, manifestDir, outputDir, workingDi // Currently able to detect helm chart, registry+v1 (CSV) and plain k8s resources // such as CRD. func GetMediaType(directory string) (string, error) { + // nolint:prealloc var files []string k8sFiles := make(map[string]*unstructured.Unstructured) @@ -219,6 +221,7 @@ func GetMediaType(directory string) (string, error) { fileWithPath := filepath.Join(directory, item.Name()) fileBlob, err := os.ReadFile(fileWithPath) if err != nil { + // nolint:stylecheck return "", fmt.Errorf("Unable to read file %s in bundle", fileWithPath) } @@ -230,6 +233,7 @@ func GetMediaType(directory string) (string, error) { } if len(files) == 0 { + // nolint:stylecheck return "", fmt.Errorf("The directory %s contains no yaml files", directory) } @@ -276,11 +280,13 @@ func ValidateAnnotations(existing, expected []byte) error { for label, item := range expectedAnnotations.Annotations { value, hasAnnotation := fileAnnotations.Annotations[label] if !hasAnnotation { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Missing field: %s", label)) continue } if item != value { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Expect field %q to have value %q instead of %q", label, item, value)) } @@ -443,6 +449,7 @@ func copyManifestDir(from, to string, overwrite bool) error { return nil } +// nolint:unused func containsString(slice []string, s string) bool { for _, item := range slice { if item == s { diff --git a/pkg/lib/bundle/validate.go b/pkg/lib/bundle/validate.go index 18708186a..99c0bc76e 100644 --- a/pkg/lib/bundle/validate.go +++ b/pkg/lib/bundle/validate.go @@ -101,9 +101,11 @@ func (i imageValidator) ValidateBundleFormat(directory string) error { } if manifestsFound == false { + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unable to locate manifests directory")) } if metadataFound == false { + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unable to locate metadata directory")) } @@ -145,6 +147,7 @@ func (i imageValidator) ValidateBundleFormat(directory string) error { } if !annotationsFound { + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Could not find annotations file")) } else { i.logger.Debug("Found annotations file") @@ -186,6 +189,7 @@ func validateAnnotations(mediaType string, fileAnnotations *AnnotationMetadata) for label, item := range annotations { val, ok := fileAnnotations.Annotations[label] if !ok && label != ChannelDefaultLabel { + // nolint:stylecheck aErr := fmt.Errorf("Missing annotation %q", label) validationErrors = append(validationErrors, aErr) } @@ -193,26 +197,31 @@ func validateAnnotations(mediaType string, fileAnnotations *AnnotationMetadata) switch label { case MediatypeLabel: if item != val { 
+ // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have value %q instead of %q", label, item, val) validationErrors = append(validationErrors, aErr) } case ManifestsLabel: if item != ManifestsDir { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have value %q instead of %q", label, ManifestsDir, val) validationErrors = append(validationErrors, aErr) } case MetadataDir: if item != MetadataLabel { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have value %q instead of %q", label, MetadataDir, val) validationErrors = append(validationErrors, aErr) } case ChannelsLabel: if val == "" { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have non-empty value", label) validationErrors = append(validationErrors, aErr) } case ChannelDefaultLabel: if ok && val == "" { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have non-empty value", label) validationErrors = append(validationErrors, aErr) } @@ -292,6 +301,7 @@ func (i imageValidator) ValidateBundleContent(manifestDir string) error { fileWithPath := filepath.Join(manifestDir, item.Name()) data, err := os.ReadFile(fileWithPath) if err != nil { + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unable to read file %s in supported types", fileWithPath)) continue } @@ -314,6 +324,7 @@ func (i imageValidator) ValidateBundleContent(manifestDir string) error { continue } + // nolint:nestif if gvk.Kind == CSVKind { err := runtime.DefaultUnstructuredConverter.FromUnstructured(k8sFile.Object, csv) if err != nil { @@ -362,6 +373,7 @@ func (i imageValidator) ValidateBundleContent(manifestDir string) error { } } default: + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unsupported api version of CRD: %s", gv)) } } else { diff --git a/pkg/lib/image/registry.go b/pkg/lib/image/registry.go index 2c5c7f07a..42aea1637 100644 --- a/pkg/lib/image/registry.go +++ b/pkg/lib/image/registry.go @@ -109,6 +109,7 @@ func RunDockerRegistry(ctx context.Context, rootDir string, configOpts ...Config if err != nil { return false, nil } + defer r.Body.Close() if r.StatusCode == http.StatusOK { return true, nil } diff --git a/pkg/lib/indexer/indexer.go b/pkg/lib/indexer/indexer.go index 6e53a9f20..6d21b13ec 100644 --- a/pkg/lib/indexer/indexer.go +++ b/pkg/lib/indexer/indexer.go @@ -37,6 +37,7 @@ const ( concurrencyLimitForExport = 10 ) +// nolint:stylecheck var ErrFileBasedCatalogPrune = errors.New("`opm index prune` only supports sqlite-based catalogs. 
See https://github.com/redhat-openshift-ecosystem/community-operators-prod/issues/793 for instructions on pruning a plaintext files backed catalog.") // ImageIndexer is a struct implementation of the Indexer interface diff --git a/pkg/lib/registry/registry.go b/pkg/lib/registry/registry.go index dc0bec859..ad5b9c2d0 100644 --- a/pkg/lib/registry/registry.go +++ b/pkg/lib/registry/registry.go @@ -133,6 +133,7 @@ func unpackImage(ctx context.Context, reg image.Registry, ref image.Reference) ( func populate(ctx context.Context, loader registry.Load, graphLoader registry.GraphLoader, querier registry.Query, reg image.Registry, refs []image.Reference, mode registry.Mode, overwrite bool) error { unpackedImageMap := make(map[image.Reference]string, 0) overwrittenBundles := map[string][]string{} + // nolint:prealloc var imagesToAdd []*registry.Bundle for _, ref := range refs { to, from, cleanup, err := unpackImage(ctx, reg, ref) @@ -391,6 +392,7 @@ func checkForBundlePaths(querier registry.GRPCQuery, bundlePaths []string) ([]st registryBundlePaths[b.BundlePath] = struct{}{} } + // nolint:prealloc var found, missing []string for _, b := range bundlePaths { if _, ok := registryBundlePaths[b]; ok { @@ -408,7 +410,7 @@ func checkForBundlePaths(querier registry.GRPCQuery, bundlePaths []string) ([]st // replaces mode selects highest version as channel head and // prunes any bundles in the upgrade chain after the channel head. // check for the presence of newly added bundles after a replaces-mode add. -func checkForBundles(ctx context.Context, q *sqlite.SQLQuerier, g registry.GraphLoader, required []*registry.Bundle) error { +func checkForBundles(_ context.Context, _ *sqlite.SQLQuerier, g registry.GraphLoader, required []*registry.Bundle) error { var errs []error for _, bundle := range required { graph, err := g.Generate(bundle.Package) diff --git a/pkg/lib/semver/semver.go b/pkg/lib/semver/semver.go index 6875566d0..60721cdaf 100644 --- a/pkg/lib/semver/semver.go +++ b/pkg/lib/semver/semver.go @@ -8,6 +8,7 @@ import ( // BuildIdCompare compares two versions and returns negative one if the first arg is less than the second arg, positive one if it is larger, and zero if they are equal. // This comparison follows typical semver precedence rules, with one addition: whenever two versions are equal with the exception of their build-ids, the build-ids are compared using prerelease precedence rules. Further, versions with no build-id are always less than versions with build-ids; e.g. 1.0.0 < 1.0.0+1. +// nolint:stylecheck func BuildIdCompare(b semver.Version, v semver.Version) (int, error) { if c := b.Compare(v); c != 0 { return c, nil @@ -27,6 +28,7 @@ func BuildIdCompare(b semver.Version, v semver.Version) (int, error) { } func buildAsPrerelease(v semver.Version) (*semver.Version, error) { + // nolint:prealloc var pre []semver.PRVersion for _, b := range v.Build { p, err := semver.NewPRVersion(b) diff --git a/pkg/mirror/options.go b/pkg/mirror/options.go index c9d3b3d9e..51c004faa 100644 --- a/pkg/mirror/options.go +++ b/pkg/mirror/options.go @@ -44,30 +44,30 @@ func (o *IndexImageMirrorerOptions) Complete() error { } // Apply sequentially applies the given options to the config. 
-func (c *IndexImageMirrorerOptions) Apply(options []ImageIndexMirrorOption) { +func (o *IndexImageMirrorerOptions) Apply(options []ImageIndexMirrorOption) { for _, option := range options { - option(c) + option(o) } } // ToOption converts an IndexImageMirrorerOptions object into a function that applies // its current configuration to another IndexImageMirrorerOptions instance -func (c *IndexImageMirrorerOptions) ToOption() ImageIndexMirrorOption { - return func(o *IndexImageMirrorerOptions) { - if c.ImageMirrorer != nil { - o.ImageMirrorer = c.ImageMirrorer +func (o *IndexImageMirrorerOptions) ToOption() ImageIndexMirrorOption { + return func(io *IndexImageMirrorerOptions) { + if o.ImageMirrorer != nil { + io.ImageMirrorer = o.ImageMirrorer } - if c.DatabaseExtractor != nil { - o.DatabaseExtractor = c.DatabaseExtractor + if o.DatabaseExtractor != nil { + io.DatabaseExtractor = o.DatabaseExtractor } - if c.Source != "" { - o.Source = c.Source + if o.Source != "" { + io.Source = o.Source } - if c.Dest != "" { - o.Dest = c.Dest + if o.Dest != "" { + io.Dest = o.Dest } - if c.ManifestDir != "" { - o.ManifestDir = c.ManifestDir + if o.ManifestDir != "" { + io.ManifestDir = o.ManifestDir } } } diff --git a/pkg/prettyunmarshaler/prettyunmarshaler.go b/pkg/prettyunmarshaler/prettyunmarshaler.go index abbf586fd..788428440 100644 --- a/pkg/prettyunmarshaler/prettyunmarshaler.go +++ b/pkg/prettyunmarshaler/prettyunmarshaler.go @@ -8,29 +8,29 @@ import ( "strings" ) -type JsonUnmarshalError struct { +type JSONUnmarshalError struct { data []byte offset int64 err error } -func NewJSONUnmarshalError(data []byte, err error) *JsonUnmarshalError { +func NewJSONUnmarshalError(data []byte, err error) *JSONUnmarshalError { var te *json.UnmarshalTypeError if errors.As(err, &te) { - return &JsonUnmarshalError{data: data, offset: te.Offset, err: te} + return &JSONUnmarshalError{data: data, offset: te.Offset, err: te} } var se *json.SyntaxError if errors.As(err, &se) { - return &JsonUnmarshalError{data: data, offset: se.Offset, err: se} + return &JSONUnmarshalError{data: data, offset: se.Offset, err: se} } - return &JsonUnmarshalError{data: data, offset: -1, err: err} + return &JSONUnmarshalError{data: data, offset: -1, err: err} } -func (e *JsonUnmarshalError) Error() string { +func (e *JSONUnmarshalError) Error() string { return e.err.Error() } -func (e *JsonUnmarshalError) Pretty() string { +func (e *JSONUnmarshalError) Pretty() string { if len(e.data) == 0 || e.offset < 0 || e.offset > int64(len(e.data)) { return e.err.Error() } diff --git a/pkg/registry/bundle.go b/pkg/registry/bundle.go index e87260204..cd84f1d8b 100644 --- a/pkg/registry/bundle.go +++ b/pkg/registry/bundle.go @@ -167,6 +167,7 @@ func (b *Bundle) CustomResourceDefinitions() ([]runtime.Object, error) { if err := b.cache(); err != nil { return nil, err } + // nolint:prealloc var crds []runtime.Object for _, crd := range b.v1crds { crds = append(crds, crd) diff --git a/pkg/registry/bundlegraphloader.go b/pkg/registry/bundlegraphloader.go index 3995a88c9..2854003a2 100644 --- a/pkg/registry/bundlegraphloader.go +++ b/pkg/registry/bundlegraphloader.go @@ -16,6 +16,7 @@ type BundleGraphLoader struct { func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, annotations *AnnotationsFile, skippatch bool) (*Package, error) { bundleVersion, err := bundle.Version() if err != nil { + // nolint:stylecheck return nil, fmt.Errorf("Unable to extract bundle version from bundle %s, can't insert in semver mode", bundle.BundleImage) } @@ 
-43,6 +44,7 @@ func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, ann if graph.DefaultChannel == "" { // Infer default channel from channel list if annotations.SelectDefaultChannel() == "" { + // nolint:stylecheck return nil, fmt.Errorf("Default channel is missing and can't be inferred") } graph.DefaultChannel = annotations.SelectDefaultChannel() @@ -83,6 +85,7 @@ func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, ann for node := range channelGraph.Nodes { nodeVersion, err := semver.Make(node.Version) if err != nil { + // nolint:stylecheck return nil, fmt.Errorf("Unable to parse existing bundle version stored in index %s %s %s", node.CsvName, node.Version, node.BundlePath) } diff --git a/pkg/registry/channelupdateoptions.go b/pkg/registry/channelupdateoptions.go index 85f5acb40..d45bd414e 100644 --- a/pkg/registry/channelupdateoptions.go +++ b/pkg/registry/channelupdateoptions.go @@ -22,6 +22,7 @@ func GetModeFromString(mode string) (Mode, error) { case "semver-skippatch": return SkipPatchMode, nil default: + // nolint:stylecheck return -1, fmt.Errorf("Invalid channel update mode %s specified", mode) } } diff --git a/pkg/registry/csv.go b/pkg/registry/csv.go index 69abb61f6..40f3bf97f 100644 --- a/pkg/registry/csv.go +++ b/pkg/registry/csv.go @@ -20,6 +20,7 @@ import ( const ( // Name of the CSV's kind + // nolint:unused clusterServiceVersionKind = "ClusterServiceVersion" // Name of the section under which the list of owned and required list of @@ -44,9 +45,11 @@ const ( icon = "icon" // The yaml attribute that points to the icon.base64data for the ClusterServiceVersion + // nolint:unused base64data = "base64data" // The yaml attribute that points to the icon.mediatype for the ClusterServiceVersion + // nolint:unused mediatype = "mediatype" // The yaml attribute that points to the description for the ClusterServiceVersion description = "description" diff --git a/pkg/registry/directoryGraphLoader.go b/pkg/registry/directoryGraphLoader.go index d0aeda74f..4b7209188 100644 --- a/pkg/registry/directoryGraphLoader.go +++ b/pkg/registry/directoryGraphLoader.go @@ -76,6 +76,7 @@ func (g *DirGraphLoader) loadBundleCsvPathMap() error { } CsvNameAndReplaceMap := make(map[string]csvReplaces) for _, bundlePath := range bundleDirs { + //nolint:nestif if bundlePath.IsDir() { csvStruct, err := ReadCSVFromBundleDirectory(filepath.Join(g.PackageDir, bundlePath.Name())) if err != nil { diff --git a/pkg/registry/parse.go b/pkg/registry/parse.go index 4725dbcf9..6af05bf39 100644 --- a/pkg/registry/parse.go +++ b/pkg/registry/parse.go @@ -157,6 +157,7 @@ func (b *bundleParser) addMetadata(metadata fs.FS, bundle *Bundle) error { bundle.Package = af.Annotations.PackageName bundle.Channels = af.GetChannels() } else { + // nolint:stylecheck return fmt.Errorf("Could not find annotations file") } @@ -185,6 +186,7 @@ func (b *bundleParser) derivedProperties(bundle *Bundle) ([]Property, error) { return nil, fmt.Errorf("bundle missing csv") } + // nolint:prealloc var derived []Property if len(csv.GetAnnotations()) > 0 { properties, ok := csv.GetAnnotations()[PropertyKey] @@ -237,6 +239,7 @@ func (b *bundleParser) derivedProperties(bundle *Bundle) ([]Property, error) { // propertySet returns the deduplicated set of a property list. 
func propertySet(properties []Property) []Property { var ( + // nolint:prealloc set []Property visited = map[string]struct{}{} ) diff --git a/pkg/registry/populator.go b/pkg/registry/populator.go index 730d27fb9..7747effc1 100644 --- a/pkg/registry/populator.go +++ b/pkg/registry/populator.go @@ -151,6 +151,7 @@ func (i *DirectoryPopulator) loadManifests(imagesToAdd []*ImageInput, mode Mode) // globalSanityCheck should have verified this to be a head without anything replacing it // and that we have a single overwrite per package + // nolint:nestif if len(i.overwrittenImages) > 0 { if overwriter, ok := i.loader.(HeadOverwriter); ok { // Assume loader has some way to handle overwritten heads if HeadOverwriter isn't implemented explicitly @@ -180,6 +181,7 @@ func (i *DirectoryPopulator) loadManifests(imagesToAdd []*ImageInput, mode Mode) } } default: + // nolint:stylecheck return fmt.Errorf("Unsupported update mode") } @@ -262,6 +264,7 @@ func (i *DirectoryPopulator) loadManifestsSemver(bundle *Bundle, skippatch bool) } // loadOperatorBundle adds the package information to the loader's store +// nolint:unused func (i *DirectoryPopulator) loadOperatorBundle(manifest PackageManifest, bundle *Bundle) error { if manifest.PackageName == "" { return nil diff --git a/pkg/registry/registry_to_model.go b/pkg/registry/registry_to_model.go index 0ba64c72d..1efd5fad4 100644 --- a/pkg/registry/registry_to_model.go +++ b/pkg/registry/registry_to_model.go @@ -91,6 +91,7 @@ func ObjectsAndPropertiesFromBundle(b *Bundle) ([]string, []property.Property, e } var ( + // nolint:prealloc props []property.Property objects []string ) diff --git a/pkg/registry/types.go b/pkg/registry/types.go index 9b18b0661..4105aaa3d 100644 --- a/pkg/registry/types.go +++ b/pkg/registry/types.go @@ -286,6 +286,7 @@ func (gd *GVKDependency) Validate() []error { func (ld *LabelDependency) Validate() []error { errs := []error{} if *ld == (LabelDependency{}) { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Label information is missing")) } return errs @@ -295,13 +296,16 @@ func (ld *LabelDependency) Validate() []error { func (pd *PackageDependency) Validate() []error { errs := []error{} if pd.PackageName == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Package name is empty")) } if pd.Version == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Package version is empty")) } else { _, err := semver.ParseRange(pd.Version) if err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid semver format version")) } } @@ -312,15 +316,18 @@ func (pd *PackageDependency) Validate() []error { func (cc *CelConstraint) Validate() []error { errs := []error{} if cc.Cel == nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("The CEL field is missing")) } else { if cc.Cel.Rule == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("The CEL expression is missing")) return errs } validator := constraints.NewCelEnvironment() _, err := validator.Validate(cc.Cel.Rule) if err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid CEL expression: %s", err.Error())) } } @@ -329,6 +336,7 @@ func (cc *CelConstraint) Validate() []error { // GetDependencies returns the list of dependency func (d *DependenciesFile) GetDependencies() []*Dependency { + // nolint:prealloc var dependencies []*Dependency for _, item := range d.Dependencies { dep := item diff --git a/pkg/sqlite/configmap.go b/pkg/sqlite/configmap.go index 44e2302cc..be51fc65c 100644 --- a/pkg/sqlite/configmap.go +++ 
b/pkg/sqlite/configmap.go @@ -66,14 +66,14 @@ func (c *ConfigMapLoader) Populate() error { return fmt.Errorf("couldn't find expected key %s in configmap", ConfigMapCRDName) } - crdListJson, err := yaml.YAMLToJSON([]byte(crdListYaml)) + crdListJSON, err := yaml.YAMLToJSON([]byte(crdListYaml)) if err != nil { c.log.WithError(err).Debug("error loading CRD list") return err } var parsedCRDList []v1beta1.CustomResourceDefinition - if err := json.Unmarshal(crdListJson, &parsedCRDList); err != nil { + if err := json.Unmarshal(crdListJSON, &parsedCRDList); err != nil { c.log.WithError(err).Debug("error parsing CRD list") return err } @@ -106,14 +106,14 @@ func (c *ConfigMapLoader) Populate() error { errs = append(errs, fmt.Errorf("couldn't find expected key %s in configmap", ConfigMapCSVName)) return utilerrors.NewAggregate(errs) } - csvListJson, err := yaml.YAMLToJSON([]byte(csvListYaml)) + csvListJSON, err := yaml.YAMLToJSON([]byte(csvListYaml)) if err != nil { errs = append(errs, fmt.Errorf("error loading CSV list: %s", err)) return utilerrors.NewAggregate(errs) } var parsedCSVList []registry.ClusterServiceVersion - err = json.Unmarshal(csvListJson, &parsedCSVList) + err = json.Unmarshal(csvListJSON, &parsedCSVList) if err != nil { errs = append(errs, fmt.Errorf("error parsing CSV list: %s", err)) return utilerrors.NewAggregate(errs) @@ -164,14 +164,14 @@ func (c *ConfigMapLoader) Populate() error { return utilerrors.NewAggregate(errs) } - packageListJson, err := yaml.YAMLToJSON([]byte(packageListYaml)) + packageListJSON, err := yaml.YAMLToJSON([]byte(packageListYaml)) if err != nil { errs = append(errs, fmt.Errorf("error loading package list: %s", err)) return utilerrors.NewAggregate(errs) } var parsedPackageManifests []registry.PackageManifest - err = json.Unmarshal(packageListJson, &parsedPackageManifests) + err = json.Unmarshal(packageListJSON, &parsedPackageManifests) if err != nil { errs = append(errs, fmt.Errorf("error parsing package list: %s", err)) return utilerrors.NewAggregate(errs) diff --git a/pkg/sqlite/conversion.go b/pkg/sqlite/conversion.go index a5cd9bb6c..47d2257f7 100644 --- a/pkg/sqlite/conversion.go +++ b/pkg/sqlite/conversion.go @@ -40,6 +40,7 @@ func initializeModelPackages(ctx context.Context, q *SQLQuerier) (model.Model, e return nil, err } + // nolint:prealloc var rPkgs []registry.PackageManifest for _, pkgName := range pkgNames { rPkg, err := q.GetPackage(ctx, pkgName) diff --git a/pkg/sqlite/db_options.go b/pkg/sqlite/db_options.go index e09bfbc03..5d43615f1 100644 --- a/pkg/sqlite/db_options.go +++ b/pkg/sqlite/db_options.go @@ -4,12 +4,14 @@ import ( "database/sql" ) +// nolint:stylecheck type DbOptions struct { // MigratorBuilder is a function that returns a migrator instance MigratorBuilder func(*sql.DB) (Migrator, error) EnableAlpha bool } +// nolint:stylecheck type DbOption func(*DbOptions) func defaultDBOptions() *DbOptions { diff --git a/pkg/sqlite/deprecate.go b/pkg/sqlite/deprecate.go index 4ac3d61eb..80e11fc91 100644 --- a/pkg/sqlite/deprecate.go +++ b/pkg/sqlite/deprecate.go @@ -72,6 +72,7 @@ func (d *PackageDeprecator) MaybeRemovePackages() error { var errs []error var removedBundlePaths []string + // nolint:prealloc var remainingBundlePaths []string // Iterate over bundles list - see if any bundle is the head of a default channel in a package diff --git a/pkg/sqlite/load.go b/pkg/sqlite/load.go index 86ef768eb..fa59654fd 100644 --- a/pkg/sqlite/load.go +++ b/pkg/sqlite/load.go @@ -69,7 +69,7 @@ func (s *sqlLoader) AddOperatorBundle(bundle 
*registry.Bundle) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.addOperatorBundle(tx, bundle); err != nil { @@ -123,6 +123,7 @@ func (s *sqlLoader) addOperatorBundle(tx *sql.Tx, bundle *registry.Bundle) error } if substitutesFor != "" && !s.enableAlpha { + // nolint:stylecheck return fmt.Errorf("SubstitutesFor is an alpha-only feature. You must enable alpha features with the flag --enable-alpha in order to use this feature.") } @@ -204,6 +205,7 @@ func (s *sqlLoader) addSubstitutesFor(tx *sql.Tx, bundle *registry.Bundle) error if err != nil { return fmt.Errorf("failed to obtain substitutes : %s", err) } + // nolint:nestif if substitutesFor != "" { // Update any replaces that reference the substituted-for bundle _, err = updateBundleReplaces.Exec(csvName, substitutesFor) @@ -406,7 +408,7 @@ func (s *sqlLoader) AddPackageChannelsFromGraph(graph *registry.Package) error { return fmt.Errorf("unable to start a transaction: %s", err) } defer func() { - tx.Rollback() + _ = tx.Rollback() }() var errs []error @@ -506,6 +508,7 @@ func (s *sqlLoader) AddPackageChannelsFromGraph(graph *registry.Package) error { // If the number of nodes is 5 and the startDepth is 3, the expected depth is 7 (3, 4, 5, 6, 7) expectedDepth := len(channel.Nodes) + startDepth - 1 if expectedDepth != depth { + // nolint:stylecheck err := fmt.Errorf("Invalid graph: some (non-bottom) nodes defined in the graph were not mentioned as replacements of any node (%d != %d)", expectedDepth, depth) errs = append(errs, err) } @@ -532,7 +535,7 @@ func (s *sqlLoader) AddPackageChannels(manifest registry.PackageManifest) error return fmt.Errorf("unable to start a transaction: %s", err) } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.rmPackage(tx, manifest.PackageName); err != nil { @@ -591,7 +594,8 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani } var ( - errs []error + errs []error + // nolint:prealloc channels []registry.PackageChannel hasDefault bool ) @@ -716,6 +720,7 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani // If we find 'replaces' in the circuit list then we've seen it already, break out if _, ok := replaceCycle[replaces]; ok { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Cycle detected, %s replaces %s", channelEntryCSVName, replaces)) break } @@ -731,6 +736,7 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani break } if _, _, _, err := s.getBundleSkipsReplacesVersion(tx, replaces); err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid bundle %s, replaces nonexistent bundle %s", c.CurrentCSVName, replaces)) break } @@ -749,7 +755,7 @@ func (s *sqlLoader) ClearNonHeadBundles() error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() removeNonHeadBundles, err := tx.Prepare(` @@ -949,7 +955,7 @@ func (s *sqlLoader) RemovePackage(packageName string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() csvNames, err := s.getCSVNames(tx, packageName) @@ -1058,7 +1064,7 @@ func (s *sqlLoader) AddBundlePackageChannels(manifest registry.PackageManifest, return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.addOperatorBundle(tx, bundle); err != nil { @@ -1480,7 +1486,7 @@ func (s *sqlLoader) DeprecateBundle(path string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() name, version, err := getBundleNameAndVersionForImage(tx, path) @@ -1590,7 
+1596,7 @@ func (s *sqlLoader) RemoveStrandedBundles() error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.rmStrandedBundles(tx); err != nil { @@ -1740,7 +1746,7 @@ func (d *DeprecationAwareLoader) clearLastDeprecatedInPackage(pkg string) error return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() // The last deprecated bundles for a package will still have "tombstone" records in channel_entry (among other tables). @@ -1768,7 +1774,7 @@ func (s sqlLoader) RemoveOverwrittenChannelHead(pkg, bundle string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() // check if bundle has anything that replaces it getBundlesThatReplaceHeadQuery := `SELECT DISTINCT operatorbundle.name AS replaces, channel_entry.channel_name diff --git a/pkg/sqlite/loadprocs.go b/pkg/sqlite/loadprocs.go index 2ce93c605..218f2cda1 100644 --- a/pkg/sqlite/loadprocs.go +++ b/pkg/sqlite/loadprocs.go @@ -41,6 +41,7 @@ func addReplaces(tx *sql.Tx, replacesID, entryID int64) error { return nil } +// nolint:unused func addPackage(tx *sql.Tx, packageName string) error { addPackage, err := tx.Prepare("insert into package(name) values(?)") if err != nil { @@ -71,6 +72,7 @@ func addPackageIfNotExists(tx *sql.Tx, packageName string) error { return nil } +// nolint:unused func addChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error { addChannel, err := tx.Prepare("insert into channel(name, package_name, head_operatorbundle_name) values(?, ?, ?)") if err != nil { @@ -86,6 +88,7 @@ func addChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error return nil } +// nolint:unused func updateChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error { updateChannel, err := tx.Prepare("update channel set head_operatorbundle_name = ? where name = ? 
and package_name = ?") if err != nil { diff --git a/pkg/sqlite/migrations/001_related_images.go b/pkg/sqlite/migrations/001_related_images.go index 3b3c8c36b..22045e136 100644 --- a/pkg/sqlite/migrations/001_related_images.go +++ b/pkg/sqlite/migrations/001_related_images.go @@ -45,25 +45,25 @@ func getCSV(ctx context.Context, tx *sql.Tx, name string) (*registry.ClusterServ return nil, err } - var csvJson sql.NullString + var csvJSON sql.NullString if !rows.Next() { return nil, fmt.Errorf("bundle %s not found", name) } - if err := rows.Scan(&csvJson); err != nil { + if err := rows.Scan(&csvJSON); err != nil { return nil, err } - if !csvJson.Valid { + if !csvJSON.Valid { return nil, fmt.Errorf("bad value for csv") } csv := ®istry.ClusterServiceVersion{} - if err := json.Unmarshal([]byte(csvJson.String), csv); err != nil { + if err := json.Unmarshal([]byte(csvJSON.String), csv); err != nil { return nil, err } return csv, nil } func extractRelatedImages(ctx context.Context, tx *sql.Tx, name string) error { - addSql := `insert into related_image(image, operatorbundle_name) values(?,?)` + addSQL := `insert into related_image(image, operatorbundle_name) values(?,?)` csv, err := getCSV(ctx, tx, name) if err != nil { logrus.Warnf("error backfilling related images: %v", err) @@ -83,7 +83,7 @@ func extractRelatedImages(ctx context.Context, tx *sql.Tx, name string) error { images[k] = struct{}{} } for img := range images { - if _, err := tx.ExecContext(ctx, addSql, img, name); err != nil { + if _, err := tx.ExecContext(ctx, addSQL, img, name); err != nil { logrus.Warnf("error backfilling related images: %v", err) continue } diff --git a/pkg/sqlite/migrations/003_required_apis.go b/pkg/sqlite/migrations/003_required_apis.go index 0253c5119..08c4af92d 100644 --- a/pkg/sqlite/migrations/003_required_apis.go +++ b/pkg/sqlite/migrations/003_required_apis.go @@ -9,14 +9,15 @@ import ( "github.com/sirupsen/logrus" ) +// nolint:stylecheck const RequiredApiMigrationKey = 3 // Register this migration func init() { - registerMigration(RequiredApiMigrationKey, requiredApiMigration) + registerMigration(RequiredApiMigrationKey, requiredAPIMigration) } -var requiredApiMigration = &Migration{ +var requiredAPIMigration = &Migration{ Id: RequiredApiMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { sql := ` @@ -37,8 +38,8 @@ var requiredApiMigration = &Migration{ if err != nil { return err } - for entryId, bundle := range bundles { - if err := extractRequiredApis(ctx, tx, entryId, bundle); err != nil { + for entryID, bundle := range bundles { + if err := extractRequiredApis(ctx, tx, entryID, bundle); err != nil { logrus.Warnf("error backfilling required apis: %v", err) continue } @@ -67,20 +68,20 @@ func getChannelEntryBundles(ctx context.Context, tx *sql.Tx) (map[int64]string, entries := map[int64]string{} for rows.Next() { - var entryId sql.NullInt64 + var entryID sql.NullInt64 var name sql.NullString - if err = rows.Scan(&entryId, &name); err != nil { + if err = rows.Scan(&entryID, &name); err != nil { return nil, err } - if !entryId.Valid || !name.Valid { + if !entryID.Valid || !name.Valid { continue } - entries[entryId.Int64] = name.String + entries[entryID.Int64] = name.String } return entries, nil } -func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name string) error { +func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryID int64, name string) error { addAPI, err := tx.Prepare("insert or replace into api(group_name, version, kind, plural) values(?, ?, ?, ?)") if 
err != nil { return err @@ -91,12 +92,12 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st } }() - addApiRequirer, err := tx.Prepare("insert into api_requirer(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)") + addAPIRequirer, err := tx.Prepare("insert into api_requirer(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)") if err != nil { return err } defer func() { - if err := addApiRequirer.Close(); err != nil { + if err := addAPIRequirer.Close(); err != nil { logrus.WithError(err).Warningf("error closing prepared statement") } }() @@ -116,7 +117,7 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st if _, err := addAPI.Exec(group, crd.Version, crd.Kind, plural); err != nil { return err } - if _, err := addApiRequirer.Exec(group, crd.Version, crd.Kind, entryId); err != nil { + if _, err := addAPIRequirer.Exec(group, crd.Version, crd.Kind, entryID); err != nil { return err } } @@ -126,7 +127,7 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st if _, err := addAPI.Exec(api.Group, api.Version, api.Kind, api.Name); err != nil { return err } - if _, err := addApiRequirer.Exec(api.Group, api.Version, api.Kind, entryId); err != nil { + if _, err := addAPIRequirer.Exec(api.Group, api.Version, api.Kind, entryID); err != nil { return err } } diff --git a/pkg/sqlite/migrations/005_version_skiprange.go b/pkg/sqlite/migrations/005_version_skiprange.go index 60b3c87ad..6a825debc 100644 --- a/pkg/sqlite/migrations/005_version_skiprange.go +++ b/pkg/sqlite/migrations/005_version_skiprange.go @@ -75,7 +75,7 @@ var versionSkipRangeMigration = &Migration{ } func extractVersioning(ctx context.Context, tx *sql.Tx, name string) error { - addSql := `insert into operatorbundle(version, skiprange) values(?,?)` + addSQL := `insert into operatorbundle(version, skiprange) values(?,?)` csv, err := getCSV(ctx, tx, name) if err != nil { logrus.Warnf("error backfilling versioning: %v", err) @@ -89,6 +89,6 @@ func extractVersioning(ctx context.Context, tx *sql.Tx, name string) error { if err != nil { version = "" } - _, err = tx.ExecContext(ctx, addSql, version, skiprange) + _, err = tx.ExecContext(ctx, addSQL, version, skiprange) return err } diff --git a/pkg/sqlite/migrations/006_associate_apis_with_bundle.go b/pkg/sqlite/migrations/006_associate_apis_with_bundle.go index f70436f1d..0e57e67fc 100644 --- a/pkg/sqlite/migrations/006_associate_apis_with_bundle.go +++ b/pkg/sqlite/migrations/006_associate_apis_with_bundle.go @@ -11,7 +11,7 @@ const AssociateApisWithBundleMigrationKey = 6 // Register this migration func init() { - registerMigration(AssociateApisWithBundleMigrationKey, bundleApiMigration) + registerMigration(AssociateApisWithBundleMigrationKey, bundleAPIMigration) } // This migration moves the link between the provided and required apis table from the channel_entry to the @@ -24,7 +24,7 @@ func init() { // api_provider: FOREIGN KEY(operatorbundle_name, operatorbundle_version, operatorbundle_path) REFERENCES operatorbundle(name, version, bundlepath), // api_requirer: FOREIGN KEY(operatorbundle_name, operatorbundle_version, operatorbundle_path) REFERENCES operatorbundle(name, version, bundlepath), -var bundleApiMigration = &Migration{ +var bundleAPIMigration = &Migration{ Id: AssociateApisWithBundleMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { createNew := ` diff --git a/pkg/sqlite/migrations/007_replaces_skips.go b/pkg/sqlite/migrations/007_replaces_skips.go index 
7825e89fe..3385ee4a8 100644 --- a/pkg/sqlite/migrations/007_replaces_skips.go +++ b/pkg/sqlite/migrations/007_replaces_skips.go @@ -97,8 +97,8 @@ func extractReplaces(ctx context.Context, tx *sql.Tx, name string) error { if err != nil { return err } - updateSql := `update operatorbundle SET replaces = ?, skips = ? WHERE name = ?;` - _, err = tx.ExecContext(ctx, updateSql, replaces, strings.Join(skips, ","), name) + updateSQL := `update operatorbundle SET replaces = ?, skips = ? WHERE name = ?;` + _, err = tx.ExecContext(ctx, updateSQL, replaces, strings.Join(skips, ","), name) return err } diff --git a/pkg/sqlite/migrations/009_properties.go b/pkg/sqlite/migrations/009_properties.go index 046675611..7a89859b8 100644 --- a/pkg/sqlite/migrations/009_properties.go +++ b/pkg/sqlite/migrations/009_properties.go @@ -75,12 +75,12 @@ var propertiesMigration = &Migration{ } // update the serialized value to omit the dependency type - updateDependencySql := ` + updateDependencySQL := ` UPDATE dependencies SET value = (SELECT json_remove(value, "$.type") FROM dependencies WHERE operatorbundle_name=dependencies.operatorbundle_name)` - _, err = tx.ExecContext(ctx, updateDependencySql) + _, err = tx.ExecContext(ctx, updateDependencySQL) if err != nil { return err } diff --git a/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go b/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go index bee961621..d488775b0 100644 --- a/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go +++ b/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go @@ -15,12 +15,12 @@ func init() { var bundlePathPkgPropertyMigration = &Migration{ Id: BundlePathPkgMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { - updatePropertiesSql := ` + updatePropertiesSQL := ` UPDATE properties SET operatorbundle_path = (SELECT bundlepath FROM operatorbundle WHERE operatorbundle_name = operatorbundle.name AND operatorbundle_version = operatorbundle.version)` - _, err := tx.ExecContext(ctx, updatePropertiesSql) + _, err := tx.ExecContext(ctx, updatePropertiesSQL) if err != nil { return err } @@ -28,11 +28,11 @@ var bundlePathPkgPropertyMigration = &Migration{ return nil }, Down: func(ctx context.Context, tx *sql.Tx) error { - updatePropertiesSql := ` + updatePropertiesSQL := ` UPDATE properties SET operatorbundle_path = null WHERE type = "olm.package"` - _, err := tx.ExecContext(ctx, updatePropertiesSql) + _, err := tx.ExecContext(ctx, updatePropertiesSQL) if err != nil { return err } diff --git a/pkg/sqlite/migrations/migrations.go b/pkg/sqlite/migrations/migrations.go index b9bb60fba..475bb7cd6 100644 --- a/pkg/sqlite/migrations/migrations.go +++ b/pkg/sqlite/migrations/migrations.go @@ -8,6 +8,7 @@ import ( ) type Migration struct { + // nolint:stylecheck Id int Up func(context.Context, *sql.Tx) error Down func(context.Context, *sql.Tx) error diff --git a/pkg/sqlite/migrator.go b/pkg/sqlite/migrator.go index 82bacc834..8f4c1f2aa 100644 --- a/pkg/sqlite/migrator.go +++ b/pkg/sqlite/migrator.go @@ -86,12 +86,12 @@ func (m *SQLLiteMigrator) Up(ctx context.Context, migrations migrations.Migratio } for _, migration := range migrations { - current_version, err := m.version(ctx, tx) + currentVersion, err := m.version(ctx, tx) if err != nil { return err } - if migration.Id != current_version+1 { + if migration.Id != currentVersion+1 { return fmt.Errorf("migration applied out of order") } @@ -127,12 +127,12 @@ func (m *SQLLiteMigrator) Down(ctx context.Context, migrations migrations.Migrat } for _, migration := range 
migrations { - current_version, err := m.version(ctx, tx) + currentVersion, err := m.version(ctx, tx) if err != nil { return err } - if migration.Id != current_version { + if migration.Id != currentVersion { return fmt.Errorf("migration applied out of order") } diff --git a/pkg/sqlite/migrator_test.go b/pkg/sqlite/migrator_test.go index 1fcd7e2e5..8fd1aee2e 100644 --- a/pkg/sqlite/migrator_test.go +++ b/pkg/sqlite/migrator_test.go @@ -134,7 +134,11 @@ func TestSQLLiteMigrator_Down(t *testing.T) { require.NoError(t, m.setVersion(context.TODO(), tx, 0)) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + // TODO: this shouldn't be unconditionally rolled back + // run_test_migration, run_migration_out_of_order, and run_error_migration each have at least one scenario + // where rollback is no longer possible (committed or rolled back already) + // In the interest of retaining function and a good lint bright line, we'll just ignore the error here + _ = tx.Rollback() } if err := m.Down(tt.args.ctx, tt.args.migrations); (err != nil) != tt.wantErr { t.Errorf("Down() error = %v, wantErr %v", err, tt.wantErr) @@ -150,7 +154,7 @@ func TestSQLLiteMigrator_Down(t *testing.T) { version, err = m.version(context.TODO(), tx) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } require.Equal(t, tt.wantVersion, version) }) @@ -292,7 +296,7 @@ func TestSQLLiteMigrator_Up(t *testing.T) { require.NoError(t, m.setVersion(context.TODO(), tx, -1)) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } if err := m.Up(tt.args.ctx, tt.args.migrations); (err != nil) != tt.wantErr { t.Errorf("Up() error = %v, wantErr %v", err, tt.wantErr) @@ -308,7 +312,7 @@ func TestSQLLiteMigrator_Up(t *testing.T) { version, err = m.version(context.TODO(), tx) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } require.Equal(t, tt.wantVersion, version) @@ -326,7 +330,7 @@ func TestSQLLiteMigrator_Up(t *testing.T) { version, err = m.version(context.TODO(), tx) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } require.Equal(t, NilVersion, version) }) @@ -409,7 +413,7 @@ func TestSQLLiteMigrator_Migrate(t *testing.T) { require.NoError(t, m.setVersion(context.TODO(), tx, tt.startVersion)) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } if err := m.Migrate(context.TODO()); (err != nil) != tt.wantErr { t.Errorf("Migrate() error = %v, wantErr %v", err, tt.wantErr) @@ -425,7 +429,7 @@ func TestSQLLiteMigrator_Migrate(t *testing.T) { version, err = m.version(context.TODO(), tx) require.NoError(t, err) require.NoError(t, tx.Commit()) - tx.Rollback() + _ = tx.Rollback() } require.Equal(t, tt.wantVersion, version) }) diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index 4a1521a7f..a1dff6d3c 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -63,6 +63,7 @@ func NewSQLLiteQuerier(dbFilename string, opts ...SQLiteQuerierOption) (*SQLQuer return NewSQLLiteQuerierFromDb(db, opts...), nil } +// nolint:stylecheck func NewSQLLiteQuerierFromDb(db *sql.DB, opts ...SQLiteQuerierOption) *SQLQuerier { return NewSQLLiteQuerierFromDBQuerier(dbQuerierAdapter{db}, opts...) 
} @@ -241,13 +242,13 @@ func (s *SQLQuerier) GetBundle(ctx context.Context, pkgName, channelName, csvNam if !rows.Next() { return nil, fmt.Errorf("no entry found for %s %s %s", pkgName, channelName, csvName) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var name sql.NullString var bundle sql.NullString var bundlePath sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &name, &bundle, &bundlePath, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &name, &bundle, &bundlePath, &version, &skipRange); err != nil { return nil, err } @@ -265,7 +266,7 @@ func (s *SQLQuerier) GetBundle(ctx context.Context, pkgName, channelName, csvNam out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != nil { return nil, err } @@ -365,13 +366,13 @@ func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, c if !rows.Next() { return nil, fmt.Errorf("no entry found for %s %s", pkgName, channelName) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var outName sql.NullString var bundle sql.NullString var bundlePath sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &outName, &bundle, &bundlePath, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &outName, &bundle, &bundlePath, &version, &skipRange); err != nil { return nil, err } @@ -389,7 +390,7 @@ func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, c out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != nil { return nil, err } @@ -489,8 +490,8 @@ func (s *SQLQuerier) GetLatestChannelEntriesThatProvide(ctx context.Context, gro var channelNameSQL sql.NullString var bundleNameSQL sql.NullString var replacesSQL sql.NullString - var min_depth sql.NullInt64 - if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL, &min_depth); err != nil { + var minDepth sql.NullInt64 + if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL, &minDepth); err != nil { return nil, err } @@ -532,17 +533,17 @@ func (s *SQLQuerier) GetBundleThatProvides(ctx context.Context, group, apiVersio if !rows.Next() { return nil, fmt.Errorf("no entry found that provides %s %s %s", group, apiVersion, kind) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var bundle sql.NullString var bundlePath sql.NullString - var min_depth sql.NullInt64 + var minDepth sql.NullInt64 var bundleName sql.NullString var pkgName sql.NullString var channelName sql.NullString var replaces sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &bundle, &bundlePath, &min_depth, &bundleName, &pkgName, &channelName, &replaces, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &bundle, &bundlePath, &minDepth, &bundleName, &pkgName, &channelName, &replaces, &version, &skipRange); err != nil { return nil, err } @@ -564,7 +565,7 @@ func (s *SQLQuerier) GetBundleThatProvides(ctx context.Context, group, apiVersio out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != 
nil { return nil, err } @@ -809,6 +810,7 @@ func (s *SQLQuerier) GetBundlePathsForPackage(ctx context.Context, pkgName strin return nil, err } if imgName.Valid && imgName.String == "" { + // nolint: stylecheck return nil, fmt.Errorf("Index malformed: cannot find paths to bundle images") } images = append(images, imgName.String) @@ -844,6 +846,7 @@ func (s *SQLQuerier) GetBundlesForPackage(ctx context.Context, pkgName string) ( key.Version = version.String } if key.IsEmpty() { + // nolint: stylecheck return nil, fmt.Errorf("Index malformed: cannot find identifier for bundle in package %s", pkgName) } bundles[key] = struct{}{} @@ -1047,7 +1050,7 @@ func (s *SQLQuerier) SendBundles(ctx context.Context, stream registry.BundleSend return err } } - buildLegacyRequiredAPIs(out.Dependencies, &out.RequiredApis) + _ = buildLegacyRequiredAPIs(out.Dependencies, &out.RequiredApis) out.Dependencies = uniqueDeps(out.Dependencies) if props.Valid { @@ -1055,7 +1058,7 @@ func (s *SQLQuerier) SendBundles(ctx context.Context, stream registry.BundleSend return err } } - buildLegacyProvidedAPIs(out.Properties, &out.ProvidedApis) + _ = buildLegacyProvidedAPIs(out.Properties, &out.ProvidedApis) out.Properties = uniqueProps(out.Properties) if err := stream.Send(out); err != nil { return err diff --git a/test/e2e/ctx/ctx.go b/test/e2e/ctx/ctx.go index 1feb90fdd..ca54c7426 100644 --- a/test/e2e/ctx/ctx.go +++ b/test/e2e/ctx/ctx.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + // nolint: stylecheck . "github.com/onsi/ginkgo/v2" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" diff --git a/test/e2e/ctx/provisioner_kubeconfig.go b/test/e2e/ctx/provisioner_kubeconfig.go index 1ce1980d7..05a16f10f 100644 --- a/test/e2e/ctx/provisioner_kubeconfig.go +++ b/test/e2e/ctx/provisioner_kubeconfig.go @@ -26,6 +26,7 @@ func Provision(ctx *TestContext) (func(), error) { } f, err := os.Open(path) + // nolint:nestif if os.IsNotExist(err) { // try in-cluster config // see https://github.com/coreos/etcd-operator/issues/731#issuecomment-283804819
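A note on the repeated _ = tx.Rollback() lines in the migrator tests above: in Go's database/sql package, calling Rollback on a transaction that has already been committed (or rolled back) returns sql.ErrTxDone rather than doing any work, so discarding that error keeps the cleanup unconditional without hiding a real failure. The following is a minimal sketch of that pattern under stated assumptions; the helper name withTx and the sqlite3 driver import are illustrative and not taken from the patch.

	package main

	import (
		"context"
		"database/sql"
		"fmt"

		_ "github.com/mattn/go-sqlite3" // driver assumed for the example; any database/sql driver works
	)

	// withTx runs fn inside a transaction and commits on success. The deferred
	// Rollback is unconditional: after a successful Commit it returns
	// sql.ErrTxDone, which is expected and safe to ignore.
	func withTx(ctx context.Context, db *sql.DB, fn func(*sql.Tx) error) error {
		tx, err := db.BeginTx(ctx, nil)
		if err != nil {
			return err
		}
		defer func() { _ = tx.Rollback() }()

		if err := fn(tx); err != nil {
			return err
		}
		return tx.Commit()
	}

	func main() {
		db, err := sql.Open("sqlite3", ":memory:")
		if err != nil {
			panic(err)
		}
		defer db.Close()

		err = withTx(context.Background(), db, func(tx *sql.Tx) error {
			_, execErr := tx.ExecContext(context.Background(), "CREATE TABLE t (x INTEGER)")
			return execErr
		})
		// err is nil here; the deferred post-commit Rollback returned sql.ErrTxDone and was discarded.
		fmt.Println("withTx error:", err)
	}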