From 2d2634091d6c652e5dabf3ed45649c902c6d16b8 Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Tue, 1 Oct 2024 21:02:50 +0300 Subject: [PATCH 1/7] components, logger: use contextual logging approach Switch ReconcileComponent from passing the logger explicitly to wrapping it into the context [1][2]. This removes one parameter to pass around and allows the called utilities to report the component context they are called from. There is no user-visible or logging-format impact until the called utilities adopt contextual logging. [1] https://www.kubernetes.dev/blog/2022/05/25/contextual-logging/ [2] https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/3077-contextual-logging/README.md Credits-To: Bartosz Majsak <bartosz.majsak@gmail.com> Signed-off-by: Yauheni Kaliuta --- components/codeflare/codeflare.go | 3 +-- components/component.go | 2 +- components/dashboard/dashboard.go | 2 +- components/datasciencepipelines/datasciencepipelines.go | 3 +-- components/kserve/kserve.go | 4 ++-- components/kueue/kueue.go | 4 ++-- components/modelmeshserving/modelmeshserving.go | 3 +-- components/modelregistry/modelregistry.go | 4 ++-- components/ray/ray.go | 4 ++-- components/trainingoperator/trainingoperator.go | 4 ++-- components/trustyai/trustyai.go | 4 ++-- components/workbenches/workbenches.go | 4 ++-- .../datasciencecluster/datasciencecluster_controller.go | 4 +++- 13 files changed, 22 insertions(+), 23 deletions(-) diff --git a/components/codeflare/codeflare.go b/components/codeflare/codeflare.go index 5e731c28ba4..c3c2ec48474 100644 --- a/components/codeflare/codeflare.go +++ b/components/codeflare/codeflare.go @@ -8,7 +8,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -74,11 +73,11 @@ func (c *CodeFlare) GetComponentName() string { func (c *CodeFlare) ReconcileComponent(ctx context.Context, cli client.Client, - l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := c.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/component.go b/components/component.go index c43cef7ac92..8256fbb0542 100644 --- a/components/component.go +++ b/components/component.go @@ -82,7 +82,7 @@ type ManifestsConfig struct { type ComponentInterface interface { Init(ctx context.Context, platform cluster.Platform) error - ReconcileComponent(ctx context.Context, cli client.Client, logger logr.Logger, + ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, DSCISpec *dsciv1.DSCInitializationSpec, platform cluster.Platform, currentComponentStatus bool) error Cleanup(ctx context.Context, cli client.Client, owner metav1.Object, DSCISpec *dsciv1.DSCInitializationSpec) error GetComponentName() string diff --git a/components/dashboard/dashboard.go b/components/dashboard/dashboard.go index d2ed096b3bd..f8b077d4416 100644 --- a/components/dashboard/dashboard.go +++ b/components/dashboard/dashboard.go @@ -84,13 +84,13 @@ func (d *Dashboard) GetComponentName() string { func (d *Dashboard) ReconcileComponent(ctx context.Context, cli client.Client, - l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, currentComponentExist bool, ) error { entryPath := DefaultPath + l := logf.FromContext(ctx) enabled := 
d.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/datasciencepipelines/datasciencepipelines.go b/components/datasciencepipelines/datasciencepipelines.go index f0066a6c544..ace22681f6b 100644 --- a/components/datasciencepipelines/datasciencepipelines.go +++ b/components/datasciencepipelines/datasciencepipelines.go @@ -8,7 +8,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" @@ -95,12 +94,12 @@ func (d *DataSciencePipelines) GetComponentName() string { func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context, cli client.Client, - l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool, ) error { + l := logf.FromContext(ctx) enabled := d.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index 85b739285ea..f7edf65479a 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -8,7 +8,6 @@ import ( "path/filepath" "strings" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -112,7 +111,8 @@ func (k *Kserve) GetComponentName() string { } func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, - l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := k.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/kueue/kueue.go b/components/kueue/kueue.go index ec609317092..73147085783 100644 --- a/components/kueue/kueue.go +++ b/components/kueue/kueue.go @@ -6,7 +6,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -68,8 +67,9 @@ func (k *Kueue) GetComponentName() string { return ComponentName } -func (k *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func (k *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := k.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed if enabled { diff --git a/components/modelmeshserving/modelmeshserving.go b/components/modelmeshserving/modelmeshserving.go index cb1d07b7838..898d1f8617a 100644 --- a/components/modelmeshserving/modelmeshserving.go +++ b/components/modelmeshserving/modelmeshserving.go @@ -8,7 +8,6 @@ import ( "path/filepath" "strings" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -103,12 +102,12 @@ func (m *ModelMeshServing) GetComponentName() string { func (m *ModelMeshServing) ReconcileComponent(ctx 
context.Context, cli client.Client, - l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool, ) error { + l := logf.FromContext(ctx) enabled := m.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/modelregistry/modelregistry.go b/components/modelregistry/modelregistry.go index dbf577ec8f8..72c9da1e381 100644 --- a/components/modelregistry/modelregistry.go +++ b/components/modelregistry/modelregistry.go @@ -10,7 +10,6 @@ import ( "strings" "text/template" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -97,8 +96,9 @@ func (m *ModelRegistry) GetComponentName() string { return ComponentName } -func (m *ModelRegistry) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func (m *ModelRegistry) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := m.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/ray/ray.go b/components/ray/ray.go index c8fa30edbd4..f11ad8ec55f 100644 --- a/components/ray/ray.go +++ b/components/ray/ray.go @@ -8,7 +8,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -69,8 +68,9 @@ func (r *Ray) GetComponentName() string { return ComponentName } -func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := r.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/trainingoperator/trainingoperator.go b/components/trainingoperator/trainingoperator.go index a6a7c8f87e7..d422bc9a28a 100644 --- a/components/trainingoperator/trainingoperator.go +++ b/components/trainingoperator/trainingoperator.go @@ -8,7 +8,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -70,8 +69,9 @@ func (r *TrainingOperator) GetComponentName() string { return ComponentName } -func (r *TrainingOperator) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func (r *TrainingOperator) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := r.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/trustyai/trustyai.go b/components/trustyai/trustyai.go index 45a211e79c0..114d099901b 100644 --- a/components/trustyai/trustyai.go +++ b/components/trustyai/trustyai.go @@ -7,7 +7,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -79,8 +78,9 @@ func (t *TrustyAI) GetComponentName() string { return ComponentName } -func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := t.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed entryPath := DefaultPath diff --git a/components/workbenches/workbenches.go b/components/workbenches/workbenches.go index c11f1e24297..b744a1660d6 100644 --- a/components/workbenches/workbenches.go +++ b/components/workbenches/workbenches.go @@ -8,7 +8,6 @@ import ( "path/filepath" "strings" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -111,8 +110,9 @@ func (w *Workbenches) GetComponentName() string { return ComponentName } -func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) // Set default notebooks namespace // Create rhods-notebooks namespace in managed platforms enabled := w.GetManagementState() == operatorv1.Managed diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index af00a344e73..8570f270028 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -46,6 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -313,7 +314,8 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context } // Reconcile component componentLogger := newComponentLogger(log, componentName, r.DataScienceCluster.DSCISpec) - err := component.ReconcileComponent(ctx, r.Client, componentLogger, instance, r.DataScienceCluster.DSCISpec, platform, installedComponentValue) + componentCtx := logf.IntoContext(ctx, componentLogger) + err := component.ReconcileComponent(componentCtx, r.Client, instance, r.DataScienceCluster.DSCISpec, platform, installedComponentValue) // TODO: replace this hack with a full refactor of component status in the future From eab41cbf2bb8f1cc194090cb6c4bd15ccba71a20 Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Mon, 7 Oct 2024 22:07:53 +0300 Subject: [PATCH 2/7] logger, controllers: use common logging level, INFO by default Remove increasing logging level for controllers (it was also passed to components if not overridden from DSCI) since: - it made logging inconsistent. 
The base controller-runtime logger is set up with INFO level for all log modes, so when the controllers are configured for the Error level, the user sees INFO messages from both controller-runtime and other parts of the operator which use controller-runtime's logger directly; - since the base logger is configured for INFO, there is no difference in levels between "default" and "devel". Having levels 1 and 2 there is misleading. Update documentation. This patch changes the default logging: previously filtered Info messages are now displayed. There is no _big_ difference in practice, since the log is currently full of info messages anyway from parts which do not use the reconciler's logger, like: {"level":"info","ts":"2024-10-09T13:23:11Z","msg":"waiting for 1 deployment to be ready for dashboard"} Signed-off-by: Yauheni Kaliuta --- README.md | 15 +++++---------- main.go | 8 ++++---- pkg/logger/logger.go | 17 ----------------- 3 files changed, 9 insertions(+), 31 deletions(-) diff --git a/README.md b/README.md index cb7b022887f..b1b7971d47f 100644 --- a/README.md +++ b/README.md @@ -224,17 +224,12 @@ This will ensure that the doc for the apis are updated accordingly. #### Controller level -Logger on all controllers can only be changed from CSV with parameters: --log-mode devel -valid value: "" (as default) || prod || production || devel || development +Global logger configuration can be changed with a command line switch `--log-mode ` +for example from CSV. Valid values for ``: "" (as default) || prod || production || devel || development. -This mainly impacts logging for operator pod startup, generating common resource, monitoring deployment. - -| --log-mode value | mapping Log level | Comments | | ---------------- | ------------------- | -------------- | | devel | debug / 0 | lowest level | | "" | info / 1 | default option | | default | info / 1 | default option | | prod | error / 2 | highest level | +Verbosity level is INFO. +To fine tune zap backend [standard operator sdk zap switches](https://sdk.operatorframework.io/docs/building-operators/golang/references/logging/) +can be used. 
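As an illustration of the fine-tuning mentioned in the README text above (this sketch is not part of the patch and is a simplified, assumed setup rather than this operator's actual main.go), the standard controller-runtime zap flags are usually bound and applied like this:

```go
package main

import (
	"flag"

	ctrl "sigs.k8s.io/controller-runtime"
	ctrlzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// BindFlags registers the standard controller-runtime zap switches
	// (--zap-log-level, --zap-devel, --zap-encoder, ...) on the default flag set.
	var opts ctrlzap.Options
	opts.BindFlags(flag.CommandLine)
	flag.Parse()

	// UseFlagOptions applies whatever was passed on the command line on top of
	// the option defaults, so e.g. --zap-log-level=debug raises verbosity at startup.
	ctrl.SetLogger(ctrlzap.New(ctrlzap.UseFlagOptions(&opts)))
}
```

In the operator itself, the `--log-mode` preset would pre-populate the options before the zap flags refine them.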
#### Component level diff --git a/main.go b/main.go index 6eb6cffe420..a50b4c96b40 100644 --- a/main.go +++ b/main.go @@ -244,7 +244,7 @@ func main() { //nolint:funlen,maintidx if err = (&dscictrl.DSCInitializationReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DSCInitialization"), logmode), + Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DSCInitialization"), Recorder: mgr.GetEventRecorderFor("dscinitialization-controller"), ApplicationsNamespace: dscApplicationsNamespace, }).SetupWithManager(ctx, mgr); err != nil { @@ -255,7 +255,7 @@ func main() { //nolint:funlen,maintidx if err = (&dscctrl.DataScienceClusterReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DataScienceCluster"), logmode), + Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DataScienceCluster"), DataScienceCluster: &dscctrl.DataScienceClusterConfig{ DSCISpec: &dsciv1.DSCInitializationSpec{ ApplicationsNamespace: dscApplicationsNamespace, @@ -270,7 +270,7 @@ func main() { //nolint:funlen,maintidx if err = (&secretgenerator.SecretGeneratorReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("SecretGenerator"), logmode), + Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("SecretGenerator"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "SecretGenerator") os.Exit(1) @@ -279,7 +279,7 @@ func main() { //nolint:funlen,maintidx if err = (&certconfigmapgenerator.CertConfigmapGeneratorReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("CertConfigmapGenerator"), logmode), + Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("CertConfigmapGenerator"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "CertConfigmapGenerator") os.Exit(1) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index dc8a4cf97f6..ed569084b28 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -3,19 +3,12 @@ package logger import ( "flag" "os" - "strings" "github.com/go-logr/logr" "go.uber.org/zap/zapcore" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) -var logLevelMapping = map[string]int{ - "devel": 0, - "default": 1, // default one when not set log-mode - "prod": 2, -} - // NewNamedLogger creates a new logger for a component. // If the mode is set (so can be different from the default one), // it will create a new logger with the specified mode's options. @@ -26,16 +19,6 @@ func NewNamedLogger(log logr.Logger, name string, mode string) logr.Logger { return log.WithName(name) } -// in each controller, to use different log level. 
-func LogWithLevel(logger logr.Logger, level string) logr.Logger { - level = strings.TrimSpace(level) - verbosityLevel, ok := logLevelMapping[level] - if !ok { - verbosityLevel = 1 // fallback to info level - } - return logger.V(verbosityLevel) -} - func NewLoggerWithOptions(mode string, override *zap.Options) logr.Logger { opts := newOptions(mode) overrideOptions(opts, override) From a33ef07a1be50d5bb250778b49687a92594f9678 Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Mon, 7 Oct 2024 23:10:38 +0300 Subject: [PATCH 3/7] logger: import controller-runtime zap as ctrlzap To avoid patch polluting with the next changes where uber's zap is imported as `zap`. Signed-off-by: Yauheni Kaliuta --- pkg/logger/logger.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index ed569084b28..b2e8e3eec17 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -6,7 +6,7 @@ import ( "github.com/go-logr/logr" "go.uber.org/zap/zapcore" - "sigs.k8s.io/controller-runtime/pkg/log/zap" + ctrlzap "sigs.k8s.io/controller-runtime/pkg/log/zap" ) // NewNamedLogger creates a new logger for a component. @@ -19,7 +19,7 @@ func NewNamedLogger(log logr.Logger, name string, mode string) logr.Logger { return log.WithName(name) } -func NewLoggerWithOptions(mode string, override *zap.Options) logr.Logger { +func NewLoggerWithOptions(mode string, override *ctrlzap.Options) logr.Logger { opts := newOptions(mode) overrideOptions(opts, override) return newLogger(opts) @@ -31,27 +31,27 @@ func NewLogger(mode string) logr.Logger { return newLogger(newOptions(mode)) } -func newLogger(opts *zap.Options) logr.Logger { - return zap.New(zap.UseFlagOptions(opts)) +func newLogger(opts *ctrlzap.Options) logr.Logger { + return ctrlzap.New(ctrlzap.UseFlagOptions(opts)) } -func newOptions(mode string) *zap.Options { - var opts zap.Options +func newOptions(mode string) *ctrlzap.Options { + var opts ctrlzap.Options switch mode { case "devel", "development": // the most logging verbosity - opts = zap.Options{ + opts = ctrlzap.Options{ Development: true, StacktraceLevel: zapcore.WarnLevel, Level: zapcore.InfoLevel, DestWriter: os.Stdout, } case "prod", "production": // the least logging verbosity - opts = zap.Options{ + opts = ctrlzap.Options{ Development: false, StacktraceLevel: zapcore.ErrorLevel, Level: zapcore.InfoLevel, DestWriter: os.Stdout, - EncoderConfigOptions: []zap.EncoderConfigOption{func(config *zapcore.EncoderConfig) { + EncoderConfigOptions: []ctrlzap.EncoderConfigOption{func(config *zapcore.EncoderConfig) { config.EncodeTime = zapcore.ISO8601TimeEncoder // human readable not epoch config.EncodeDuration = zapcore.SecondsDurationEncoder config.LevelKey = "LogLevel" @@ -63,7 +63,7 @@ func newOptions(mode string) *zap.Options { }}, } default: - opts = zap.Options{ + opts = ctrlzap.Options{ Development: false, StacktraceLevel: zapcore.ErrorLevel, Level: zapcore.InfoLevel, @@ -73,7 +73,7 @@ func newOptions(mode string) *zap.Options { return &opts } -func overrideOptions(orig, override *zap.Options) { +func overrideOptions(orig, override *ctrlzap.Options) { // Development is boolean, cannot check for nil, so check if it was set isDevelopmentSet := false flag.Visit(func(f *flag.Flag) { From fb0c274fe4d6713da637ad17540517a73002f0a6 Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Mon, 7 Oct 2024 23:09:33 +0300 Subject: [PATCH 4/7] logger: add logLevel to DSCI devFlags Allow to override global zap log level from DSCI devFlags. 
Accepts the same values as to `--zap-log-level` command line switch. Signed-off-by: Yauheni Kaliuta --- .../v1/dscinitialization_types.go | 3 + ...ion.opendatahub.io_dscinitializations.yaml | 4 ++ .../dscinitialization_controller.go | 10 +++ docs/api-overview.md | 1 + pkg/logger/logger.go | 62 ++++++++++++++++++- 5 files changed, 77 insertions(+), 3 deletions(-) diff --git a/apis/dscinitialization/v1/dscinitialization_types.go b/apis/dscinitialization/v1/dscinitialization_types.go index 50f758a5df8..58baa9c1299 100644 --- a/apis/dscinitialization/v1/dscinitialization_types.go +++ b/apis/dscinitialization/v1/dscinitialization_types.go @@ -86,6 +86,9 @@ type DevFlags struct { // +kubebuilder:validation:Enum=devel;development;prod;production;default // +kubebuilder:default="production" LogMode string `json:"logmode,omitempty"` + // Override Zap log level. Can be "debug", "info", "error" or a number (more verbose). + // +optional + LogLevel string `json:"logLevel,omitempty"` } type TrustedCABundleSpec struct { diff --git a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml index dd381696bb1..5e76a4f65b3 100644 --- a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml +++ b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml @@ -67,6 +67,10 @@ spec: Internal development useful field to test customizations. This is not recommended to be used in production environment. properties: + logLevel: + description: Override Zap log level. Can be "debug", "info", "error" + or a number (more verbose). + type: string logmode: default: production enum: diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index 845418ac843..d7f1cc16569 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -48,6 +48,7 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/logger" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/trustedcabundle" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" ) @@ -101,6 +102,15 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re instance = &instances.Items[0] } + if instance.Spec.DevFlags != nil { + level := instance.Spec.DevFlags.LogLevel + log.V(1).Info("Setting log level", "level", level) + err := logger.SetLevel(level) + if err != nil { + log.Error(err, "Failed to set log level", "level", level) + } + } + if instance.ObjectMeta.DeletionTimestamp.IsZero() { if !controllerutil.ContainsFinalizer(instance, finalizerName) { log.Info("Adding finalizer for DSCInitialization", "name", instance.Name, "finalizer", finalizerName) diff --git a/docs/api-overview.md b/docs/api-overview.md index 3c963f15088..7eae0bc7c9d 100644 --- a/docs/api-overview.md +++ b/docs/api-overview.md @@ -670,6 +670,7 @@ _Appears in:_ | --- | --- | --- | --- | | `manifestsUri` _string_ | Custom manifests uri for odh-manifests | | | | `logmode` _string_ | | production | Enum: [devel development prod production default]
| +| `logLevel` _string_ | Override Zap log level. Can be "debug", "info", "error" or a number (more verbose). | | | #### Monitoring diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index b2e8e3eec17..49a112eb92d 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -1,14 +1,68 @@ package logger import ( + "errors" "flag" + "fmt" "os" + "strconv" + "strings" + "sync/atomic" "github.com/go-logr/logr" + "go.uber.org/zap" "go.uber.org/zap/zapcore" ctrlzap "sigs.k8s.io/controller-runtime/pkg/log/zap" ) +var logLevel atomic.Value + +// copy from controller-runtime/pkg/log/zap/flag.go. +var levelStrings = map[string]zapcore.Level{ + "debug": zap.DebugLevel, + "info": zap.InfoLevel, + "error": zap.ErrorLevel, +} + +// adjusted copy from controller-runtime/pkg/log/zap/flag.go, keep the same argument name. +func stringToLevel(flagValue string) (zapcore.Level, error) { + level, validLevel := levelStrings[strings.ToLower(flagValue)] + if validLevel { + return level, nil + } + logLevel, err := strconv.Atoi(flagValue) + if err != nil { + return 0, fmt.Errorf("invalid log level \"%s\"", flagValue) + } + if logLevel > 0 { + intLevel := -1 * logLevel + return zapcore.Level(int8(intLevel)), nil + } + + return 0, fmt.Errorf("invalid log level \"%s\"", flagValue) +} + +func SetLevel(levelStr string) error { + if levelStr == "" { + return nil + } + levelNum, err := stringToLevel(levelStr) + if err != nil { + return err + } + + // ctrlzap.addDefauls() uses a pointer to the AtomicLevel, + // but ctrlzap.(*levelFlag).Set() the structure itsef. + // So use the structure and always set the value in newOptions() to addDefaults() call + level, ok := logLevel.Load().(zap.AtomicLevel) + if !ok { + return errors.New("stored loglevel is not of type *zap.AtomicLevel") + } + + level.SetLevel(levelNum) + return nil +} + // NewNamedLogger creates a new logger for a component. // If the mode is set (so can be different from the default one), // it will create a new logger with the specified mode's options. 
@@ -22,6 +76,7 @@ func NewNamedLogger(log logr.Logger, name string, mode string) logr.Logger { func NewLoggerWithOptions(mode string, override *ctrlzap.Options) logr.Logger { opts := newOptions(mode) overrideOptions(opts, override) + logLevel.Store(opts.Level) return newLogger(opts) } @@ -37,19 +92,19 @@ func newLogger(opts *ctrlzap.Options) logr.Logger { func newOptions(mode string) *ctrlzap.Options { var opts ctrlzap.Options + level := zap.NewAtomicLevelAt(zapcore.InfoLevel) + switch mode { case "devel", "development": // the most logging verbosity opts = ctrlzap.Options{ Development: true, StacktraceLevel: zapcore.WarnLevel, - Level: zapcore.InfoLevel, DestWriter: os.Stdout, } case "prod", "production": // the least logging verbosity opts = ctrlzap.Options{ Development: false, StacktraceLevel: zapcore.ErrorLevel, - Level: zapcore.InfoLevel, DestWriter: os.Stdout, EncoderConfigOptions: []ctrlzap.EncoderConfigOption{func(config *zapcore.EncoderConfig) { config.EncodeTime = zapcore.ISO8601TimeEncoder // human readable not epoch @@ -66,10 +121,11 @@ func newOptions(mode string) *ctrlzap.Options { opts = ctrlzap.Options{ Development: false, StacktraceLevel: zapcore.ErrorLevel, - Level: zapcore.InfoLevel, DestWriter: os.Stdout, } } + + opts.Level = level return &opts } From f9389face2c842ba7cfeac262feb0731d155467d Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Mon, 7 Oct 2024 23:57:33 +0300 Subject: [PATCH 5/7] logger, components: always use controller's logger Since the log level is overridable with its own field of devFlags, do not use logmode anymore. It was used to create own logger with own zap backend in case if devFlags exist. Just add name and value to the existing logger instead. Signed-off-by: Yauheni Kaliuta --- README.md | 30 +++++++------------ .../datasciencecluster_controller.go | 11 ++----- main.go | 2 +- pkg/logger/logger.go | 22 +------------- 4 files changed, 16 insertions(+), 49 deletions(-) diff --git a/README.md b/README.md index b1b7971d47f..73a43ec3b50 100644 --- a/README.md +++ b/README.md @@ -222,8 +222,6 @@ This will ensure that the doc for the apis are updated accordingly. ### Enabled logging -#### Controller level - Global logger configuration can be changed with a command line switch `--log-mode ` for example from CSV. Valid values for ``: "" (as default) || prod || production || devel || development. @@ -231,12 +229,9 @@ Verbosity level is INFO. To fine tune zap backend [standard operator sdk zap switches](https://sdk.operatorframework.io/docs/building-operators/golang/references/logging/) can be used. -#### Component level - -Logger on components can be changed by DSCI devFlags during runtime. -By default, if not set .spec.devFlags.logmode, it uses INFO level -Modification applies to all components, not only these "Managed" ones. -Update DSCI CR with .spec.devFlags.logmode, see example : +Log level can be changed by DSCI devFlags during runtime by setting +.spec.devFlags.logLevel. It accepts the same values as `--zap-log-level` +command line switch. See example : ```console apiVersion: dscinitialization.opendatahub.io/v1 @@ -245,20 +240,17 @@ metadata: name: default-dsci spec: devFlags: - logmode: development + logLevel: debug ... ``` -Avaiable value for logmode is "devel", "development", "prod", "production". -The first two work the same set to DEBUG level; the later two work the same as using ERROR level. 
- -| .spec.devFlags.logmode | stacktrace level | verbosity | Output | Comments | -| ---------------------- | ---------------- | --------- | -------- | -------------- | -| devel | WARN | INFO | Console | lowest level, using epoch time | -| development | WARN | INFO | Console | same as devel | -| "" | ERROR | INFO | JSON | default option | -| prod | ERROR | INFO | JSON | highest level, using human readable timestamp | -| production | ERROR | INFO | JSON | same as prod | +| logmode | stacktrace level | verbosity | Output | Comments | +|-------------|------------------|-----------|---------|-----------------------------------------------| +| devel | WARN | INFO | Console | lowest level, using epoch time | +| development | WARN | INFO | Console | same as devel | +| "" | ERROR | INFO | JSON | default option | +| prod | ERROR | INFO | JSON | highest level, using human readable timestamp | +| production | ERROR | INFO | JSON | same as prod | ### Example DSCInitialization diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index 8570f270028..dddf4c968dd 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -57,7 +57,6 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/components/modelregistry" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - ctrlogger "github.com/opendatahub-io/opendatahub-operator/v2/pkg/logger" annotations "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" @@ -313,7 +312,7 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context } } // Reconcile component - componentLogger := newComponentLogger(log, componentName, r.DataScienceCluster.DSCISpec) + componentLogger := newComponentLogger(log, componentName) componentCtx := logf.IntoContext(ctx, componentLogger) err := component.ReconcileComponent(componentCtx, r.Client, instance, r.DataScienceCluster.DSCISpec, platform, installedComponentValue) @@ -366,12 +365,8 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context } // newComponentLogger is a wrapper to add DSC name and extract log mode from DSCISpec. 
-func newComponentLogger(logger logr.Logger, componentName string, dscispec *dsciv1.DSCInitializationSpec) logr.Logger { - mode := "" - if dscispec.DevFlags != nil { - mode = dscispec.DevFlags.LogMode - } - return ctrlogger.NewNamedLogger(logger, "DSC.Components."+componentName, mode) +func newComponentLogger(logger logr.Logger, componentName string) logr.Logger { + return logger.WithName("DSC.Components."+componentName).WithValues("component", componentName) } func (r *DataScienceClusterReconciler) reportError(err error, instance *dscv1.DataScienceCluster, message string) *dscv1.DataScienceCluster { diff --git a/main.go b/main.go index a50b4c96b40..a146f8783df 100644 --- a/main.go +++ b/main.go @@ -146,7 +146,7 @@ func main() { //nolint:funlen,maintidx flag.Parse() - ctrl.SetLogger(logger.NewLoggerWithOptions(logmode, &opts)) + ctrl.SetLogger(logger.NewLogger(logmode, &opts)) // root context ctx := ctrl.SetupSignalHandler() diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 49a112eb92d..122ce002bff 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -63,30 +63,10 @@ func SetLevel(levelStr string) error { return nil } -// NewNamedLogger creates a new logger for a component. -// If the mode is set (so can be different from the default one), -// it will create a new logger with the specified mode's options. -func NewNamedLogger(log logr.Logger, name string, mode string) logr.Logger { - if mode != "" { - log = NewLogger(mode) - } - return log.WithName(name) -} - -func NewLoggerWithOptions(mode string, override *ctrlzap.Options) logr.Logger { +func NewLogger(mode string, override *ctrlzap.Options) logr.Logger { opts := newOptions(mode) overrideOptions(opts, override) logLevel.Store(opts.Level) - return newLogger(opts) -} - -// in DSC component, to use different mode for logging, e.g. development, production -// when not set mode it falls to "default" which is used by startup main.go. -func NewLogger(mode string) logr.Logger { - return newLogger(newOptions(mode)) -} - -func newLogger(opts *ctrlzap.Options) logr.Logger { return ctrlzap.New(ctrlzap.UseFlagOptions(opts)) } From 3d39f0594b30348fe8d4e56d4687a4a2f319911d Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Mon, 30 Sep 2024 12:40:54 +0300 Subject: [PATCH 6/7] controllers: switch to k8s contextual logger Remove own logger from controllers' reconcilers and switch to k8s contextual logger instead [1]. Use contextual logger for SecretGeneratorReconciler and CertConfigmapGeneratorReconciler setups as well. 
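For readers unfamiliar with the contextual logging pattern referenced above, a minimal, self-contained sketch (not part of this patch series) of how a reconciler and a helper share a logger through the context; the DemoReconciler type and doWork helper are hypothetical, while logf.FromContext and logf.IntoContext are the controller-runtime helpers these patches rely on:

```go
package example

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
)

// DemoReconciler is a hypothetical reconciler used only to illustrate the pattern.
type DemoReconciler struct{}

func (r *DemoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// controller-runtime seeds the request context with a logger that already
	// carries the controller name and request details, so no Log field is needed.
	log := logf.FromContext(ctx)
	log.Info("reconciling", "name", req.Name)

	// Enrich the logger and hand it down through the context instead of a parameter.
	helperCtx := logf.IntoContext(ctx, log.WithValues("component", "demo"))
	doWork(helperCtx)

	return ctrl.Result{}, nil
}

func doWork(ctx context.Context) {
	// The callee recovers whatever logger the caller attached (or a default one).
	logf.FromContext(ctx).V(1).Info("doing work")
}
```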
[1] https://www.kubernetes.dev/blog/2022/05/25/contextual-logging/ Signed-off-by: Yauheni Kaliuta --- .../certconfigmapgenerator_controller.go | 14 ++++++------- .../datasciencecluster_controller.go | 20 +++++++++---------- .../dscinitialization_controller.go | 15 +++++++------- controllers/dscinitialization/monitoring.go | 13 ++++++------ .../dscinitialization/servicemesh_setup.go | 5 +++-- controllers/dscinitialization/suite_test.go | 1 - controllers/dscinitialization/utils.go | 9 +++++---- .../secretgenerator_controller.go | 12 +++++------ main.go | 8 ++------ 9 files changed, 44 insertions(+), 53 deletions(-) diff --git a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go index a3ce257dcb3..924e1a15290 100644 --- a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go +++ b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go @@ -5,7 +5,6 @@ import ( "context" "reflect" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -16,6 +15,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -28,13 +28,11 @@ import ( type CertConfigmapGeneratorReconciler struct { Client client.Client Scheme *runtime.Scheme - Log logr.Logger } // SetupWithManager sets up the controller with the Manager. -func (r *CertConfigmapGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error { - log := r.Log - log.Info("Adding controller for Configmap Generation.") +func (r *CertConfigmapGeneratorReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + logf.FromContext(ctx).Info("Adding controller for Configmap Generation.") return ctrl.NewControllerManagedBy(mgr). Named("cert-configmap-generator-controller"). Watches(&corev1.ConfigMap{}, handler.EnqueueRequestsFromMapFunc(r.watchTrustedCABundleConfigMapResource), builder.WithPredicates(ConfigMapChangedPredicate)). @@ -45,7 +43,7 @@ func (r *CertConfigmapGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) er // Reconcile will generate new configmap, odh-trusted-ca-bundle, that includes cluster-wide trusted-ca bundle and custom // ca bundle in every new namespace created. func (r *CertConfigmapGeneratorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - log := r.Log + log := logf.FromContext(ctx) // Request includes namespace that is newly created or where odh-trusted-ca-bundle configmap is updated. 
log.Info("Reconciling CertConfigMapGenerator.", " Request.Namespace", req.NamespacedName) // Get namespace instance @@ -109,8 +107,8 @@ func (r *CertConfigmapGeneratorReconciler) watchNamespaceResource(_ context.Cont return nil } -func (r *CertConfigmapGeneratorReconciler) watchTrustedCABundleConfigMapResource(_ context.Context, a client.Object) []reconcile.Request { - log := r.Log +func (r *CertConfigmapGeneratorReconciler) watchTrustedCABundleConfigMapResource(ctx context.Context, a client.Object) []reconcile.Request { + log := logf.FromContext(ctx) if a.GetName() == trustedcabundle.CAConfigMapName { log.Info("Cert configmap has been updated, start reconcile") return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: a.GetName(), Namespace: a.GetNamespace()}}} diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index dddf4c968dd..77268163152 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -66,7 +66,6 @@ import ( type DataScienceClusterReconciler struct { client.Client Scheme *runtime.Scheme - Log logr.Logger // Recorder to generate events Recorder record.EventRecorder DataScienceCluster *DataScienceClusterConfig @@ -84,7 +83,7 @@ const ( // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { //nolint:maintidx,gocyclo - log := r.Log + log := logf.FromContext(ctx) log.Info("Reconciling DataScienceCluster resources", "Request.Name", req.Name) // Get information on version and platform @@ -168,7 +167,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R saved.Status.Phase = status.PhaseError }) if err != nil { - r.reportError(err, instance, "failed to update DataScienceCluster condition") + r.reportError(ctx, err, instance, "failed to update DataScienceCluster condition") return ctrl.Result{}, err } @@ -232,7 +231,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R saved.Status.Release = currentOperatorRelease }) if err != nil { - _ = r.reportError(err, instance, fmt.Sprintf("failed to add conditions to status of DataScienceCluster resource name %s", req.Name)) + _ = r.reportError(ctx, err, instance, fmt.Sprintf("failed to add conditions to status of DataScienceCluster resource name %s", req.Name)) return ctrl.Result{}, err } @@ -290,7 +289,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context, instance *dscv1.DataScienceCluster, platform cluster.Platform, component components.ComponentInterface, ) (*dscv1.DataScienceCluster, error) { - log := r.Log + log := logf.FromContext(ctx) componentName := component.GetComponentName() enabled := component.GetManagementState() == operatorv1.Managed @@ -307,7 +306,7 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context status.SetComponentCondition(&saved.Status.Conditions, componentName, status.ReconcileInit, message, corev1.ConditionUnknown) }) if err != nil { - _ = r.reportError(err, instance, "failed to update DataScienceCluster conditions before first time reconciling "+componentName) + _ = r.reportError(ctx, err, instance, "failed to update 
DataScienceCluster conditions before first time reconciling "+componentName) // try to continue with reconciliation, as further updates can fix the status } } @@ -320,7 +319,7 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context if err != nil { // reconciliation failed: log errors, raise event and update status accordingly - instance = r.reportError(err, instance, "failed to reconcile "+componentName+" on DataScienceCluster") + instance = r.reportError(ctx, err, instance, "failed to reconcile "+componentName+" on DataScienceCluster") instance, _ = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { if enabled { if strings.Contains(err.Error(), datasciencepipelines.ArgoWorkflowCRD+" CRD already exists") { @@ -356,7 +355,7 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context } }) if err != nil { - instance = r.reportError(err, instance, "failed to update DataScienceCluster status after reconciling "+componentName) + instance = r.reportError(ctx, err, instance, "failed to update DataScienceCluster status after reconciling "+componentName) return instance, err } @@ -369,9 +368,8 @@ func newComponentLogger(logger logr.Logger, componentName string) logr.Logger { return logger.WithName("DSC.Components."+componentName).WithValues("component", componentName) } -func (r *DataScienceClusterReconciler) reportError(err error, instance *dscv1.DataScienceCluster, message string) *dscv1.DataScienceCluster { - log := r.Log - log.Error(err, message, "instance.Name", instance.Name) +func (r *DataScienceClusterReconciler) reportError(ctx context.Context, err error, instance *dscv1.DataScienceCluster, message string) *dscv1.DataScienceCluster { + logf.FromContext(ctx).Error(err, message, "instance.Name", instance.Name) r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DataScienceClusterReconcileError", "%s for instance %s", message, instance.Name) return instance diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index d7f1cc16569..df5e4ede40d 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -22,7 +22,6 @@ import ( "path/filepath" "reflect" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" routev1 "github.com/openshift/api/route/v1" appsv1 "k8s.io/api/apps/v1" @@ -40,6 +39,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -65,7 +65,6 @@ var managementStateChangeTrustedCA = false type DSCInitializationReconciler struct { client.Client Scheme *runtime.Scheme - Log logr.Logger Recorder record.EventRecorder ApplicationsNamespace string } @@ -79,7 +78,7 @@ type DSCInitializationReconciler struct { // Reconcile contains controller logic specific to DSCInitialization instance updates. 
func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { //nolint:funlen,gocyclo,maintidx - log := r.Log + log := logf.FromContext(ctx) log.Info("Reconciling DSCInitialization.", "DSCInitialization Request.Name", req.Name) currentOperatorRelease := cluster.GetRelease() @@ -402,8 +401,8 @@ var dsciPredicateStateChangeTrustedCA = predicate.Funcs{ }, } -func (r *DSCInitializationReconciler) watchMonitoringConfigMapResource(_ context.Context, a client.Object) []reconcile.Request { - log := r.Log +func (r *DSCInitializationReconciler) watchMonitoringConfigMapResource(ctx context.Context, a client.Object) []reconcile.Request { + log := logf.FromContext(ctx) if a.GetName() == "prometheus" && a.GetNamespace() == "redhat-ods-monitoring" { log.Info("Found monitoring configmap has updated, start reconcile") @@ -412,8 +411,8 @@ func (r *DSCInitializationReconciler) watchMonitoringConfigMapResource(_ context return nil } -func (r *DSCInitializationReconciler) watchMonitoringSecretResource(_ context.Context, a client.Object) []reconcile.Request { - log := r.Log +func (r *DSCInitializationReconciler) watchMonitoringSecretResource(ctx context.Context, a client.Object) []reconcile.Request { + log := logf.FromContext(ctx) operatorNs, err := cluster.GetOperatorNamespace() if err != nil { return nil @@ -428,7 +427,7 @@ func (r *DSCInitializationReconciler) watchMonitoringSecretResource(_ context.Co } func (r *DSCInitializationReconciler) watchDSCResource(ctx context.Context) []reconcile.Request { - log := r.Log + log := logf.FromContext(ctx) instanceList := &dscv1.DataScienceClusterList{} if err := r.Client.List(ctx, instanceList); err != nil { // do not handle if cannot get list diff --git a/controllers/dscinitialization/monitoring.go b/controllers/dscinitialization/monitoring.go index 96537c76241..f81fff74939 100644 --- a/controllers/dscinitialization/monitoring.go +++ b/controllers/dscinitialization/monitoring.go @@ -15,6 +15,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" @@ -39,7 +40,7 @@ var ( // only when reconcile on DSCI CR, initial set to true // if reconcile from monitoring, initial set to false, skip blackbox and rolebinding. 
func (r *DSCInitializationReconciler) configureManagedMonitoring(ctx context.Context, dscInit *dsciv1.DSCInitialization, initial string) error { - log := r.Log + log := logf.FromContext(ctx) if initial == "init" { // configure Blackbox exporter if err := configureBlackboxExporter(ctx, dscInit, r); err != nil { @@ -89,7 +90,7 @@ func (r *DSCInitializationReconciler) configureManagedMonitoring(ctx context.Con } func configureAlertManager(ctx context.Context, dsciInit *dsciv1.DSCInitialization, r *DSCInitializationReconciler) error { - log := r.Log + log := logf.FromContext(ctx) // Get Deadmansnitch secret deadmansnitchSecret, err := r.waitForManagedSecret(ctx, "redhat-rhods-deadmanssnitch", dsciInit.Spec.Monitoring.Namespace) if err != nil { @@ -200,7 +201,7 @@ func configureAlertManager(ctx context.Context, dsciInit *dsciv1.DSCInitializati } func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization, r *DSCInitializationReconciler) error { - log := r.Log + log := logf.FromContext(ctx) // Update rolebinding-viewer err := common.ReplaceStringsInFile(filepath.Join(prometheusManifestsPath, "prometheus-rolebinding-viewer.yaml"), map[string]string{ @@ -348,7 +349,7 @@ func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization } func configureBlackboxExporter(ctx context.Context, dsciInit *dsciv1.DSCInitialization, r *DSCInitializationReconciler) error { - log := r.Log + log := logf.FromContext(ctx) consoleRoute := &routev1.Route{} err := r.Client.Get(ctx, client.ObjectKey{Name: "console", Namespace: "openshift-console"}, consoleRoute) if err != nil { @@ -438,7 +439,7 @@ func createMonitoringProxySecret(ctx context.Context, cli client.Client, name st } func (r *DSCInitializationReconciler) configureSegmentIO(ctx context.Context, dsciInit *dsciv1.DSCInitialization) error { - log := r.Log + log := logf.FromContext(ctx) // create segment.io only when configmap does not exist in the cluster segmentioConfigMap := &corev1.ConfigMap{} if err := r.Client.Get(ctx, client.ObjectKey{ @@ -467,7 +468,7 @@ func (r *DSCInitializationReconciler) configureSegmentIO(ctx context.Context, ds } func (r *DSCInitializationReconciler) configureCommonMonitoring(ctx context.Context, dsciInit *dsciv1.DSCInitialization) error { - log := r.Log + log := logf.FromContext(ctx) if err := r.configureSegmentIO(ctx, dsciInit); err != nil { return err } diff --git a/controllers/dscinitialization/servicemesh_setup.go b/controllers/dscinitialization/servicemesh_setup.go index aef6daba07b..ed3e5a9424b 100644 --- a/controllers/dscinitialization/servicemesh_setup.go +++ b/controllers/dscinitialization/servicemesh_setup.go @@ -9,6 +9,7 @@ import ( conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" @@ -19,7 +20,7 @@ import ( ) func (r *DSCInitializationReconciler) configureServiceMesh(ctx context.Context, instance *dsciv1.DSCInitialization) error { - log := r.Log + log := logf.FromContext(ctx) serviceMeshManagementState := operatorv1.Removed if instance.Spec.ServiceMesh != nil { serviceMeshManagementState = instance.Spec.ServiceMesh.ManagementState @@ -62,7 +63,7 @@ func (r *DSCInitializationReconciler) configureServiceMesh(ctx context.Context, } func (r *DSCInitializationReconciler) removeServiceMesh(ctx 
context.Context, instance *dsciv1.DSCInitialization) error {
-	log := r.Log
+	log := logf.FromContext(ctx)
 	// on condition of Managed, do not handle Removed when set to Removed it trigger DSCI reconcile to clean up
 	if instance.Spec.ServiceMesh == nil {
 		return nil
diff --git a/controllers/dscinitialization/suite_test.go b/controllers/dscinitialization/suite_test.go
index 985618bacf8..c4fa5ff89a7 100644
--- a/controllers/dscinitialization/suite_test.go
+++ b/controllers/dscinitialization/suite_test.go
@@ -139,7 +139,6 @@ var _ = BeforeSuite(func() {
 	err = (&dscictrl.DSCInitializationReconciler{
 		Client:   k8sClient,
 		Scheme:   testScheme,
-		Log:      ctrl.Log.WithName("controllers").WithName("DSCInitialization"),
 		Recorder: mgr.GetEventRecorderFor("dscinitialization-controller"),
 	}).SetupWithManager(gCtx, mgr)
 
diff --git a/controllers/dscinitialization/utils.go b/controllers/dscinitialization/utils.go
index be4d6ff071b..77e43b024cf 100644
--- a/controllers/dscinitialization/utils.go
+++ b/controllers/dscinitialization/utils.go
@@ -18,6 +18,7 @@ import (
 	"k8s.io/client-go/util/retry"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
 
 	dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
 	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster"
@@ -37,7 +38,7 @@ var (
 // - Network Policies 'opendatahub' that allow traffic between the ODH namespaces
 // - RoleBinding 'opendatahub'.
 func (r *DSCInitializationReconciler) createOdhNamespace(ctx context.Context, dscInit *dsciv1.DSCInitialization, name string, platform cluster.Platform) error {
-	log := r.Log
+	log := logf.FromContext(ctx)
 	// Expected application namespace for the given name
 	desiredNamespace := &corev1.Namespace{
 		ObjectMeta: metav1.ObjectMeta{
@@ -142,7 +143,7 @@ func (r *DSCInitializationReconciler) createOdhNamespace(ctx context.Context, ds
 }
 
 func (r *DSCInitializationReconciler) createDefaultRoleBinding(ctx context.Context, name string, dscInit *dsciv1.DSCInitialization) error {
-	log := r.Log
+	log := logf.FromContext(ctx)
 	// Expected namespace for the given name
 	desiredRoleBinding := &rbacv1.RoleBinding{
 		TypeMeta: metav1.TypeMeta{
@@ -190,7 +191,7 @@ func (r *DSCInitializationReconciler) createDefaultRoleBinding(ctx context.Conte
 }
 
 func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context.Context, name string, dscInit *dsciv1.DSCInitialization, platform cluster.Platform) error {
-	log := r.Log
+	log := logf.FromContext(ctx)
 	if platform == cluster.ManagedRhods || platform == cluster.SelfManagedRhods {
 		// Get operator namepsace
 		operatorNs, err := cluster.GetOperatorNamespace()
@@ -375,7 +376,7 @@ func GenerateRandomHex(length int) ([]byte, error) {
 }
 
 func (r *DSCInitializationReconciler) createOdhCommonConfigMap(ctx context.Context, name string, dscInit *dsciv1.DSCInitialization) error {
-	log := r.Log
+	log := logf.FromContext(ctx)
 	// Expected configmap for the given namespace
 	desiredConfigMap := &corev1.ConfigMap{
 		TypeMeta: metav1.TypeMeta{
diff --git a/controllers/secretgenerator/secretgenerator_controller.go b/controllers/secretgenerator/secretgenerator_controller.go
index 957e02fe4ae..3c1e8e17601 100644
--- a/controllers/secretgenerator/secretgenerator_controller.go
+++ b/controllers/secretgenerator/secretgenerator_controller.go
@@ -23,7 +23,6 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/go-logr/logr"
 	oauthv1 "github.com/openshift/api/oauth/v1"
 	routev1 "github.com/openshift/api/route/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -37,6 +36,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
@@ -52,13 +52,11 @@ const (
 type SecretGeneratorReconciler struct {
 	Client client.Client
 	Scheme *runtime.Scheme
-	Log    logr.Logger
 }
 
 // SetupWithManager sets up the controller with the Manager.
-func (r *SecretGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	log := r.Log
-	log.Info("Adding controller for Secret Generation.")
+func (r *SecretGeneratorReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
+	logf.FromContext(ctx).Info("Adding controller for Secret Generation.")
 
 	// Watch only new secrets with the corresponding annotation
 	predicates := predicate.Funcs{
@@ -106,7 +104,7 @@ func (r *SecretGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error {
 // based on the specified type and complexity. This will avoid possible race
 // conditions when a deployment mounts the secret before it is reconciled.
 func (r *SecretGeneratorReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
-	log := r.Log
+	log := logf.FromContext(ctx)
 	foundSecret := &corev1.Secret{}
 	err := r.Client.Get(ctx, request.NamespacedName, foundSecret)
 	if err != nil {
@@ -209,7 +207,7 @@ func (r *SecretGeneratorReconciler) getRoute(ctx context.Context, name string, n
 }
 
 func (r *SecretGeneratorReconciler) createOAuthClient(ctx context.Context, name string, secretName string, uri string) error {
-	log := r.Log
+	log := logf.FromContext(ctx)
 	// Create OAuthClient resource
 	oauthClient := &oauthv1.OAuthClient{
 		TypeMeta: metav1.TypeMeta{
diff --git a/main.go b/main.go
index a146f8783df..5fe4290a121 100644
--- a/main.go
+++ b/main.go
@@ -244,7 +244,6 @@ func main() { //nolint:funlen,maintidx
 	if err = (&dscictrl.DSCInitializationReconciler{
 		Client:                mgr.GetClient(),
 		Scheme:                mgr.GetScheme(),
-		Log:                   ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DSCInitialization"),
 		Recorder:              mgr.GetEventRecorderFor("dscinitialization-controller"),
 		ApplicationsNamespace: dscApplicationsNamespace,
 	}).SetupWithManager(ctx, mgr); err != nil {
@@ -255,7 +254,6 @@ func main() { //nolint:funlen,maintidx
 	if err = (&dscctrl.DataScienceClusterReconciler{
 		Client: mgr.GetClient(),
 		Scheme: mgr.GetScheme(),
-		Log:    ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DataScienceCluster"),
 		DataScienceCluster: &dscctrl.DataScienceClusterConfig{
 			DSCISpec: &dsciv1.DSCInitializationSpec{
 				ApplicationsNamespace: dscApplicationsNamespace,
@@ -270,8 +268,7 @@ func main() { //nolint:funlen,maintidx
 	if err = (&secretgenerator.SecretGeneratorReconciler{
 		Client: mgr.GetClient(),
 		Scheme: mgr.GetScheme(),
-		Log:    ctrl.Log.WithName(operatorName).WithName("controllers").WithName("SecretGenerator"),
-	}).SetupWithManager(mgr); err != nil {
+	}).SetupWithManager(ctx, mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "SecretGenerator")
 		os.Exit(1)
 	}
@@ -279,8 +276,7 @@ func main() { //nolint:funlen,maintidx
 	if err = (&certconfigmapgenerator.CertConfigmapGeneratorReconciler{
 		Client: mgr.GetClient(),
 		Scheme: mgr.GetScheme(),
-		Log:    ctrl.Log.WithName(operatorName).WithName("controllers").WithName("CertConfigmapGenerator"),
-	}).SetupWithManager(mgr); err != nil {
+	}).SetupWithManager(ctx, mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "CertConfigmapGenerator")
 		os.Exit(1)
 	}

From 3862fa147f8e3505b4f1f75ffe60e79b4b99449b Mon Sep 17 00:00:00 2001
From: Yauheni Kaliuta
Date: Mon, 30 Sep 2024 19:55:32 +0300
Subject: [PATCH 7/7] logger: blindly convert ctrl.Log users except webhook to contextual

All the users should have a proper context. The webhook is a separate
server, so it makes its own logger based on the controller-runtime one.
Log level changes will affect it as well.

Signed-off-by: Yauheni Kaliuta
---
 components/kserve/servicemesh_setup.go |  5 ++--
 pkg/cluster/resources.go               |  8 +++--
 pkg/upgrade/uninstallation.go          | 15 ++++++----
 pkg/upgrade/upgrade.go                 | 41 +++++++++++++++-----------
 4 files changed, 41 insertions(+), 28 deletions(-)

diff --git a/components/kserve/servicemesh_setup.go b/components/kserve/servicemesh_setup.go
index 126e23d88ea..44138ba625e 100644
--- a/components/kserve/servicemesh_setup.go
+++ b/components/kserve/servicemesh_setup.go
@@ -7,8 +7,8 @@ import (
 
 	operatorv1 "github.com/openshift/api/operator/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
 
 	dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
 	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster"
@@ -37,6 +37,7 @@ func (k *Kserve) removeServiceMeshConfigurations(ctx context.Context, cli client
 }
 
 func (k *Kserve) defineServiceMeshFeatures(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec) feature.FeaturesProvider {
+	log := logf.FromContext(ctx)
 	return func(registry feature.FeaturesRegistry) error {
 		authorinoInstalled, err := cluster.SubscriptionExists(ctx, cli, "authorino-operator")
 		if err != nil {
@@ -68,7 +69,7 @@ func (k *Kserve) defineServiceMeshFeatures(ctx context.Context, cli client.Clien
 				return kserveExtAuthzErr
 			}
 		} else {
-			ctrl.Log.Info("WARN: Authorino operator is not installed on the cluster, skipping authorization capability")
+			log.Info("WARN: Authorino operator is not installed on the cluster, skipping authorization capability")
 		}
 
 		return nil
diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go
index b1b0f84cdb5..073366683ea 100644
--- a/pkg/cluster/resources.go
+++ b/pkg/cluster/resources.go
@@ -13,8 +13,8 @@ import (
 	k8serr "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
-	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
 
 	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels"
 )
@@ -178,6 +178,7 @@ func ExecuteOnAllNamespaces(ctx context.Context, cli client.Client, processFunc
 
 // WaitForDeploymentAvailable to check if component deployment from 'namespace' is ready within 'timeout' before apply prometheus rules for the component.
 func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentName string, namespace string, interval int, timeout int) error {
+	log := logf.FromContext(ctx)
 	resourceInterval := time.Duration(interval) * time.Second
 	resourceTimeout := time.Duration(timeout) * time.Minute
 
@@ -188,7 +189,7 @@ func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentN
 			return false, fmt.Errorf("error fetching list of deployments: %w", err)
 		}
 
-		ctrl.Log.Info("waiting for " + strconv.Itoa(len(componentDeploymentList.Items)) + " deployment to be ready for " + componentName)
+		log.Info("waiting for " + strconv.Itoa(len(componentDeploymentList.Items)) + " deployment to be ready for " + componentName)
 		for _, deployment := range componentDeploymentList.Items {
 			if deployment.Status.ReadyReplicas != deployment.Status.Replicas {
 				return false, nil
@@ -200,6 +201,7 @@ func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentN
 }
 
 func CreateWithRetry(ctx context.Context, cli client.Client, obj client.Object, timeoutMin int) error {
+	log := logf.FromContext(ctx)
 	interval := time.Second * 5 // arbitrary value
 	timeout := time.Duration(timeoutMin) * time.Minute
 
@@ -227,7 +229,7 @@ func CreateWithRetry(ctx context.Context, cli client.Client, obj client.Object,
 
 		// retry if 500, assume webhook is not available
 		if k8serr.IsInternalError(errCreate) {
-			ctrl.Log.Info("Error creating object, retrying...", "reason", errCreate)
+			log.Info("Error creating object, retrying...", "reason", errCreate)
 			return false, nil
 		}
 
diff --git a/pkg/upgrade/uninstallation.go b/pkg/upgrade/uninstallation.go
index 315b23c1244..cc5c28e3682 100644
--- a/pkg/upgrade/uninstallation.go
+++ b/pkg/upgrade/uninstallation.go
@@ -10,6 +10,7 @@ import (
 	k8serr "k8s.io/apimachinery/pkg/api/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
 
 	dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
 	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster"
@@ -25,6 +26,7 @@ const (
 // OperatorUninstall deletes all the externally generated resources.
 // This includes DSCI, namespace created by operator (but not workbench or MR's), subscription and CSV.
 func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster.Platform) error {
+	log := logf.FromContext(ctx)
 	if err := removeDSCInitialization(ctx, cli); err != nil {
 		return err
 	}
@@ -51,7 +53,7 @@ func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster.
 			if err := cli.Delete(ctx, &namespace); err != nil {
 				return fmt.Errorf("error deleting namespace %v: %w", namespace.Name, err)
 			}
-			ctrl.Log.Info("Namespace " + namespace.Name + " deleted as a part of uninstallation.")
+			log.Info("Namespace " + namespace.Name + " deleted as a part of uninstallation.")
 		}
 	}
 
@@ -66,7 +68,7 @@ func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster.
 		return err
 	}
 
-	ctrl.Log.Info("Removing operator subscription which in turn will remove installplan")
+	log.Info("Removing operator subscription which in turn will remove installplan")
 	subsName := "opendatahub-operator"
 	if platform == cluster.SelfManagedRhods {
 		subsName = "rhods-operator"
@@ -77,10 +79,10 @@ func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster.
 		}
 	}
 
-	ctrl.Log.Info("Removing the operator CSV in turn remove operator deployment")
+	log.Info("Removing the operator CSV in turn remove operator deployment")
 	err = removeCSV(ctx, cli)
 
-	ctrl.Log.Info("All resources deleted as part of uninstall.")
+	log.Info("All resources deleted as part of uninstall.")
 	return err
 }
 
@@ -126,6 +128,7 @@ func HasDeleteConfigMap(ctx context.Context, c client.Client) bool {
 }
 
 func removeCSV(ctx context.Context, c client.Client) error {
+	log := logf.FromContext(ctx)
 	// Get watchNamespace
 	operatorNamespace, err := cluster.GetOperatorNamespace()
 	if err != nil {
@@ -142,7 +145,7 @@ func removeCSV(ctx context.Context, c client.Client) error {
 		return err
 	}
 
-	ctrl.Log.Info("Deleting CSV " + operatorCsv.Name)
+	log.Info("Deleting CSV " + operatorCsv.Name)
 	err = c.Delete(ctx, operatorCsv)
 	if err != nil {
 		if k8serr.IsNotFound(err) {
@@ -151,7 +154,7 @@ func removeCSV(ctx context.Context, c client.Client) error {
 		return fmt.Errorf("error deleting clusterserviceversion: %w", err)
 	}
 
-	ctrl.Log.Info("Clusterserviceversion " + operatorCsv.Name + " deleted as a part of uninstall")
+	log.Info("Clusterserviceversion " + operatorCsv.Name + " deleted as a part of uninstall")
 	return nil
 }
 
diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go
index eedbb45cca3..6b8f857c465 100644
--- a/pkg/upgrade/upgrade.go
+++ b/pkg/upgrade/upgrade.go
@@ -22,8 +22,8 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
-	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
 
 	dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1"
 	dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
@@ -115,6 +115,7 @@ func CreateDefaultDSC(ctx context.Context, cli client.Client) error {
 // If there exists default-dsci instance already, it will not update DSCISpec on it.
 // Note: DSCI CR modifcations are not supported, as it is the initial prereq setting for the components.
 func CreateDefaultDSCI(ctx context.Context, cli client.Client, _ cluster.Platform, appNamespace, monNamespace string) error {
+	log := logf.FromContext(ctx)
 	defaultDsciSpec := &dsciv1.DSCInitializationSpec{
 		ApplicationsNamespace: appNamespace,
 		Monitoring: dsciv1.Monitoring{
@@ -152,14 +153,14 @@ func CreateDefaultDSCI(ctx context.Context, cli client.Client, _ cluster.Platfor
 
 	switch {
 	case len(instances.Items) > 1:
-		ctrl.Log.Info("only one instance of DSCInitialization object is allowed. Please delete other instances.")
+		log.Info("only one instance of DSCInitialization object is allowed. Please delete other instances.")
 		return nil
 	case len(instances.Items) == 1:
 		// Do not patch/update if DSCI already exists.
-		ctrl.Log.Info("DSCInitialization resource already exists. It will not be updated with default DSCI.")
+		log.Info("DSCInitialization resource already exists. It will not be updated with default DSCI.")
 		return nil
 	case len(instances.Items) == 0:
-		ctrl.Log.Info("create default DSCI CR.")
+		log.Info("create default DSCI CR.")
 		err := cluster.CreateWithRetry(ctx, cli, defaultDsci, 1) // 1 min timeout
 		if err != nil {
 			return err
@@ -294,13 +295,14 @@ func deleteResources(ctx context.Context, c client.Client, resources *[]Resource
 }
 
 func deleteOneResource(ctx context.Context, c client.Client, res ResourceSpec) error {
+	log := logf.FromContext(ctx)
 	list := &unstructured.UnstructuredList{}
 	list.SetGroupVersionKind(res.Gvk)
 
 	err := c.List(ctx, list, client.InNamespace(res.Namespace))
 	if err != nil {
 		if errors.Is(err, &meta.NoKindMatchError{}) {
-			ctrl.Log.Info("CRD not found, will not delete " + res.Gvk.String())
+			log.Info("CRD not found, will not delete " + res.Gvk.String())
 			return nil
 		}
 		return fmt.Errorf("failed to list %s: %w", res.Gvk.Kind, err)
@@ -323,7 +325,7 @@ func deleteOneResource(ctx context.Context, c client.Client, res ResourceSpec) e
 			if err != nil {
 				return fmt.Errorf("failed to delete %s %s/%s: %w", res.Gvk.Kind, res.Namespace, item.GetName(), err)
 			}
-			ctrl.Log.Info("Deleted object " + item.GetName() + " " + res.Gvk.String() + "in namespace" + res.Namespace)
+			log.Info("Deleted object " + item.GetName() + " " + res.Gvk.String() + "in namespace" + res.Namespace)
 		}
 	}
 }
@@ -332,6 +334,7 @@ func deleteOneResource(ctx context.Context, c client.Client, res ResourceSpec) e
 }
 
 func deleteDeprecatedResources(ctx context.Context, cli client.Client, namespace string, resourceList []string, resourceType client.ObjectList) error {
+	log := logf.FromContext(ctx)
 	var multiErr *multierror.Error
 	listOpts := &client.ListOptions{Namespace: namespace}
 	if err := cli.List(ctx, resourceType, listOpts); err != nil {
@@ -342,16 +345,16 @@ func deleteDeprecatedResources(ctx context.Context, cli client.Client, namespace
 		item := items.Index(i).Addr().Interface().(client.Object) //nolint:errcheck,forcetypeassert
 		for _, name := range resourceList {
 			if name == item.GetName() {
-				ctrl.Log.Info("Attempting to delete " + item.GetName() + " in namespace " + namespace)
+				log.Info("Attempting to delete " + item.GetName() + " in namespace " + namespace)
 				err := cli.Delete(ctx, item)
 				if err != nil {
 					if k8serr.IsNotFound(err) {
-						ctrl.Log.Info("Could not find " + item.GetName() + " in namespace " + namespace)
+						log.Info("Could not find " + item.GetName() + " in namespace " + namespace)
 					} else {
 						multiErr = multierror.Append(multiErr, err)
 					}
 				}
-				ctrl.Log.Info("Successfully deleted " + item.GetName())
+				log.Info("Successfully deleted " + item.GetName())
 			}
 		}
 	}
@@ -360,6 +363,7 @@ func deleteDeprecatedResources(ctx context.Context, cli client.Client, namespace
 
 // Need to handle ServiceMonitor deletion separately as the generic function does not work for ServiceMonitors because of how the package is built.
 func deleteDeprecatedServiceMonitors(ctx context.Context, cli client.Client, namespace string, resourceList []string) error {
+	log := logf.FromContext(ctx)
 	var multiErr *multierror.Error
 	listOpts := &client.ListOptions{Namespace: namespace}
 	servicemonitors := &monitoringv1.ServiceMonitorList{}
@@ -371,16 +375,16 @@ func deleteDeprecatedServiceMonitors(ctx context.Context, cli client.Client, nam
 		servicemonitor := servicemonitor
 		for _, name := range resourceList {
 			if name == servicemonitor.Name {
-				ctrl.Log.Info("Attempting to delete " + servicemonitor.Name + " in namespace " + namespace)
+				log.Info("Attempting to delete " + servicemonitor.Name + " in namespace " + namespace)
 				err := cli.Delete(ctx, servicemonitor)
 				if err != nil {
 					if k8serr.IsNotFound(err) {
-						ctrl.Log.Info("Could not find " + servicemonitor.Name + " in namespace " + namespace)
+						log.Info("Could not find " + servicemonitor.Name + " in namespace " + namespace)
 					} else {
 						multiErr = multierror.Append(multiErr, err)
 					}
 				}
-				ctrl.Log.Info("Successfully deleted " + servicemonitor.Name)
+				log.Info("Successfully deleted " + servicemonitor.Name)
 			}
 		}
 	}
@@ -451,10 +455,11 @@ func unsetOwnerReference(ctx context.Context, cli client.Client, instanceName st
 }
 
 func updateODCBiasMetrics(ctx context.Context, cli client.Client, instanceName string, oldRelease cluster.Release, odhObject *unstructured.Unstructured) error {
+	log := logf.FromContext(ctx)
 	// "from version" as oldRelease, if return "0.0.0" meaning running on 2.10- release/dummy CI build
 	// if oldRelease is lower than 2.14.0(e.g 2.13.x-a), flip disableBiasMetrics to false (even the field did not exist)
 	if oldRelease.Version.Minor < 14 {
-		ctrl.Log.Info("Upgrade force BiasMetrics to false in " + instanceName + " CR due to old release < 2.14.0")
+		log.Info("Upgrade force BiasMetrics to false in " + instanceName + " CR due to old release < 2.14.0")
 		// flip TrustyAI BiasMetrics to false (.spec.dashboardConfig.disableBiasMetrics)
 		disableBiasMetricsValue := []byte(`{"spec": {"dashboardConfig": {"disableBiasMetrics": false}}}`)
 		if err := cli.Patch(ctx, odhObject, client.RawPatch(types.MergePatchType, disableBiasMetricsValue)); err != nil {
@@ -462,22 +467,23 @@ func updateODCBiasMetrics(ctx context.Context, cli client.Client, instanceName s
 		}
 		return nil
 	}
-	ctrl.Log.Info("Upgrade does not force BiasMetrics to false due to from release >= 2.14.0")
+	log.Info("Upgrade does not force BiasMetrics to false due to from release >= 2.14.0")
 	return nil
 }
 
 func updateODCModelRegistry(ctx context.Context, cli client.Client, instanceName string, oldRelease cluster.Release, odhObject *unstructured.Unstructured) error {
+	log := logf.FromContext(ctx)
 	// "from version" as oldRelease, if return "0.0.0" meaning running on 2.10- release/dummy CI build
 	// if oldRelease is lower than 2.14.0(e.g 2.13.x-a), flip disableModelRegistry to false (even the field did not exist)
 	if oldRelease.Version.Minor < 14 {
-		ctrl.Log.Info("Upgrade force ModelRegistry to false in " + instanceName + " CR due to old release < 2.14.0")
+		log.Info("Upgrade force ModelRegistry to false in " + instanceName + " CR due to old release < 2.14.0")
 		disableModelRegistryValue := []byte(`{"spec": {"dashboardConfig": {"disableModelRegistry": false}}}`)
 		if err := cli.Patch(ctx, odhObject, client.RawPatch(types.MergePatchType, disableModelRegistryValue)); err != nil {
 			return fmt.Errorf("error enable ModelRegistry in CR %s : %w", instanceName, err)
 		}
 		return nil
 	}
-	ctrl.Log.Info("Upgrade does not force ModelRegistry to false due to from release >= 2.14.0")
+	log.Info("Upgrade does not force ModelRegistry to false due to from release >= 2.14.0")
 	return nil
 }
 
@@ -497,6 +503,7 @@ func RemoveLabel(ctx context.Context, cli client.Client, objectName string, labe
 }
 
 func deleteDeprecatedNamespace(ctx context.Context, cli client.Client, namespace string) error {
+	log := logf.FromContext(ctx)
 	foundNamespace := &corev1.Namespace{}
 	if err := cli.Get(ctx, client.ObjectKey{Name: namespace}, foundNamespace); err != nil {
 		if k8serr.IsNotFound(err) {
@@ -525,7 +532,7 @@ func deleteDeprecatedNamespace(ctx context.Context, cli client.Client, namespace
 		return fmt.Errorf("error getting pods from namespace %s: %w", namespace, err)
 	}
 	if len(podList.Items) != 0 {
-		ctrl.Log.Info("Skip deletion of namespace " + namespace + " due to running Pods in it")
+		log.Info("Skip deletion of namespace " + namespace + " due to running Pods in it")
 		return nil
 	}