diff --git a/.golangci.yml b/.golangci.yml index c512101e1b..66b8800333 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -79,7 +79,9 @@ linters-settings: - pkg: "github.com/stretchr/testify/assert" desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert" - pkg: "github.com/go-kit/kit/log" - desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log" + desc: "Use log/slog instead of github.com/go-kit/kit/log" + - pkg: "github.com/go-kit/log" + desc: "Use log/slog instead of github.com/go-kit/log" - pkg: "io/ioutil" desc: "Use corresponding 'os' or 'io' functions instead." - pkg: "regexp" diff --git a/CHANGELOG.md b/CHANGELOG.md index de97354f27..211f488954 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ * [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710 * [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196 * [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694 +* [ENHANCEMENT] OTLP receiver: If the feature flag `--created-timestamp-zero-ingestion` is enabled, convert OTel start timestamps to Prometheus zero samples. #14759 * [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941 * [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941 diff --git a/cmd/compact/main.go b/cmd/compact/main.go index 7bac352f6b..b5b4117897 100644 --- a/cmd/compact/main.go +++ b/cmd/compact/main.go @@ -9,7 +9,7 @@ import ( "runtime/pprof" "syscall" - golog "github.com/go-kit/log" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/tsdb" ) @@ -35,7 +35,7 @@ func main() { flag.Parse() - logger := golog.NewLogfmtLogger(os.Stderr) + logger := promslog.New(&promslog.Config{}) var blockDirs []string for _, d := range flag.Args() { diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index 6fb9970bae..4f6ae43f73 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -157,11 +157,11 @@ type RuleGroup struct { EvaluationDelay *model.Duration `yaml:"evaluation_delay,omitempty"` QueryOffset *model.Duration `yaml:"query_offset,omitempty"` - Limit int `yaml:"limit,omitempty"` - Rules []RuleNode `yaml:"rules"` - Labels map[string]string `yaml:"labels,omitempty"` - SourceTenants []string `yaml:"source_tenants,omitempty"` - AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"` + Limit int `yaml:"limit,omitempty"` + Rules []RuleNode `yaml:"rules"` + Labels map[string]string `yaml:"labels,omitempty"` + SourceTenants []string `yaml:"source_tenants,omitempty"` + AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"` } // Rule describes an alerting or recording rule. diff --git a/rules/group.go b/rules/group.go index b3e25f6862..865e7dcc28 100644 --- a/rules/group.go +++ b/rules/group.go @@ -19,7 +19,6 @@ import ( "log/slog" "math" "slices" - "sort" "strings" "sync" "time" @@ -76,8 +75,8 @@ type Group struct { evalIterationFunc GroupEvalIterationFunc // concurrencyController controls the rules evaluation concurrency. 
- concurrencyController RuleConcurrencyController - appOpts *storage.AppendOptions + concurrencyController RuleConcurrencyController + appOpts *storage.AppendOptions alignEvaluationTimeOnInterval bool } @@ -135,24 +134,24 @@ func NewGroup(o GroupOptions) *Group { } return &Group{ - name: o.Name, - file: o.File, - interval: o.Interval, - queryOffset: o.QueryOffset, - limit: o.Limit, - rules: o.Rules, - shouldRestore: o.ShouldRestore, - opts: o.Opts, - sourceTenants: o.SourceTenants, - seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), - done: make(chan struct{}), - managerDone: o.done, - terminated: make(chan struct{}), - logger: o.Opts.Logger.With("file", o.File, "group", o.Name), - metrics: metrics, - evalIterationFunc: evalIterationFunc, - concurrencyController: concurrencyController, - appOpts: &storage.AppendOptions{DiscardOutOfOrder: true}, + name: o.Name, + file: o.File, + interval: o.Interval, + queryOffset: o.QueryOffset, + limit: o.Limit, + rules: o.Rules, + shouldRestore: o.ShouldRestore, + opts: o.Opts, + sourceTenants: o.SourceTenants, + seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), + done: make(chan struct{}), + managerDone: o.done, + terminated: make(chan struct{}), + logger: o.Opts.Logger.With("file", o.File, "group", o.Name), + metrics: metrics, + evalIterationFunc: evalIterationFunc, + concurrencyController: concurrencyController, + appOpts: &storage.AppendOptions{DiscardOutOfOrder: true}, alignEvaluationTimeOnInterval: o.AlignEvaluationTimeOnInterval, } } @@ -598,15 +597,15 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { switch { case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): numOutOfOrder++ - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) case errors.Is(unwrappedErr, storage.ErrTooOldSample): numTooOld++ - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): numDuplicates++ - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) default: - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) } } else { buf := [1024]byte{} @@ -877,7 +876,7 @@ func (g *Group) Equals(ng *Group) bool { copyAndSort := func(x []string) []string { copied := make([]string, len(x)) copy(copied, x) - sort.Strings(copied) + slices.Sort(copied) return copied } diff --git a/rules/manager_test.go b/rules/manager_test.go index 234e1ce34c..bafcf2dc16 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -862,7 +862,7 @@ func TestUpdate_AlwaysRestore(t *testing.T) { Appendable: st, Queryable: st, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), AlwaysRestoreAlertState: true, }) ruleManager.start() @@ -894,7 +894,7 @@ func TestUpdate_AlwaysRestoreDoesntAffectUnchangedGroups(t *testing.T) { Appendable: st, Queryable: st, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), AlwaysRestoreAlertState: true, }) ruleManager.start() @@ -933,7 +933,7 @@ func TestUpdateSetsSourceTenants(t *testing.T) { Queryable: st, QueryFunc: 
EngineQueryFunc(engine, st), Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) ruleManager.start() defer ruleManager.Stop() @@ -975,7 +975,7 @@ func TestAlignEvaluationTimeOnInterval(t *testing.T) { Queryable: st, QueryFunc: EngineQueryFunc(engine, st), Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), }) ruleManager.start() defer ruleManager.Stop() @@ -1047,7 +1047,7 @@ func TestGroupEvaluationContextFuncIsCalledWhenSupplied(t *testing.T) { Queryable: st, QueryFunc: mockQueryFunc, Context: context.Background(), - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), GroupEvaluationContextFunc: mockContextWrapFunc, }) @@ -1080,13 +1080,13 @@ type ruleGroupsTest struct { // ruleGroupTest forms a testing struct for running tests over rules. type ruleGroupTest struct { - Name string `yaml:"name"` - Interval model.Duration `yaml:"interval,omitempty"` - Limit int `yaml:"limit,omitempty"` - Rules []rulefmt.Rule `yaml:"rules"` - Labels map[string]string `yaml:"labels,omitempty"` - SourceTenants []string `yaml:"source_tenants,omitempty"` - AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"` + Name string `yaml:"name"` + Interval model.Duration `yaml:"interval,omitempty"` + Limit int `yaml:"limit,omitempty"` + Rules []rulefmt.Rule `yaml:"rules"` + Labels map[string]string `yaml:"labels,omitempty"` + SourceTenants []string `yaml:"source_tenants,omitempty"` + AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"` } func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest { @@ -1109,7 +1109,7 @@ func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest { Interval: g.Interval, Limit: g.Limit, Rules: rtmp, - Labels: g.Labels, + Labels: g.Labels, SourceTenants: g.SourceTenants, AlignEvaluationTimeOnInterval: g.AlignEvaluationTimeOnInterval, }) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index b6b6e2aeb8..08d72c52f4 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -21,6 +21,7 @@ import ( "encoding/hex" "fmt" "log" + "log/slog" "math" "slices" "sort" @@ -28,8 +29,6 @@ import ( "unicode/utf8" "github.com/cespare/xxhash/v2" - gokitlog "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -245,7 +244,7 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: // https://github.com/prometheus/prometheus/issues/13485. 
func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, baseName string, logger gokitlog.Logger) error { + resource pcommon.Resource, settings Settings, baseName string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -337,7 +336,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo labels := createLabels(baseName+createdSuffix, baseLabels) c.addTimeSeriesIfNeeded(labels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram") + logger.Debug("addHistogramDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram") } return nil @@ -451,7 +450,7 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { } func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, - settings Settings, baseName string, logger gokitlog.Logger) error { + settings Settings, baseName string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -508,7 +507,7 @@ func (c *PrometheusConverter) addSummaryDataPoin c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary") + logger.Debug("addSummaryDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary") } return nil @@ -592,7 +591,7 @@ const defaultIntervalForStartTimestamps = int64(300_000) // make use of its direct support for Created Timestamps instead. // See https://opentelemetry.io/docs/specs/otel/metrics/data-model/#resets-and-gaps to know more about how OTel handles // resets for cumulative metrics. -func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb.Label, settings Settings, typ string, value float64, logger gokitlog.Logger) { +func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb.Label, settings Settings, typ string, value float64, logger *slog.Logger) { if !settings.EnableCreatedTimestampZeroIngestion { return } @@ -614,7 +613,7 @@ return } - level.Debug(logger).Log("msg", "adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value) + logger.Debug("adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value) // See https://github.com/prometheus/prometheus/issues/14600 for context. 
c.addSample(&prompb.Sample{Timestamp: startTs}, labels) @@ -690,10 +689,10 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta return } - ts := convertTimeStamp(timestamp) sample := &prompb.Sample{ - Value: float64(1), - Timestamp: ts, + Value: float64(1), + // convert ns to ms + Timestamp: convertTimeStamp(timestamp), } converter.addSample(sample, labels) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go index 041e4fc7fe..051d0d8e86 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -21,13 +21,13 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/prompb" ) @@ -196,10 +196,10 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { nowMinus6m := pcommon.Timestamp(now.Add(-20 * time.Second).UnixNano()) nowMinus1h := pcommon.Timestamp(now.Add(-1 * time.Hour).UnixNano()) tests := []struct { + name string overrideValidInterval time.Duration metric func() pmetric.Metric want func() map[uint64]*prompb.TimeSeries - name string }{ { name: "summary with start time equal to sample timestamp", @@ -437,7 +437,7 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { ValidIntervalCreatedTimestampZeroIngestion: tt.overrideValidInterval, }, metric.Name(), - log.NewNopLogger(), + promslog.NewNopLogger(), ) require.NoError(t, err) @@ -551,7 +551,7 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { EnableCreatedTimestampZeroIngestion: true, }, metric.Name(), - log.NewNopLogger(), + promslog.NewNopLogger(), ) require.NoError(t, err) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 5bd7fc7410..8349d4f907 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -60,7 +60,6 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont promName, ) ts, _ := c.getOrCreateTimeSeries(lbls) - ts.Histograms = append(ts.Histograms, histogram) exemplars, err := getPromExemplars[pmetric.ExponentialHistogramDataPoint](ctx, &c.everyN, pt) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index a74caabc58..a7f41a6316 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -20,11 +20,11 @@ import ( "context" "errors" "fmt" + "log/slog" "sort" "strings" "time" - "github.com/go-kit/log" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" @@ -67,7 +67,7 @@ func NewPrometheusConverter() *PrometheusConverter { } // FromMetrics converts pmetric.Metrics to Prometheus remote write format. 
-func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger log.Logger) (annots annotations.Annotations, errs error) { +func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger *slog.Logger) (annots annotations.Annotations, errs error) { c.everyN = everyNTimes{n: 128} resourceMetricsSlice := md.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go index b8145401f8..9d75c83dd0 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "github.com/go-kit/log" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -34,7 +34,7 @@ func TestFromMetrics(t *testing.T) { converter := NewPrometheusConverter() payload := createExportRequest(5, 128, 128, 2, 0) - annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), Settings{}, log.NewNopLogger()) + annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), Settings{}, promslog.NewNopLogger()) require.NoError(t, err) require.Empty(t, annots) }) @@ -46,7 +46,7 @@ func TestFromMetrics(t *testing.T) { cancel() payload := createExportRequest(5, 128, 128, 2, 0) - annots, err := converter.FromMetrics(ctx, payload.Metrics(), Settings{}, log.NewNopLogger()) + annots, err := converter.FromMetrics(ctx, payload.Metrics(), Settings{}, promslog.NewNopLogger()) require.ErrorIs(t, err, context.Canceled) require.Empty(t, annots) }) @@ -58,7 +58,7 @@ func TestFromMetrics(t *testing.T) { t.Cleanup(cancel) payload := createExportRequest(5, 128, 128, 2, 0) - annots, err := converter.FromMetrics(ctx, payload.Metrics(), Settings{}, log.NewNopLogger()) + annots, err := converter.FromMetrics(ctx, payload.Metrics(), Settings{}, promslog.NewNopLogger()) require.ErrorIs(t, err, context.DeadlineExceeded) require.Empty(t, annots) }) @@ -86,7 +86,7 @@ func TestFromMetrics(t *testing.T) { } converter := NewPrometheusConverter() - annots, err := converter.FromMetrics(context.Background(), request.Metrics(), Settings{}, log.NewNopLogger()) + annots, err := converter.FromMetrics(context.Background(), request.Metrics(), Settings{}, promslog.NewNopLogger()) require.NoError(t, err) require.NotEmpty(t, annots) ws, infos := annots.AsStrings("", 0, 0) @@ -119,7 +119,7 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { for i := 0; i < b.N; i++ { converter := NewPrometheusConverter() - annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), Settings{}, log.NewNopLogger()) + annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), Settings{}, promslog.NewNopLogger()) require.NoError(b, err) require.Empty(b, annots) require.NotNil(b, converter.TimeSeries()) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index af0c7a478b..30385f8fd2 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -18,10 +18,9 @@ package prometheusremotewrite import ( 
"context" + "log/slog" "math" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -47,9 +46,9 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data model.MetricNameLabel, name, ) - timestamp := convertTimeStamp(pt.Timestamp()) sample := &prompb.Sample{ - Timestamp: timestamp, + // convert ns to ms + Timestamp: convertTimeStamp(pt.Timestamp()), } switch pt.ValueType() { case pmetric.NumberDataPointValueTypeInt: @@ -67,7 +66,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data } func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, logger log.Logger) error { + resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -85,8 +84,8 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo model.MetricNameLabel, name, ) - sample := &prompb.Sample{ + // convert ns to ms Timestamp: timestamp, } switch pt.ValueType() { @@ -127,7 +126,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo } c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(lbls), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "sum") + logger.Debug("addSumNumberDataPoints", "labels", labelsStringer(lbls), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "sum") } return nil diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go index fcd8922d92..a0f4bda706 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go @@ -21,8 +21,8 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -256,7 +256,7 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { EnableCreatedTimestampZeroIngestion: true, }, metric.Name(), - log.NewNopLogger(), + promslog.NewNopLogger(), ) assert.Equal(t, tt.want(), converter.unique) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 1020e44723..99e4392ff5 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -497,7 +497,7 @@ func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, con } type otlpWriteHandler struct { - logger *slog.Logger + logger *slog.Logger rwHandler *writeHandler configFunc func() config.Config enableCTZeroIngestion bool diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go index 17b35ec9e4..7d05f5b912 100644 --- a/storage/remote/write_test.go +++ b/storage/remote/write_test.go @@ -22,10 +22,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" common_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" 
+ "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -380,7 +380,7 @@ func TestOTLPWriteHandler(t *testing.T) { req.Header.Set("Content-Type", "application/x-protobuf") appendable := &mockAppendable{} - handler := NewOTLPWriteHandler(log.NewNopLogger(), appendable, func() config.Config { + handler := NewOTLPWriteHandler(promslog.NewNopLogger(), appendable, func() config.Config { return config.Config{ OTLPConfig: config.DefaultOTLPConfig, } diff --git a/tsdb/block.go b/tsdb/block.go index 40299206ab..bfb04c3f43 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -352,7 +352,7 @@ func OpenBlock(logger *slog.Logger, dir string, pool chunkenc.Pool) (pb *Block, } // OpenBlockWithOptions is like OpenBlock but allows to pass a cache provider and sharding function. -func OpenBlockWithOptions(logger log.Logger, dir string, pool chunkenc.Pool, cache index.ReaderCacheProvider, postingsCacheTTL time.Duration, postingsCacheMaxItems int, postingsCacheMaxBytes int64, postingsCacheForce bool) (pb *Block, err error) { +func OpenBlockWithOptions(logger *slog.Logger, dir string, pool chunkenc.Pool, cache index.ReaderCacheProvider, postingsCacheTTL time.Duration, postingsCacheMaxItems int, postingsCacheMaxBytes int64, postingsCacheForce bool) (pb *Block, err error) { if logger == nil { logger = promslog.NewNopLogger() } diff --git a/tsdb/compact.go b/tsdb/compact.go index 3f7e2c88d0..c176c80483 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -550,8 +550,8 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, meta := outBlocks[ix].meta if meta.Stats.NumSamples == 0 { - level.Info(c.logger).Log( - "msg", "compact blocks resulted in empty block", + c.logger.Info( + "compact blocks resulted in empty block", "count", len(blocks), "sources", fmt.Sprintf("%v", uids), "duration", time.Since(start), @@ -561,8 +561,8 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, allOutputBlocksAreEmpty = false ulids[ix] = outBlocks[ix].meta.ULID - level.Info(c.logger).Log( - "msg", "compact blocks", + c.logger.Info( + "compact blocks", "count", len(blocks), "mint", meta.MinTime, "maxt", meta.MaxTime, @@ -680,12 +680,12 @@ func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, s for _, ob := range obs { if ob.tmpDir != "" { if removeErr := os.RemoveAll(ob.tmpDir); removeErr != nil { - level.Error(c.logger).Log("msg", "Failed to remove temp folder after failed compaction", "dir", ob.tmpDir, "err", removeErr.Error()) + c.logger.Error("Failed to remove temp folder after failed compaction", "dir", ob.tmpDir, "err", removeErr.Error()) } } if ob.blockDir != "" { if removeErr := os.RemoveAll(ob.blockDir); removeErr != nil { - level.Error(c.logger).Log("msg", "Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) + c.logger.Error("Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) } } } @@ -700,8 +700,8 @@ func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, s meta := outBlocks[ix][jx].meta if meta.Stats.NumSamples != 0 { noOOOBlock = false - level.Info(c.logger).Log( - "msg", "compact ooo head", + c.logger.Info( + "compact ooo head", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, @@ -716,8 +716,8 @@ func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, s } if 
noOOOBlock { - level.Info(c.logger).Log( - "msg", "compact ooo head resulted in no blocks", + c.logger.Info( + "compact ooo head resulted in no blocks", "duration", time.Since(start), ) return nil, nil @@ -814,7 +814,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop if err != nil && ob.blockDir != "" { // RemoveAll returns no error when tmp doesn't exist so it is safe to always run it. if removeErr := os.RemoveAll(ob.blockDir); removeErr != nil { - level.Error(c.logger).Log("msg", "Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) + c.logger.Error("Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) } } } @@ -939,7 +939,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop return nil } -func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger log.Logger) { +func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger *slog.Logger) { if len(chks) <= 1 { return } @@ -955,7 +955,6 @@ func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger log.Lo // Looks like the chunk is out of order. logValues := []any{ - "msg", "found out-of-order chunk when compacting", "num_chunks_for_series", len(chks), "index", i, "labels", lbls.String(), @@ -983,7 +982,7 @@ func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger log.Lo ) } - level.Warn(logger).Log(logValues...) + logger.Warn("found out-of-order chunk when compacting", logValues...) } } @@ -1015,7 +1014,7 @@ type DefaultBlockPopulator struct{} // It expects sorted blocks input by mint. // If there is more than 1 output block, each output block will only contain series that hash into its shard // (based on total number of output blocks). -func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock, postingsFunc IndexReaderPostingsFunc) (err error) { +func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock, postingsFunc IndexReaderPostingsFunc) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block(s) from no readers") } @@ -1319,7 +1318,7 @@ func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesM } // Returns opened blocks, and blocks that should be closed (also returned in case of error). 
-func openBlocksForCompaction(dirs []string, open []*Block, logger log.Logger, pool chunkenc.Pool, concurrency int) (blocks, blocksToClose []*Block, _ error) { +func openBlocksForCompaction(dirs []string, open []*Block, logger *slog.Logger, pool chunkenc.Pool, concurrency int) (blocks, blocksToClose []*Block, _ error) { blocks = make([]*Block, 0, len(dirs)) blocksToClose = make([]*Block, 0, len(dirs)) diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index e5f0629f97..f66ec062ef 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -537,7 +537,7 @@ func TestCompaction_CompactWithSplitting(t *testing.T) { for _, shardCount := range shardCounts { t.Run(fmt.Sprintf("series=%d, shards=%d", series, shardCount), func(t *testing.T) { - c, err := NewLeveledCompactorWithChunkSize(ctx, nil, log.NewNopLogger(), []int64{0}, nil, chunks.DefaultChunkSegmentSize, nil) + c, err := NewLeveledCompactorWithChunkSize(ctx, nil, promslog.NewNopLogger(), []int64{0}, nil, chunks.DefaultChunkSegmentSize, nil) require.NoError(t, err) blockIDs, err := c.CompactWithSplitting(dir, blockDirs, openBlocks, shardCount) @@ -572,7 +572,7 @@ func TestCompaction_CompactWithSplitting(t *testing.T) { // Our splitting compaction preserves it too. seriesSymbols[""] = struct{}{} - block, err := OpenBlock(log.NewNopLogger(), filepath.Join(dir, blockID.String()), nil) + block, err := OpenBlock(promslog.NewNopLogger(), filepath.Join(dir, blockID.String()), nil) require.NoError(t, err) defer func() { @@ -658,7 +658,7 @@ func TestCompaction_CompactEmptyBlocks(t *testing.T) { require.NoError(t, os.Mkdir(bdir, 0o777)) require.NoError(t, os.Mkdir(chunkDir(bdir), 0o777)) - _, err := writeMetaFile(log.NewNopLogger(), bdir, m) + _, err := writeMetaFile(promslog.NewNopLogger(), bdir, m) require.NoError(t, err) iw, err := index.NewWriter(context.Background(), filepath.Join(bdir, indexFilename)) @@ -671,7 +671,7 @@ func TestCompaction_CompactEmptyBlocks(t *testing.T) { blockDirs = append(blockDirs, bdir) } - c, err := NewLeveledCompactorWithChunkSize(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, chunks.DefaultChunkSegmentSize, nil) + c, err := NewLeveledCompactorWithChunkSize(context.Background(), nil, promslog.NewNopLogger(), []int64{0}, nil, chunks.DefaultChunkSegmentSize, nil) require.NoError(t, err) blockIDs, err := c.CompactWithSplitting(dir, blockDirs, nil, 5) @@ -1659,7 +1659,7 @@ func TestOpenBlocksForCompaction(t *testing.T) { // Open subset of blocks first. const blocksToOpen = 2 - opened, toClose, err := openBlocksForCompaction(blockDirs[:blocksToOpen], nil, log.NewNopLogger(), nil, 10) + opened, toClose, err := openBlocksForCompaction(blockDirs[:blocksToOpen], nil, promslog.NewNopLogger(), nil, 10) for _, b := range toClose { defer func(b *Block) { require.NoError(t, b.Close()) }(b) } @@ -1669,7 +1669,7 @@ func TestOpenBlocksForCompaction(t *testing.T) { checkBlocks(t, toClose, blockDirs[:blocksToOpen]...) // Open all blocks, but provide previously opened blocks. 
- opened2, toClose2, err := openBlocksForCompaction(blockDirs, opened, log.NewNopLogger(), nil, 10) + opened2, toClose2, err := openBlocksForCompaction(blockDirs, opened, promslog.NewNopLogger(), nil, 10) for _, b := range toClose2 { defer func(b *Block) { require.NoError(t, b.Close()) }(b) } @@ -1695,11 +1695,11 @@ func TestOpenBlocksForCompactionErrorsNoMeta(t *testing.T) { } // open block[0] - b0, err := OpenBlock(log.NewNopLogger(), blockDirs[0], nil) + b0, err := OpenBlock(promslog.NewNopLogger(), blockDirs[0], nil) require.NoError(t, err) defer func() { require.NoError(t, b0.Close()) }() - _, toClose, err := openBlocksForCompaction(blockDirs, []*Block{b0}, log.NewNopLogger(), nil, 10) + _, toClose, err := openBlocksForCompaction(blockDirs, []*Block{b0}, promslog.NewNopLogger(), nil, 10) require.Error(t, err) // We didn't get to opening more blocks, because we found invalid dir, so there is nothing to close. @@ -1722,7 +1722,7 @@ func TestOpenBlocksForCompactionErrorsMissingIndex(t *testing.T) { } // open block[1] - b1, err := OpenBlock(log.NewNopLogger(), blockDirs[1], nil) + b1, err := OpenBlock(promslog.NewNopLogger(), blockDirs[1], nil) require.NoError(t, err) defer func() { require.NoError(t, b1.Close()) }() @@ -1732,7 +1732,7 @@ func TestOpenBlocksForCompactionErrorsMissingIndex(t *testing.T) { // Block[2] will be opened correctly. // Block[3] is invalid and will cause error. // Block[4] will not be opened at all. - opened, toClose, err := openBlocksForCompaction(blockDirs, []*Block{b1}, log.NewNopLogger(), nil, 1) + opened, toClose, err := openBlocksForCompaction(blockDirs, []*Block{b1}, promslog.NewNopLogger(), nil, 1) for _, b := range toClose { defer func(b *Block) { require.NoError(t, b.Close()) }(b) } diff --git a/tsdb/head.go b/tsdb/head.go index 6a593ea26d..324b0a6060 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -26,8 +26,6 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/promslog" diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 0ef68c9615..b92480b67c 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -516,9 +516,9 @@ func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { workerReadyWg.Add(writeConcurrency + readConcurrency) // Start the write workers. - for workerID := 0; workerID < writeConcurrency; workerID++ { + for wid := 0; wid < writeConcurrency; wid++ { // Create copy of workerID to be used by worker routine. - workerID := workerID + workerID := wid g.Go(func() error { // The label sets which this worker will write. @@ -560,9 +560,9 @@ func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { readerTsCh := make(chan uint64) // Start the read workers. - for workerID := 0; workerID < readConcurrency; workerID++ { + for wid := 0; wid < readConcurrency; wid++ { // Create copy of threadID to be used by worker routine. - workerID := workerID + workerID := wid g.Go(func() error { querySeriesRef := (seriesCnt / readConcurrency) * workerID diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go index b9badfea21..b1641e29ba 100644 --- a/tsdb/ooo_head_test.go +++ b/tsdb/ooo_head_test.go @@ -27,7 +27,6 @@ import ( const testMaxSize int = 32 -// Formulas chosen to make testing easy. // Formulas chosen to make testing easy. 
func valEven(pos int) int64 { return int64(pos*2 + 2) } // s[0]=2, s[1]=4, s[2]=6, ..., s[31]=64 - Predictable pre-existing values func valOdd(pos int) int64 { return int64(pos*2 + 1) } // s[0]=1, s[1]=3, s[2]=5, ..., s[31]=63 - New values will interject at chosen position because they sort before the pre-existing vals. diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index 26de2e625e..5380798b42 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -29,6 +29,7 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/prometheus/prometheus/util/testutil" ) func TestMain(m *testing.M) { diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index f8d1cfb3ea..b31a04ca8c 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -4341,12 +4341,12 @@ } }, "node_modules/@prometheus-io/codemirror-promql": { - "version": "0.55.0-rc.0", - "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.55.0-rc.0.tgz", - "integrity": "sha512-BlDKH2eB8Sd9bQmQjvJvncvZ+VTtrtReSO6qWZXULyrXp+FEjONybOH3Ejq/0a2hat0GpZzcEfwKqPbdy4WdCQ==", + "version": "0.55.0", + "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.55.0.tgz", + "integrity": "sha512-W+aBBToIvxHbcDsQYJSpgaMtcLUCy3SMIK6jluaEgJrkpOfEJnItZu/rvCC/ehCz2c+h+6WkPJklH8WogsXyEg==", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.55.0-rc.0", + "@prometheus-io/lezer-promql": "0.55.0", "lru-cache": "^7.18.3" }, "engines": { @@ -4362,9 +4362,9 @@ } }, "node_modules/@prometheus-io/lezer-promql": { - "version": "0.55.0-rc.0", - "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.55.0-rc.0.tgz", - "integrity": "sha512-Ikaabw8gfu0HI2D2rKykLBWio+ytTEE03bdZDMpILYULoeGVPdKgbeGLLI9Kafyv48Qiis55o60EfDoywiRHqA==", + "version": "0.55.0", + "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.55.0.tgz", + "integrity": "sha512-DHg6l6pfDnE8eLsj4DyXhFDe7OsqSBw2EnSVG4biddzLsIF5gXKazIswYTGHJ26CGHHiDPcbXjhlm9dEWI2aJA==", "license": "Apache-2.0", "peerDependencies": { "@lezer/highlight": "^1.1.2",