
Fix up after syncing w/ upstream
Signed-off-by: Arve Knudsen <[email protected]>
aknuds1 committed Nov 5, 2024
1 parent d94c7b9 commit 6314392
Showing 23 changed files with 120 additions and 124 deletions.
4 changes: 3 additions & 1 deletion .golangci.yml
@@ -79,7 +79,9 @@ linters-settings:
- pkg: "github.com/stretchr/testify/assert"
desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
- pkg: "github.com/go-kit/kit/log"
desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
desc: "Use log/slog instead of github.com/go-kit/kit/log"
- pkg: "github.com/go-kit/log"
desc: "Use log/slog instead of github.com/go-kit/log"
- pkg: "io/ioutil"
desc: "Use corresponding 'os' or 'io' functions instead."
- pkg: "regexp"
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -11,6 +11,7 @@
* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710
* [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196
* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694
* [ENHANCEMENT] OTLP receiver: If the feature flag `--created-timestamp-zero-ingestion` is true, convert OTel start timestamps to Prometheus zero samples. #14759
* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941
* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941

4 changes: 2 additions & 2 deletions cmd/compact/main.go
@@ -9,7 +9,7 @@ import (
"runtime/pprof"
"syscall"

golog "github.com/go-kit/log"
"github.com/prometheus/common/promslog"

"github.com/prometheus/prometheus/tsdb"
)
@@ -35,7 +35,7 @@ func main() {

flag.Parse()

logger := golog.NewLogfmtLogger(os.Stderr)
logger := promslog.New(&promslog.Config{})

var blockDirs []string
for _, d := range flag.Args() {
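The change above swaps go-kit's logfmt logger for promslog, which returns a standard-library `*slog.Logger`. A minimal sketch of the new pattern, assuming only the promslog package used in this commit (the messages and key/value pairs below are invented for illustration):

```go
package main

import (
	"errors"

	"github.com/prometheus/common/promslog"
)

func main() {
	// promslog.New returns a *slog.Logger (writing to stderr by default),
	// replacing golog.NewLogfmtLogger(os.Stderr).
	logger := promslog.New(&promslog.Config{})

	// slog-style calls: the message comes first, then alternating key/value pairs.
	logger.Info("opening block dir", "dir", "data/01EXAMPLE")
	logger.Error("compaction failed", "err", errors.New("example error"))
}
```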
10 changes: 5 additions & 5 deletions model/rulefmt/rulefmt.go
@@ -157,11 +157,11 @@ type RuleGroup struct {
EvaluationDelay *model.Duration `yaml:"evaluation_delay,omitempty"`
QueryOffset *model.Duration `yaml:"query_offset,omitempty"`

Limit int `yaml:"limit,omitempty"`
Rules []RuleNode `yaml:"rules"`
Labels map[string]string `yaml:"labels,omitempty"`
SourceTenants []string `yaml:"source_tenants,omitempty"`
AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"`
Limit int `yaml:"limit,omitempty"`
Rules []RuleNode `yaml:"rules"`
Labels map[string]string `yaml:"labels,omitempty"`
SourceTenants []string `yaml:"source_tenants,omitempty"`
AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"`
}

// Rule describes an alerting or recording rule.
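The rulefmt hunk above only re-aligns struct tags, but it shows the YAML keys this fork accepts on a rule group, including `source_tenants` and `align_evaluation_time_on_interval`. A hedged sketch of how those keys map onto fields, using a local mirror struct and gopkg.in/yaml.v3 (the field subset and the example values are assumptions, not the package's actual API):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// ruleGroup mirrors a subset of the rulefmt.RuleGroup YAML tags shown in the hunk above.
type ruleGroup struct {
	Name                          string   `yaml:"name"`
	Limit                         int      `yaml:"limit,omitempty"`
	SourceTenants                 []string `yaml:"source_tenants,omitempty"`
	AlignEvaluationTimeOnInterval bool     `yaml:"align_evaluation_time_on_interval,omitempty"`
}

func main() {
	doc := []byte(`
name: example-group
limit: 10
source_tenants: [tenant-a, tenant-b]
align_evaluation_time_on_interval: true
`)
	var g ruleGroup
	if err := yaml.Unmarshal(doc, &g); err != nil {
		panic(err)
	}
	// Prints the decoded group, e.g. {Name:example-group Limit:10 ...}.
	fmt.Printf("%+v\n", g)
}
```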
51 changes: 25 additions & 26 deletions rules/group.go
@@ -19,7 +19,6 @@ import (
"log/slog"
"math"
"slices"
"sort"
"strings"
"sync"
"time"
@@ -76,8 +75,8 @@ type Group struct {
evalIterationFunc GroupEvalIterationFunc

// concurrencyController controls the rules evaluation concurrency.
concurrencyController RuleConcurrencyController
appOpts *storage.AppendOptions
concurrencyController RuleConcurrencyController
appOpts *storage.AppendOptions
alignEvaluationTimeOnInterval bool
}

@@ -135,24 +134,24 @@ func NewGroup(o GroupOptions) *Group {
}

return &Group{
name: o.Name,
file: o.File,
interval: o.Interval,
queryOffset: o.QueryOffset,
limit: o.Limit,
rules: o.Rules,
shouldRestore: o.ShouldRestore,
opts: o.Opts,
sourceTenants: o.SourceTenants,
seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)),
done: make(chan struct{}),
managerDone: o.done,
terminated: make(chan struct{}),
logger: o.Opts.Logger.With("file", o.File, "group", o.Name),
metrics: metrics,
evalIterationFunc: evalIterationFunc,
concurrencyController: concurrencyController,
appOpts: &storage.AppendOptions{DiscardOutOfOrder: true},
name: o.Name,
file: o.File,
interval: o.Interval,
queryOffset: o.QueryOffset,
limit: o.Limit,
rules: o.Rules,
shouldRestore: o.ShouldRestore,
opts: o.Opts,
sourceTenants: o.SourceTenants,
seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)),
done: make(chan struct{}),
managerDone: o.done,
terminated: make(chan struct{}),
logger: o.Opts.Logger.With("file", o.File, "group", o.Name),
metrics: metrics,
evalIterationFunc: evalIterationFunc,
concurrencyController: concurrencyController,
appOpts: &storage.AppendOptions{DiscardOutOfOrder: true},
alignEvaluationTimeOnInterval: o.AlignEvaluationTimeOnInterval,
}
}
@@ -598,15 +597,15 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
switch {
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample):
numOutOfOrder++
level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
logger.Warn("Rule evaluation result discarded", "err", err, "sample", s)
case errors.Is(unwrappedErr, storage.ErrTooOldSample):
numTooOld++
level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
logger.Warn("Rule evaluation result discarded", "err", err, "sample", s)
case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
numDuplicates++
level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
logger.Warn("Rule evaluation result discarded", "err", err, "sample", s)
default:
level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
logger.Warn("Rule evaluation result discarded", "err", err, "sample", s)
}
} else {
buf := [1024]byte{}
@@ -877,7 +876,7 @@ func (g *Group) Equals(ng *Group) bool {
copyAndSort := func(x []string) []string {
copied := make([]string, len(x))
copy(copied, x)
sort.Strings(copied)
slices.Sort(copied)
return copied
}

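Two patterns recur throughout rules/group.go above: go-kit's `level.Warn(logger).Log("msg", ..., k, v)` becomes `logger.Warn(msg, k, v)` on a `*slog.Logger`, and `sort.Strings` gives way to the generic `slices.Sort`. A small sketch of both, assuming a promslog-backed logger as elsewhere in this commit (the sample values are invented):

```go
package main

import (
	"errors"
	"slices"

	"github.com/prometheus/common/promslog"
)

func main() {
	// o.Opts.Logger.With("file", ..., "group", ...) attaches group context once;
	// every later call inherits those key/value pairs.
	logger := promslog.New(&promslog.Config{}).With("file", "rules.yml", "group", "example")

	// Formerly: level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
	err := errors.New("out of order sample")
	logger.Warn("Rule evaluation result discarded", "err", err, "sample", `up{job="api"} => 1`)

	// Formerly: sort.Strings(copied); slices.Sort is equivalent for a []string.
	copied := []string{"tenant-b", "tenant-a"}
	slices.Sort(copied)
	logger.Info("sorted copy", "tenants", copied)
}
```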
26 changes: 13 additions & 13 deletions rules/manager_test.go
@@ -862,7 +862,7 @@ func TestUpdate_AlwaysRestore(t *testing.T) {
Appendable: st,
Queryable: st,
Context: context.Background(),
Logger: log.NewNopLogger(),
Logger: promslog.NewNopLogger(),
AlwaysRestoreAlertState: true,
})
ruleManager.start()
@@ -894,7 +894,7 @@ func TestUpdate_AlwaysRestoreDoesntAffectUnchangedGroups(t *testing.T) {
Appendable: st,
Queryable: st,
Context: context.Background(),
Logger: log.NewNopLogger(),
Logger: promslog.NewNopLogger(),
AlwaysRestoreAlertState: true,
})
ruleManager.start()
@@ -933,7 +933,7 @@ func TestUpdateSetsSourceTenants(t *testing.T) {
Queryable: st,
QueryFunc: EngineQueryFunc(engine, st),
Context: context.Background(),
Logger: log.NewNopLogger(),
Logger: promslog.NewNopLogger(),
})
ruleManager.start()
defer ruleManager.Stop()
@@ -975,7 +975,7 @@ func TestAlignEvaluationTimeOnInterval(t *testing.T) {
Queryable: st,
QueryFunc: EngineQueryFunc(engine, st),
Context: context.Background(),
Logger: log.NewNopLogger(),
Logger: promslog.NewNopLogger(),
})
ruleManager.start()
defer ruleManager.Stop()
@@ -1047,7 +1047,7 @@ func TestGroupEvaluationContextFuncIsCalledWhenSupplied(t *testing.T) {
Queryable: st,
QueryFunc: mockQueryFunc,
Context: context.Background(),
Logger: log.NewNopLogger(),
Logger: promslog.NewNopLogger(),
GroupEvaluationContextFunc: mockContextWrapFunc,
})

@@ -1080,13 +1080,13 @@ type ruleGroupsTest struct {

// ruleGroupTest forms a testing struct for running tests over rules.
type ruleGroupTest struct {
Name string `yaml:"name"`
Interval model.Duration `yaml:"interval,omitempty"`
Limit int `yaml:"limit,omitempty"`
Rules []rulefmt.Rule `yaml:"rules"`
Labels map[string]string `yaml:"labels,omitempty"`
SourceTenants []string `yaml:"source_tenants,omitempty"`
AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"`
Name string `yaml:"name"`
Interval model.Duration `yaml:"interval,omitempty"`
Limit int `yaml:"limit,omitempty"`
Rules []rulefmt.Rule `yaml:"rules"`
Labels map[string]string `yaml:"labels,omitempty"`
SourceTenants []string `yaml:"source_tenants,omitempty"`
AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"`
}

func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest {
@@ -1109,7 +1109,7 @@ func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest {
Interval: g.Interval,
Limit: g.Limit,
Rules: rtmp,
Labels: g.Labels,
Labels: g.Labels,
SourceTenants: g.SourceTenants,
AlignEvaluationTimeOnInterval: g.AlignEvaluationTimeOnInterval,
})
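In the manager tests the only substantive change is the no-op logger: go-kit's `log.NewNopLogger()` becomes `promslog.NewNopLogger()`, which returns a `*slog.Logger` that discards every record. A standalone sketch (the helper name is ours, not the test suite's):

```go
package main

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

// newTestLogger shows the replacement used in the tests above: a *slog.Logger
// that silently drops all records, keeping test output quiet.
func newTestLogger() *slog.Logger {
	return promslog.NewNopLogger()
}

func main() {
	logger := newTestLogger()
	logger.Info("this record is discarded")
}
```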
21 changes: 10 additions & 11 deletions storage/remote/otlptranslator/prometheusremotewrite/helper.go
@@ -21,15 +21,14 @@ import (
"encoding/hex"
"fmt"
"log"
"log/slog"
"math"
"slices"
"sort"
"strconv"
"unicode/utf8"

"github.com/cespare/xxhash/v2"
gokitlog "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/common/model"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
@@ -245,7 +244,7 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool {
// However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets:
// https://github.com/prometheus/prometheus/issues/13485.
func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice,
resource pcommon.Resource, settings Settings, baseName string, logger gokitlog.Logger) error {
resource pcommon.Resource, settings Settings, baseName string, logger *slog.Logger) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
return err
@@ -337,7 +336,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo
labels := createLabels(baseName+createdSuffix, baseLabels)
c.addTimeSeriesIfNeeded(labels, startTimestampMs, pt.Timestamp())
}
level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram")
logger.Debug("addHistogramDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram")
}

return nil
@@ -451,7 +450,7 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp {
}

func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource,
settings Settings, baseName string, logger gokitlog.Logger) error {
settings Settings, baseName string, logger *slog.Logger) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
return err
@@ -508,7 +507,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin
c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp())
}

level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary")
logger.Debug("addSummaryDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary")
}

return nil
@@ -592,7 +591,7 @@ const defaultIntervalForStartTimestamps = int64(300_000)
// make use of its direct support for Created Timestamps instead.
// See https://opentelemetry.io/docs/specs/otel/metrics/data-model/#resets-and-gaps to know more about how OTel handles
// resets for cumulative metrics.
func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb.Label, settings Settings, typ string, value float64, logger gokitlog.Logger) {
func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb.Label, settings Settings, typ string, value float64, logger *slog.Logger) {
if !settings.EnableCreatedTimestampZeroIngestion {
return
}
@@ -614,7 +613,7 @@ func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb
return
}

level.Debug(logger).Log("msg", "adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value)
logger.Debug("adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value)

// See https://github.com/prometheus/prometheus/issues/14600 for context.
c.addSample(&prompb.Sample{Timestamp: startTs}, labels)
@@ -690,10 +689,10 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta
return
}

ts := convertTimeStamp(timestamp)
sample := &prompb.Sample{
Value: float64(1),
Timestamp: ts,
Value: float64(1),
// convert ns to ms
Timestamp: convertTimeStamp(timestamp),
}
converter.addSample(sample, labels)
}
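The OTLP translator helpers above now take a `*slog.Logger` and emit debug lines directly, and the new comment on the target-info sample notes that `convertTimeStamp` turns OTLP's nanosecond timestamps into the milliseconds Prometheus remote write expects. A hedged sketch of that conversion and the logging shape (`toMilliseconds` is an illustrative stand-in, not the package's actual `convertTimeStamp`):

```go
package main

import (
	"time"

	"github.com/prometheus/common/promslog"
	"go.opentelemetry.io/collector/pdata/pcommon"
)

// toMilliseconds is an illustrative stand-in for convertTimeStamp:
// pcommon.Timestamp is nanoseconds since the Unix epoch, while
// prompb.Sample.Timestamp is milliseconds.
func toMilliseconds(ts pcommon.Timestamp) int64 {
	return int64(ts) / int64(time.Millisecond)
}

func main() {
	logger := promslog.New(&promslog.Config{})
	ts := pcommon.NewTimestampFromTime(time.Now())

	// Same shape as the debug lines added in this file: message, then key/value pairs.
	logger.Debug("addHistogramDataPoints", "sample_ts", toMilliseconds(ts), "type", "histogram")
}
```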
Additional changed file (name not shown):
@@ -21,13 +21,13 @@ import (
"testing"
"time"

"github.com/go-kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"

"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"

"github.com/prometheus/prometheus/prompb"
)
@@ -196,10 +196,10 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
nowMinus6m := pcommon.Timestamp(now.Add(-20 * time.Second).UnixNano())
nowMinus1h := pcommon.Timestamp(now.Add(-1 * time.Hour).UnixNano())
tests := []struct {
name string
overrideValidInterval time.Duration
metric func() pmetric.Metric
want func() map[uint64]*prompb.TimeSeries
name string
}{
{
name: "summary with start time equal to sample timestamp",
@@ -437,7 +437,7 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
ValidIntervalCreatedTimestampZeroIngestion: tt.overrideValidInterval,
},
metric.Name(),
log.NewNopLogger(),
promslog.NewNopLogger(),
)
require.NoError(t, err)

@@ -551,7 +551,7 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
EnableCreatedTimestampZeroIngestion: true,
},
metric.Name(),
log.NewNopLogger(),
promslog.NewNopLogger(),
)
require.NoError(t, err)

Additional changed file (name not shown):
@@ -60,7 +60,6 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
promName,
)
ts, _ := c.getOrCreateTimeSeries(lbls)

ts.Histograms = append(ts.Histograms, histogram)

exemplars, err := getPromExemplars[pmetric.ExponentialHistogramDataPoint](ctx, &c.everyN, pt)
Additional changed file (name not shown):
@@ -20,11 +20,11 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"sort"
"strings"
"time"

"github.com/go-kit/log"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/multierr"
@@ -67,7 +67,7 @@ func NewPrometheusConverter() *PrometheusConverter {
}

// FromMetrics converts pmetric.Metrics to Prometheus remote write format.
func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger log.Logger) (annots annotations.Annotations, errs error) {
func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger *slog.Logger) (annots annotations.Annotations, errs error) {
c.everyN = everyNTimes{n: 128}
resourceMetricsSlice := md.ResourceMetrics()
for i := 0; i < resourceMetricsSlice.Len(); i++ {
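`FromMetrics` now takes a `*slog.Logger` as its final argument. A sketch of a call site, assuming a zero-value `Settings{}` is acceptable for a smoke test (real callers fill it from the OTLP receiver configuration):

```go
package main

import (
	"context"
	"fmt"

	"github.com/prometheus/common/promslog"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
)

func main() {
	converter := prometheusremotewrite.NewPrometheusConverter()
	md := pmetric.NewMetrics() // empty payload, just enough to exercise the signature

	annots, err := converter.FromMetrics(
		context.Background(),
		md,
		prometheusremotewrite.Settings{}, // assumption: zero-value settings for a smoke test
		promslog.NewNopLogger(),          // *slog.Logger replaces go-kit's log.Logger
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("translation warnings:", annots)
}
```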