Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support camelcase matchLabels and matchExpressions in TA config #3418

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions .chloggen/3350-ta-matchlabels.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: target allocator

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: "Support camelcase matchLabels and matchExpressions in target allocator config"

# One or more tracking issues related to the change
issues: [3350]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
59 changes: 59 additions & 0 deletions cmd/otel-allocator/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,11 @@ import (
"fmt"
"io/fs"
"os"
"reflect"
"time"

"github.com/go-logr/logr"
"github.com/mitchellh/mapstructure"
"github.com/prometheus/common/model"
promconfig "github.com/prometheus/prometheus/config"
_ "github.com/prometheus/prometheus/discovery/install"
Expand Down Expand Up @@ -148,6 +150,58 @@ func LoadFromCLI(target *Config, flagSet *pflag.FlagSet) error {
return nil
}

// StringToModelDurationHookFunc returns a mapstructure decode hook that
// converts string values into model.Duration. mapstructure cannot decode
// duration strings such as "30s" into the model.Duration fields used by
// the PrometheusCR sub-config on its own, so this hook performs the
// parsing explicitly.
func StringToModelDurationHookFunc() mapstructure.DecodeHookFunc {
	return func(
		f reflect.Type,
		t reflect.Type,
		data interface{},
	) (interface{}, error) {
		// Only intercept string -> model.Duration conversions; leave
		// everything else to the default decoding logic.
		if f.Kind() != reflect.String {
			return data, nil
		}
		if t != reflect.TypeOf(model.Duration(0)) {
			return data, nil
		}

		// Use the Prometheus duration parser so extended units such as
		// "1d" or "1w" are accepted, matching how model.Duration fields
		// are parsed during plain YAML unmarshaling. (time.ParseDuration
		// would reject those units and returns the wrong type.)
		return model.ParseDuration(data.(string))
	}
}

// decodeSubConfig decodes the raw config fragment t into the destination
// referenced by dc.Result, using the supplied mapstructure decoder
// configuration (tag name, decode hooks, etc.).
func decodeSubConfig(t interface{}, dc mapstructure.DecoderConfig) error {
	dec, err := mapstructure.NewDecoder(&dc)
	if err != nil {
		return err
	}
	return dec.Decode(t)
}

func flexibleUnmarshal(yamlFile []byte, cfg *Config) error {
Comment on lines +153 to +182
Copy link
Contributor

@swiatekm swiatekm Nov 5, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We should add docstrings to these functions, as well as a longer comment explaining why they exist.

t := make(map[string]interface{})
if err := yaml.Unmarshal(yamlFile, &t); err != nil {
return fmt.Errorf("error unmarshaling YAML: %w", err)
}

if t["collector_selector"] != nil {
dc := mapstructure.DecoderConfig{TagName: "yaml", Result: cfg.CollectorSelector}
if err := decodeSubConfig(t["collector_selector"], dc); err != nil {
return err
}
}

if t["prometheus_cr"] != nil {
dc := mapstructure.DecoderConfig{TagName: "yaml", Result: &cfg.PrometheusCR, DecodeHook: StringToModelDurationHookFunc()}
if err := decodeSubConfig(t["prometheus_cr"], dc); err != nil {
return err
}
}

return nil
}

func unmarshal(cfg *Config, configFile string) error {
yamlFile, err := os.ReadFile(configFile)
if err != nil {
Expand All @@ -156,6 +210,11 @@ func unmarshal(cfg *Config, configFile string) error {
if err = yaml.Unmarshal(yamlFile, cfg); err != nil {
return fmt.Errorf("error unmarshaling YAML: %w", err)
}

if err := flexibleUnmarshal(yamlFile, cfg); err != nil {
return err
}

Comment on lines 210 to +217
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We should add a comment here explaining why both these statements are necessary. Incidentally, what we're doing here also means that if someone were to use both versions, the camel case version wins.

return nil
}

Expand Down
258 changes: 258 additions & 0 deletions cmd/otel-allocator/config/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -194,6 +194,264 @@ func TestLoad(t *testing.T) {
},
wantErr: assert.NoError,
},
{
name: "service monitor pod monitor selector with camelcase",
args: args{
file: "./testdata/pod_service_selector_camelcase_test.yaml",
},
want: Config{
AllocationStrategy: DefaultAllocationStrategy,
CollectorSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "default.test",
"app.kubernetes.io/managed-by": "opentelemetry-operator",
},
},
FilterStrategy: DefaultFilterStrategy,
PrometheusCR: PrometheusCRConfig{
PodMonitorSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"release": "test",
},
},
ServiceMonitorSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"release": "test",
},
},
ScrapeInterval: DefaultCRScrapeInterval,
},
PromConfig: &promconfig.Config{
GlobalConfig: promconfig.GlobalConfig{
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeProtocols: defaultScrapeProtocols,
ScrapeTimeout: model.Duration(10 * time.Second),
EvaluationInterval: model.Duration(60 * time.Second),
},
Runtime: promconfig.DefaultRuntimeConfig,
ScrapeConfigs: []*promconfig.ScrapeConfig{
{
JobName: "prometheus",
EnableCompression: true,
HonorTimestamps: true,
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeProtocols: defaultScrapeProtocols,
ScrapeTimeout: model.Duration(10 * time.Second),
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: commonconfig.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: true,
},
ServiceDiscoveryConfigs: []discovery.Config{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "prom.domain:9001"},
{model.AddressLabel: "prom.domain:9002"},
{model.AddressLabel: "prom.domain:9003"},
},
Labels: model.LabelSet{
"my": "label",
},
Source: "0",
},
},
},
},
},
},
},
wantErr: assert.NoError,
},
{
name: "service monitor pod monitor selector with matchexpressions",
args: args{
file: "./testdata/pod_service_selector_expressions_test.yaml",
},
want: Config{
AllocationStrategy: DefaultAllocationStrategy,
CollectorSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "app.kubernetes.io/instance",
Operator: metav1.LabelSelectorOpIn,
Values: []string{
"default.test",
},
},
{
Key: "app.kubernetes.io/managed-by",
Operator: metav1.LabelSelectorOpIn,
Values: []string{
"opentelemetry-operator",
},
},
},
},
FilterStrategy: DefaultFilterStrategy,
PrometheusCR: PrometheusCRConfig{
PodMonitorSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "release",
Operator: metav1.LabelSelectorOpIn,
Values: []string{
"test",
},
},
},
},
ServiceMonitorSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "release",
Operator: metav1.LabelSelectorOpIn,
Values: []string{
"test",
},
},
},
},
ScrapeInterval: DefaultCRScrapeInterval,
},
PromConfig: &promconfig.Config{
GlobalConfig: promconfig.GlobalConfig{
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeProtocols: defaultScrapeProtocols,
ScrapeTimeout: model.Duration(10 * time.Second),
EvaluationInterval: model.Duration(60 * time.Second),
},
Runtime: promconfig.DefaultRuntimeConfig,
ScrapeConfigs: []*promconfig.ScrapeConfig{
{
JobName: "prometheus",
EnableCompression: true,
HonorTimestamps: true,
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeProtocols: defaultScrapeProtocols,
ScrapeTimeout: model.Duration(10 * time.Second),
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: commonconfig.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: true,
},
ServiceDiscoveryConfigs: []discovery.Config{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "prom.domain:9001"},
{model.AddressLabel: "prom.domain:9002"},
{model.AddressLabel: "prom.domain:9003"},
},
Labels: model.LabelSet{
"my": "label",
},
Source: "0",
},
},
},
},
},
},
},
wantErr: assert.NoError,
},
{
name: "service monitor pod monitor selector with camelcase matchexpressions",
args: args{
file: "./testdata/pod_service_selector_camelcase_expressions_test.yaml",
},
want: Config{
AllocationStrategy: DefaultAllocationStrategy,
CollectorSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "app.kubernetes.io/instance",
Operator: metav1.LabelSelectorOpIn,
Values: []string{
"default.test",
},
},
{
Key: "app.kubernetes.io/managed-by",
Operator: metav1.LabelSelectorOpIn,
Values: []string{
"opentelemetry-operator",
},
},
},
},
FilterStrategy: DefaultFilterStrategy,
PrometheusCR: PrometheusCRConfig{
PodMonitorSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "release",
Operator: metav1.LabelSelectorOpIn,
Values: []string{
"test",
},
},
},
},
ServiceMonitorSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "release",
Operator: metav1.LabelSelectorOpIn,
Values: []string{
"test",
},
},
},
},
ScrapeInterval: DefaultCRScrapeInterval,
},
PromConfig: &promconfig.Config{
GlobalConfig: promconfig.GlobalConfig{
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeProtocols: defaultScrapeProtocols,
ScrapeTimeout: model.Duration(10 * time.Second),
EvaluationInterval: model.Duration(60 * time.Second),
},
Runtime: promconfig.DefaultRuntimeConfig,
ScrapeConfigs: []*promconfig.ScrapeConfig{
{
JobName: "prometheus",
EnableCompression: true,
HonorTimestamps: true,
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeProtocols: defaultScrapeProtocols,
ScrapeTimeout: model.Duration(10 * time.Second),
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: commonconfig.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: true,
},
ServiceDiscoveryConfigs: []discovery.Config{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "prom.domain:9001"},
{model.AddressLabel: "prom.domain:9002"},
{model.AddressLabel: "prom.domain:9003"},
},
Labels: model.LabelSet{
"my": "label",
},
Source: "0",
},
},
},
},
},
},
},
wantErr: assert.NoError,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# Test fixture: target allocator config using camelCase matchExpressions
# selectors (the Kubernetes LabelSelector field names) instead of the
# snake_case keys, to exercise the flexible unmarshaling path.
collector_selector:
  matchExpressions:
  - key: "app.kubernetes.io/instance"
    operator: "In"
    values:
    - "default.test"
  - key: "app.kubernetes.io/managed-by"
    operator: "In"
    values:
    - "opentelemetry-operator"
prometheus_cr:
  # Selectors for which PodMonitor/ServiceMonitor CRs are picked up.
  pod_monitor_selector:
    matchExpressions:
    - key: "release"
      operator: "In"
      values:
      - "test"
  service_monitor_selector:
    matchExpressions:
    - key: "release"
      operator: "In"
      values:
      - "test"
# Embedded Prometheus scrape configuration with a single static job.
config:
  scrape_configs:
  - job_name: prometheus
    static_configs:
    - targets: ["prom.domain:9001", "prom.domain:9002", "prom.domain:9003"]
      labels:
        my: label
Loading
Loading