
Commit 840eb9e

Service monitor scrapes Tempo metrics for monolithic

Signed-off-by: Ruben Vargas <[email protected]>
1 parent c3d8027 commit 840eb9e

File tree

11 files changed: +329, -49 lines
Lines changed: 16 additions & 0 deletions

@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern (e.g. tempostack, tempomonolithic, github action)
+component: tempomonolithic
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Scrape Tempo metrics for monolithic deployments.
+
+# One or more tracking issues related to the change
+issues: [1275]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:

internal/manifests/monolithic/configmap.go

Lines changed: 2 additions & 1 deletion

@@ -177,8 +177,9 @@ func buildTempoConfig(opts Options) ([]byte, error) {
 	config.Server.HttpServerReadTimeout = opts.Tempo.Spec.Timeout.Duration
 	config.Server.HttpServerWriteTimeout = opts.Tempo.Spec.Timeout.Duration
 	if tempo.Spec.Multitenancy.IsGatewayEnabled() {
+		// We need this to scrape metrics.
+		config.Server.HTTPListenAddress = "0.0.0.0"
 		// all connections to tempo must go via gateway
-		config.Server.HTTPListenAddress = "localhost"
 		config.Server.GRPCListenAddress = "localhost"
 	}
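Note: the listen-address flip above is the core of the fix. With the gateway enabled, Tempo bound its HTTP server to localhost only, so Prometheus could never reach /metrics from outside the pod. Below is a minimal regression-test sketch of the new behavior, assuming the repo's testify-based test style, a hypothetical gatewayEnabledTempo() fixture that builds a gateway-enabled TempoMonolithic the way the existing configmap tests do, and that these fields render as http_listen_address / grpc_listen_address in tempo.yaml:

package monolithic

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestHTTPListenAddressWithGateway(t *testing.T) {
	// gatewayEnabledTempo is a hypothetical fixture returning a TempoMonolithic
	// with the multitenancy gateway enabled, mirroring the existing configmap tests.
	opts := Options{Tempo: gatewayEnabledTempo()}

	cfg, err := buildTempoConfig(opts)
	require.NoError(t, err)

	// HTTP now binds all interfaces so the ServiceMonitor can scrape /metrics ...
	require.Contains(t, string(cfg), "http_listen_address: 0.0.0.0")
	// ... while gRPC stays on localhost, keeping trace traffic behind the gateway.
	require.Contains(t, string(cfg), "grpc_listen_address: localhost")
}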

internal/manifests/monolithic/servicemonitor.go

Lines changed: 7 additions & 2 deletions

@@ -12,9 +12,14 @@ func BuildServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
 	tempo := opts.Tempo
 	if tempo.Spec.Multitenancy.IsGatewayEnabled() {
 		labels := ComponentLabels(manifestutils.GatewayComponentName, tempo.Name)
-		return servicemonitor.NewServiceMonitor(tempo.Namespace, tempo.Name, labels, false, manifestutils.GatewayComponentName, manifestutils.GatewayInternalHttpPortName)
+		return servicemonitor.NewServiceMonitor(tempo.Namespace, tempo.Name, labels, false, manifestutils.GatewayComponentName,
+			[]string{
+				manifestutils.GatewayInternalHttpPortName,
+				manifestutils.HttpPortName,
+			})
 	} else {
 		labels := ComponentLabels(manifestutils.TempoMonolithComponentName, tempo.Name)
-		return servicemonitor.NewServiceMonitor(tempo.Namespace, tempo.Name, labels, false, manifestutils.TempoMonolithComponentName, manifestutils.HttpPortName)
+		return servicemonitor.NewServiceMonitor(
+			tempo.Namespace, tempo.Name, labels, false, manifestutils.TempoMonolithComponentName, []string{manifestutils.HttpPortName})
 	}
 }

internal/manifests/monolithic/servicemonitor_test.go

Lines changed: 17 additions & 1 deletion

@@ -120,7 +120,23 @@ func TestBuildServiceMonitorGateway(t *testing.T) {
 					TargetLabel:  "job",
 				},
 			},
-		}},
+		}, {
+			Scheme: "http",
+			Port:   "http",
+			Path:   "/metrics",
+			RelabelConfigs: []*monitoringv1.RelabelConfig{
+				{
+					SourceLabels: []monitoringv1.LabelName{"__meta_kubernetes_service_label_app_kubernetes_io_instance"},
+					TargetLabel:  "cluster",
+				},
+				{
+					SourceLabels: []monitoringv1.LabelName{"__meta_kubernetes_namespace", "__meta_kubernetes_service_label_app_kubernetes_io_component"},
+					Separator:    ptr.To("/"),
+					TargetLabel:  "job",
+				},
+			},
+		},
+		},
 		NamespaceSelector: monitoringv1.NamespaceSelector{
 			MatchNames: []string{"default"},
 		},

internal/manifests/monolithic/services.go

Lines changed: 6 additions & 0 deletions

@@ -146,6 +146,12 @@ func buildGatewayService(opts Options) *corev1.Service {
 			Port:       manifestutils.GatewayPortInternalHTTPServer,
 			TargetPort: intstr.FromString(manifestutils.GatewayInternalHttpPortName),
 		},
+		{
+			Name:       manifestutils.HttpPortName,
+			Protocol:   corev1.ProtocolTCP,
+			Port:       manifestutils.PortHTTPServer,
+			TargetPort: intstr.FromString(manifestutils.HttpPortName),
+		},
 	}

 	if tempo.Spec.Ingestion != nil && tempo.Spec.Ingestion.OTLP != nil &&

internal/manifests/monolithic/services_test.go

Lines changed: 6 additions & 0 deletions

@@ -269,6 +269,12 @@ func TestBuildServices(t *testing.T) {
 					Port:       8081,
 					TargetPort: intstr.FromString("internal"),
 				},
+				{
+					Name:       "http",
+					Protocol:   corev1.ProtocolTCP,
+					Port:       3200,
+					TargetPort: intstr.FromString("http"),
+				},
 				{
 					Name:       "otlp-grpc",
 					Protocol:   corev1.ProtocolTCP,

internal/manifests/servicemonitor/servicemonitor.go

Lines changed: 28 additions & 22 deletions

@@ -35,14 +35,14 @@ func BuildServiceMonitors(params manifestutils.Params) []client.Object {

 func buildServiceMonitor(params manifestutils.Params, component string, port string) *monitoringv1.ServiceMonitor {
 	labels := manifestutils.ComponentLabels(component, params.Tempo.Name)
-	return NewServiceMonitor(params.Tempo.Namespace, params.Tempo.Name, labels, params.CtrlConfig.Gates.HTTPEncryption, component, port)
+	return NewServiceMonitor(params.Tempo.Namespace, params.Tempo.Name, labels, params.CtrlConfig.Gates.HTTPEncryption, component, []string{port})
 }

 func buildFrontEndServiceMonitor(params manifestutils.Params, port string) *monitoringv1.ServiceMonitor {
 	labels := manifestutils.ComponentLabels(manifestutils.QueryFrontendComponentName, params.Tempo.Name)
 	tls := params.CtrlConfig.Gates.HTTPEncryption && params.Tempo.Spec.Template.Gateway.Enabled
 	return NewServiceMonitor(params.Tempo.Namespace, params.Tempo.Name, labels, tls,
-		manifestutils.QueryFrontendComponentName, port)
+		manifestutils.QueryFrontendComponentName, []string{port})
 }

 // NewServiceMonitor creates a ServiceMonitor.
@@ -52,7 +52,7 @@ func NewServiceMonitor(
 	labels labels.Set,
 	tls bool,
 	component string,
-	port string,
+	ports []string,
 ) *monitoringv1.ServiceMonitor {
 	scheme := "http"
 	var tlsConfig *monitoringv1.TLSConfig
@@ -91,6 +91,30 @@ func NewServiceMonitor(
 		}
 	}

+	var endpoints []monitoringv1.Endpoint
+
+	for _, port := range ports {
+		endpoints = append(endpoints, monitoringv1.Endpoint{
+			Scheme:    scheme,
+			Port:      port,
+			Path:      "/metrics",
+			TLSConfig: tlsConfig,
+			// Custom relabel configs to be compatible with predefined Tempo dashboards:
+			// https://grafana.com/docs/tempo/latest/operations/monitoring/#dashboards
+			RelabelConfigs: []*monitoringv1.RelabelConfig{
+				{
+					SourceLabels: []monitoringv1.LabelName{"__meta_kubernetes_service_label_app_kubernetes_io_instance"},
+					TargetLabel:  "cluster",
+				},
+				{
+					SourceLabels: []monitoringv1.LabelName{"__meta_kubernetes_namespace", "__meta_kubernetes_service_label_app_kubernetes_io_component"},
+					Separator:    ptr.To("/"),
+					TargetLabel:  "job",
+				},
+			},
+		})
+	}
+
 	return &monitoringv1.ServiceMonitor{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: monitoringv1.SchemeGroupVersion.String(),
@@ -102,25 +126,7 @@ func NewServiceMonitor(
 			Labels: labels,
 		},
 		Spec: monitoringv1.ServiceMonitorSpec{
-			Endpoints: []monitoringv1.Endpoint{{
-				Scheme:    scheme,
-				Port:      port,
-				Path:      "/metrics",
-				TLSConfig: tlsConfig,
-				// Custom relabel configs to be compatible with predefined Tempo dashboards:
-				// https://grafana.com/docs/tempo/latest/operations/monitoring/#dashboards
-				RelabelConfigs: []*monitoringv1.RelabelConfig{
-					{
-						SourceLabels: []monitoringv1.LabelName{"__meta_kubernetes_service_label_app_kubernetes_io_instance"},
-						TargetLabel:  "cluster",
-					},
-					{
-						SourceLabels: []monitoringv1.LabelName{"__meta_kubernetes_namespace", "__meta_kubernetes_service_label_app_kubernetes_io_component"},
-						Separator:    ptr.To("/"),
-						TargetLabel:  "job",
-					},
-				},
-			}},
+			Endpoints: endpoints,
 			NamespaceSelector: monitoringv1.NamespaceSelector{
 				MatchNames: []string{namespace},
 			},
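The signature change from port string to ports []string is what lets the gateway case register two scrape endpoints in a single ServiceMonitor. Below is a minimal usage sketch, written as if inside the operator module (the internal/ packages are not importable from elsewhere) and assuming the module path github.com/grafana/tempo-operator:

package main

import (
	"fmt"

	"github.com/grafana/tempo-operator/internal/manifests/manifestutils"
	"github.com/grafana/tempo-operator/internal/manifests/servicemonitor"
)

func main() {
	labels := manifestutils.ComponentLabels(manifestutils.GatewayComponentName, "tmmono")

	// tls=false matches the monolithic call sites above and skips the TLS scrape config.
	sm := servicemonitor.NewServiceMonitor("tempo", "tmmono", labels, false,
		manifestutils.GatewayComponentName,
		[]string{
			manifestutils.GatewayInternalHttpPortName, // gateway self-metrics
			manifestutils.HttpPortName,                // Tempo /metrics
		})

	// NewServiceMonitor emits one monitoringv1.Endpoint per port name, each
	// carrying the relabel configs expected by the upstream Tempo dashboards.
	fmt.Println(len(sm.Spec.Endpoints)) // 2
}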
Lines changed: 173 additions & 0 deletions

@@ -0,0 +1,173 @@
+Name:             tempo-tmmono-0
+Namespace:        tempo
+Priority:         0
+Node:             crc/192.168.126.11
+Start Time:       Wed, 14 May 2025 22:00:36 -0600
+Labels:           app.kubernetes.io/component=tempo
+                  app.kubernetes.io/instance=tmmono
+                  app.kubernetes.io/managed-by=tempo-operator
+                  app.kubernetes.io/name=tempo-monolithic
+                  apps.kubernetes.io/pod-index=0
+                  controller-revision-hash=tempo-tmmono-587bb7976b
+                  statefulset.kubernetes.io/pod-name=tempo-tmmono-0
+Annotations:      k8s.ovn.org/pod-networks:
+                    {"default":{"ip_addresses":["10.217.0.120/23"],"mac_address":"0a:58:0a:d9:00:78","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0....
+                  k8s.v1.cni.cncf.io/network-status:
+                    [{
+                        "name": "ovn-kubernetes",
+                        "interface": "eth0",
+                        "ips": [
+                            "10.217.0.120"
+                        ],
+                        "mac": "0a:58:0a:d9:00:78",
+                        "default": true,
+                        "dns": {}
+                    }]
+                  openshift.io/scc: restricted-v2
+                  seccomp.security.alpha.kubernetes.io/pod: runtime/default
+                  tempo.grafana.com/tempoConfig.hash: f7f667dd55f49777f17305255e46f2241e6728d95c531cc77170f54e032244be
+Status:           Pending
+IP:               10.217.0.120
+IPs:
+  IP:  10.217.0.120
+Controlled By:    StatefulSet/tempo-tmmono
+Containers:
+  tempo:
+    Container ID:
+    Image:         docker.io/grafana/tempo:2.7.2
+    Image ID:
+    Ports:         3200/TCP, 3101/TCP, 4317/TCP, 4318/TCP
+    Host Ports:    0/TCP, 0/TCP, 0/TCP, 0/TCP
+    Args:
+      -config.file=/conf/tempo.yaml
+      -mem-ballast-size-mbs=1024
+      -log.level=info
+      --storage.trace.s3.secret_key=$(S3_SECRET_KEY)
+      --storage.trace.s3.access_key=$(S3_ACCESS_KEY)
+    State:          Waiting
+      Reason:       CreateContainerConfigError
+    Ready:          False
+    Restart Count:  0
+    Readiness:      http-get http://:tempo-internal/ready delay=15s timeout=1s period=10s #success=1 #failure=3
+    Environment:
+      S3_SECRET_KEY:  <set to the key 'access_key_secret' in secret 'aws-sts'>  Optional: false
+      S3_ACCESS_KEY:  <set to the key 'access_key_id' in secret 'aws-sts'>      Optional: false
+    Mounts:
+      /conf from tempo-conf (ro)
+      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wpk74 (ro)
+      /var/tempo from tempo-storage (rw)
+  jaeger-query:
+    Container ID:  cri-o://de0274c3fd0d76c0c54b493c822c79c42cbe35d67d20dd0d6af2a4fd29dceedf
+    Image:         docker.io/jaegertracing/jaeger-query:1.68.0
+    Image ID:      docker.io/jaegertracing/jaeger-query@sha256:51d85a53cb9a1bba8b016537b03023be02ae666f18af406fd98904b78a5df5f4
+    Ports:         16685/TCP, 16686/TCP, 16687/TCP
+    Host Ports:    0/TCP, 0/TCP, 0/TCP
+    Args:
+      --query.base-path=/
+      --span-storage.type=grpc
+      --grpc-storage.server=localhost:7777
+      --query.bearer-token-propagation=true
+    State:          Running
+      Started:      Wed, 14 May 2025 22:00:41 -0600
+    Ready:          True
+    Restart Count:  0
+    Environment:    <none>
+    Mounts:
+      /tmp from tempo-query-tmp (rw)
+      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wpk74 (ro)
+  tempo-query:
+    Container ID:  cri-o://c32026ba2f28bb23dd09ebbe82e8e3ae1f01b56fa2be21d7c708642fc199bc19
+    Image:         docker.io/grafana/tempo-query:2.7.2
+    Image ID:      docker.io/grafana/tempo-query@sha256:284a83d6d2b3430d7b80d66657377812f73ceaaa9c9939cb79f08bf8e9e2a88d
+    Port:          7777/TCP
+    Host Port:     0/TCP
+    Args:
+      -config=/conf/tempo-query.yaml
+    State:          Running
+      Started:      Wed, 14 May 2025 22:00:47 -0600
+    Ready:          True
+    Restart Count:  0
+    Environment:    <none>
+    Mounts:
+      /conf from tempo-conf (ro)
+      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wpk74 (ro)
+  oauth-proxy:
+    Container ID:  cri-o://4ad516e99e04b4f8051b5e47c73acd0fe5b9ca5ce12d1f821cb6f4b1c6605673
+    Image:         quay.io/openshift/origin-oauth-proxy:4.14
+    Image ID:      quay.io/openshift/origin-oauth-proxy@sha256:1ece77d14a685ef2397c3a327844eea45ded00c95471e9e333e35ef3860b1895
+    Port:          8443/TCP
+    Host Port:     0/TCP
+    Args:
+      --cookie-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+      --https-address=:8443
+      --openshift-service-account=tempo-tmmono
+      --provider=openshift
+      --tls-cert=/etc/tls/private/tls.crt
+      --tls-key=/etc/tls/private/tls.key
+      --upstream=http://localhost:16686
+      --upstream-timeout=30s
+      --openshift-sar={"namespace": "tempo", "resource": "pods", "verb": "get"}
+    State:          Running
+      Started:      Wed, 14 May 2025 22:01:01 -0600
+    Ready:          True
+    Restart Count:  0
+    Readiness:      http-get https://:oauth-proxy/oauth/healthz delay=10s timeout=5s period=10s #success=1 #failure=3
+    Environment:    <none>
+    Mounts:
+      /etc/tls/private from tmmono-ui-oauth-proxy-tls (rw)
+      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wpk74 (ro)
+Conditions:
+  Type                        Status
+  PodReadyToStartContainers   True
+  Initialized                 True
+  Ready                       False
+  ContainersReady             False
+  PodScheduled                True
+Volumes:
+  tempo-storage:
+    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
+    ClaimName:  tempo-storage-tempo-tmmono-0
+    ReadOnly:   false
+  tempo-conf:
+    Type:      ConfigMap (a volume populated by a ConfigMap)
+    Name:      tempo-tmmono-config
+    Optional:  false
+  tempo-query-tmp:
+    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
+    Medium:
+    SizeLimit:  <unset>
+  tmmono-ui-oauth-proxy-tls:
+    Type:        Secret (a volume populated by a Secret)
+    SecretName:  tmmono-ui-oauth-proxy-tls
+    Optional:    false
+  kube-api-access-wpk74:
+    Type:                    Projected (a volume that contains injected data from multiple sources)
+    TokenExpirationSeconds:  3607
+    ConfigMapName:           kube-root-ca.crt
+    ConfigMapOptional:       <nil>
+    DownwardAPI:             true
+    ConfigMapName:           openshift-service-ca.crt
+    ConfigMapOptional:       <nil>
+QoS Class:                   BestEffort
+Node-Selectors:              <none>
+Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
+                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
+Events:
+  Type     Reason          Age                  From               Message
+  ----     ------          ----                 ----               -------
+  Normal   Scheduled       2m32s                default-scheduler  Successfully assigned tempo/tempo-tmmono-0 to crc
+  Normal   AddedInterface  2m31s                multus             Add eth0 [10.217.0.120/23] from ovn-kubernetes
+  Normal   Pulling         2m31s                kubelet            Pulling image "docker.io/jaegertracing/jaeger-query:1.68.0"
+  Normal   Created         2m27s                kubelet            Created container jaeger-query
+  Normal   Started         2m27s                kubelet            Started container jaeger-query
+  Normal   Pulled          2m27s                kubelet            Successfully pulled image "docker.io/jaegertracing/jaeger-query:1.68.0" in 4.165s (4.165s including waiting). Image size: 86423646 bytes.
+  Normal   Pulling         2m27s                kubelet            Pulling image "docker.io/grafana/tempo-query:2.7.2"
+  Normal   Pulled          2m21s                kubelet            Successfully pulled image "docker.io/grafana/tempo-query:2.7.2" in 6.238s (6.238s including waiting). Image size: 64575333 bytes.
+  Normal   Created         2m21s                kubelet            Created container tempo-query
+  Normal   Started         2m21s                kubelet            Started container tempo-query
+  Normal   Pulling         2m21s                kubelet            Pulling image "quay.io/openshift/origin-oauth-proxy:4.14"
+  Normal   Pulled          2m7s                 kubelet            Successfully pulled image "quay.io/openshift/origin-oauth-proxy:4.14" in 13.309s (13.309s including waiting). Image size: 514675129 bytes.
+  Normal   Created         2m7s                 kubelet            Created container oauth-proxy
+  Normal   Started         2m7s                 kubelet            Started container oauth-proxy
+  Warning  Failed          85s (x6 over 2m31s)  kubelet            Error: couldn't find key access_key_secret in Secret tempo/aws-sts
+  Normal   Pulled          70s (x7 over 2m31s)  kubelet            Container image "docker.io/grafana/tempo:2.7.2" already present on machine
