diff --git a/charts/capsule/templates/servicemonitor.yaml b/charts/capsule/templates/servicemonitor.yaml
index 0406c5d8f..852a1cfdf 100644
--- a/charts/capsule/templates/servicemonitor.yaml
+++ b/charts/capsule/templates/servicemonitor.yaml
@@ -17,23 +17,35 @@ metadata:
   {{- end }}
 spec:
   endpoints:
-  {{- with .endpoint }}
-  - interval: {{ .interval }}
-    port: metrics
-    path: /metrics
-    {{- with .scrapeTimeout }}
-    scrapeTimeout: {{ . }}
+  {{- range .endpoints }}
+  - {{- if .port }}
+    port: {{ .port }}
+    {{- end }}
+    {{- if .interval }}
+    interval: {{ .interval }}
+    {{- end }}
+    {{- if .path }}
+    path: {{ .path }}
+    {{- end }}
+    {{- if .scheme }}
+    scheme: {{ .scheme }}
+    {{- end }}
+    {{- if .scrapeTimeout }}
+    scrapeTimeout: {{ .scrapeTimeout }}
+    {{- end }}
+    {{- with .params }}
+    params: {{ toYaml . | nindent 6 }}
     {{- end }}
     {{- with .metricRelabelings }}
-    metricRelabelings: {{- toYaml . | nindent 6 }}
+    metricRelabelings: {{ toYaml . | nindent 6 }}
     {{- end }}
     {{- with .relabelings }}
-    relabelings: {{- toYaml . | nindent 6 }}
+    relabelings: {{ toYaml . | nindent 6 }}
     {{- end }}
   {{- end }}
   jobLabel: app.kubernetes.io/name
   {{- with .targetLabels }}
-  targetLabels: {{- toYaml . | nindent 4 }}
+  targetLabels: {{ toYaml . | nindent 4 }}
   {{- end }}
   selector:
     matchLabels:
diff --git a/charts/capsule/values.yaml b/charts/capsule/values.yaml
index 769e7b153..2543bef92 100644
--- a/charts/capsule/values.yaml
+++ b/charts/capsule/values.yaml
@@ -322,30 +322,42 @@ monitoring:
     folder: ""
 
   # ServiceMonitor
-  serviceMonitor:
-    # -- Enable ServiceMonitor
-    enabled: false
-    # -- Install the ServiceMonitor into a different Namespace, as the monitoring stack one (default: the release one)
-    namespace: ''
-    # -- Assign additional labels according to Prometheus' serviceMonitorSelector matching labels
-    labels: {}
-    # -- Assign additional Annotations
-    annotations: {}
-    # -- Change matching labels
-    matchLabels: {}
-    # -- Set targetLabels for the serviceMonitor
-    targetLabels: []
-    endpoint:
-      # -- Set the scrape interval for the endpoint of the serviceMonitor
-      interval: "15s"
-      # -- Set the scrape timeout for the endpoint of the serviceMonitor
+# ServiceMonitor
+serviceMonitor:
+  # -- Enable ServiceMonitor
+  enabled: true
+  # -- Install the ServiceMonitor into a different Namespace, as the monitoring stack one (default: the release one)
+  namespace: ''
+  # -- Assign additional labels according to Prometheus' serviceMonitorSelector matching labels
+  labels: {}
+  # -- Assign additional Annotations
+  annotations: {}
+  # -- Change matching labels
+  matchLabels: {}
+  # -- Set targetLabels for the serviceMonitor
+  targetLabels: []
+  # -- Define multiple scrape endpoints
+  endpoints:
+    # Default metrics scrape
+    - interval: "15s"
       scrapeTimeout: ""
-      # -- Set metricRelabelings for the endpoint of the serviceMonitor
       metricRelabelings: []
-      # -- Set relabelings for the endpoint of the serviceMonitor
-      relabelings: []
-
-
+      relabelings:
+        - targetLabel: instance_name
+          replacement: "demo"
+    # # Endpoint-a
+    # - port: http
+    #   interval: 10s
+    #   scrapeTimeout: 5s
+    #   path: /probe
+    #   scheme: http
+
+    # # Endpoint-b
+    # - port: http
+    #   interval: 10s
+    #   scrapeTimeout: 5s
+    #   path: /probe
+    #   scheme: http
 # Webhooks configurations
 webhooks:
   # -- When `crds.exclusive` is `true` the webhooks will be installed
@@ -370,7 +382,6 @@ webhooks:
   # Admission Webhook Configuration
   hooks:
-    resourcepools:
     pools:
       # -- Enable the Hook
diff --git a/charts/values.yaml b/charts/values.yaml
new file mode 100644
index 000000000..0e40cd608
--- /dev/null
+++ b/charts/values.yaml
@@ -0,0 +1,232 @@
+nameOverride: "capsule"
+fullnameOverride: "capsule"
+crds:
+  install: true
+  # -- Only install the CRDs, no other primitives
+  exclusive: false
+tls:
+  enableController: true
+  create: true
+  name:
+proxy:
+  enabled: false
+manager:
+  rbac:
+    create: true
+    existingClusterRoles: []
+    # - cluster-admin
+    # -- Specifies further cluster roles to be added to the capsule manager service account.
+    existingRoles:
+  kind: Deployment
+  image:
+    registry: ghcr.io
+    repository: projectcapsule/capsule
+    pullPolicy: IfNotPresent
+    tag: v0.10.7
+  hostNetwork: false
+  webhookPort: 9443
+  options:
+    capsuleConfiguration: default
+    logLevel: '4'
+    # -- Boolean, enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash
+    forceTenantPrefix: true
+    # -- Override the capsule user groups
+    capsuleUserGroups: ["projectcapsule.dev"]
+    # -- If specified, disallows creation of namespaces matching the passed regexp
+    protectedNamespaceRegex: ""
+    # -- Specifies whether capsule webhooks certificates should be generated by capsule operator
+    generateCertificates: true
+    nodeMetadata:
+      forbiddenLabels:
+        denied: []
+        deniedRegex: ""
+      forbiddenAnnotations:
+        denied: []
+        deniedRegex: ""
+  livenessProbe:
+    httpGet:
+      path: /healthz
+      port: 10080
+  readinessProbe:
+    httpGet:
+      path: /readyz
+      port: 10080
+  resources:
+imagePullSecrets: []
+podAnnotations: {}
+# The following annotations guarantee scheduling for critical add-on pods
+# podAnnotations:
+#   scheduler.alpha.kubernetes.io/critical-pod:
+priorityClassName: '' # system-cluster-critical
+podSecurityContext:
+  seccompProfile:
+    type: "RuntimeDefault"
+  runAsGroup: 1002
+  runAsNonRoot: true
+  runAsUser: 1002
+securityContext:
+  capabilities:
+    drop:
+      - ALL
+  allowPrivilegeEscalation: false
+  readOnlyRootFilesystem: true
+nodeSelector: {}
+tolerations: []
+replicaCount: 1
+# affinity:
+#   {{if .CloudKernel.IsDev}}
+#   type: preferredDuringSchedulingIgnoredDuringExecution
+#   {{else}}
+#   type: requiredDuringSchedulingIgnoredDuringExecution
+#   {{end}}
+topologySpreadConstraints:
+jobs:
+  image:
+    registry: docker.io
+    repository: clastix/kubectl
+    pullPolicy: IfNotPresent
+    tag: v1.33.2
+  annotations:
+    "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
+  restartPolicy: Never
+  ttlSecondsAfterFinished: 60
+  podSecurityContext:
+    seccompProfile:
+      type: "RuntimeDefault"
+  securityContext:
+    allowPrivilegeEscalation: false
+    capabilities:
+      drop:
+        - ALL
+    readOnlyRootFilesystem: true
+    runAsGroup: 1002
+    runAsNonRoot: true
+    runAsUser: 1002
+  resources: {}
+  nodeSelector: {}
+  tolerations: []
+  # affinity:
+  #   {{if .CloudKernel.IsDev}}
+  #   type: preferredDuringSchedulingIgnoredDuringExecution
+  #   {{else}}
+  #   type: requiredDuringSchedulingIgnoredDuringExecution
+  #   {{end}}
+  topologySpreadConstraints: []
+  priorityClassName:
+serviceAccount:
+  create: true
+  name:
+certManager:
+  generateCertificates: false
+webhooks:
+  # -- When `crds.exclusive` is `true` the webhooks will be installed
+  exclusive: false
+  mutatingWebhooksTimeoutSeconds: 30
+  validatingWebhooksTimeoutSeconds: 30
+  hooks:
+    namespaceOwnerReference:
+      failurePolicy: Fail
+    cordoning:
+      failurePolicy: Fail
+      namespaceSelector:
+        matchExpressions:
+          - key: capsule.clastix.io/tenant
+            operator: Exists
+    ingresses:
+      failurePolicy: Fail
+      namespaceSelector:
+        matchExpressions:
+          - key: capsule.clastix.io/tenant
+            operator: Exists
+    namespaces:
+      failurePolicy: Fail
+    networkpolicies:
+      failurePolicy: Fail
+      namespaceSelector:
+        matchExpressions:
+          - key: capsule.clastix.io/tenant
+            operator: Exists
+    pods:
+      failurePolicy: Fail
+      namespaceSelector:
+        matchExpressions:
+          - key: capsule.clastix.io/tenant
+            operator: Exists
+    persistentvolumeclaims:
+      failurePolicy: Fail
+      namespaceSelector:
+        matchExpressions:
+          - key: capsule.clastix.io/tenant
+            operator: Exists
+    tenants:
+      failurePolicy: Fail
+    tenantResourceObjects:
+      failurePolicy: Fail
+    services:
+      failurePolicy: Fail
+      namespaceSelector:
+        matchExpressions:
+          - key: capsule.clastix.io/tenant
+            operator: Exists
+    nodes:
+      failurePolicy: Fail
+    defaults:
+      ingress:
+        failurePolicy: Fail
+        namespaceSelector:
+          matchExpressions:
+            - key: capsule.clastix.io/tenant
+              operator: Exists
+      pvc:
+        failurePolicy: Fail
+        namespaceSelector:
+          matchExpressions:
+            - key: capsule.clastix.io/tenant
+              operator: Exists
+      pods:
+        failurePolicy: Fail
+        namespaceSelector:
+          matchExpressions:
+            - key: capsule.clastix.io/tenant
+              operator: Exists
+serviceMonitor:
+  enabled: true
+  endpoints:
+    - interval: "15s"
+      relabelings:
+        - targetLabel: instance_name
+          replacement: "demo"
+    - port: http
+      interval: 10s
+      scrapeTimeout: 5s
+      path: /probe
+      scheme: http
+      params:
+        module: [http_2xx]
+        target: [http://capsule-controller-manager-metrics-service.capsule-system.svc.cluster.local:10080/healthz]
+      relabelings:
+        - targetLabel: probe_type
+          replacement: "livez"
+        - targetLabel: instance_name
+          replacement: "demo"
+        - sourceLabels: [__param_target]
+          targetLabel: instance
+        - targetLabel: __address__
+          replacement: prometheus-blackbox-exporter.capsule.svc.cluster.local:19115
+    - port: http
+      interval: 10s
+      scrapeTimeout: 5s
+      path: /probe
+      scheme: http
+      params:
+        module: [http_2xx]
+        target: [http://capsule-controller-manager-metrics-service.capsule-system.svc.cluster.local:10080/readyz]
+      relabelings:
+        - targetLabel: probe_type
+          replacement: "readyz"
+        - targetLabel: instance_name
+          replacement: "demo"
+        - sourceLabels: [__param_target]
+          targetLabel: instance
+        - targetLabel: __address__
+          replacement: prometheus-blackbox-exporter.capsule.svc.cluster.local:19115
\ No newline at end of file
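
For reference, a minimal sketch (not part of the diff) of what the updated template is expected to render from the first two entries of `serviceMonitor.endpoints` in `charts/values.yaml`. The metadata name/namespace and the empty selector are placeholders assumed here, not values produced by the chart; everything under `endpoints` follows the per-field `if`/`with` logic in `charts/capsule/templates/servicemonitor.yaml`.

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: capsule              # placeholder release name
  namespace: capsule-system  # placeholder namespace
spec:
  endpoints:
    # endpoint 1: plain metrics scrape, only the fields set in values are emitted
    - interval: 15s
      relabelings:
        - targetLabel: instance_name
          replacement: demo
    # endpoint 2: blackbox-style probe of /healthz routed through the exporter
    - port: http
      interval: 10s
      scrapeTimeout: 5s
      path: /probe
      scheme: http
      params:
        module:
          - http_2xx
        target:
          - http://capsule-controller-manager-metrics-service.capsule-system.svc.cluster.local:10080/healthz
      relabelings:
        - targetLabel: probe_type
          replacement: livez
        - targetLabel: instance_name
          replacement: demo
        - sourceLabels: [__param_target]
          targetLabel: instance
        - targetLabel: __address__
          replacement: prometheus-blackbox-exporter.capsule.svc.cluster.local:19115
  jobLabel: app.kubernetes.io/name
  selector:
    matchLabels: {}          # placeholder; filled from the chart's matchLabels
```

Each key is emitted only when it is set on the corresponding `endpoints` entry, so the third (readyz) endpoint renders the same way with its own `params.target` and `probe_type` relabeling.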