diff --git a/charts/tempo-distributed/README.md b/charts/tempo-distributed/README.md index 943b0b5642..f5642d1b02 100644 --- a/charts/tempo-distributed/README.md +++ b/charts/tempo-distributed/README.md @@ -610,8 +610,9 @@ The memcached default args are removed and should be provided manually. The sett | ingester.initContainers | list | `[]` | | | ingester.labels | object | `{}` | Labels for the ingester StatefulSet | | ingester.nodeSelector | object | `{}` | Node selector for ingester pods | -| ingester.persistence | object | `{"annotations":{},"enabled":false,"inMemory":false,"labels":{},"size":"10Gi","storageClass":null}` | Persistence configuration for ingester | +| ingester.persistence | object | `{"annotations":{},"enableStatefulSetRecreationForSizeChange":false,"enabled":false,"inMemory":false,"labels":{},"size":"10Gi","storageClass":null}` | Persistence configuration for ingester | | ingester.persistence.annotations | object | `{}` | Annotations for ingester's persist volume claim | +| ingester.persistence.enableStatefulSetRecreationForSizeChange | bool | `false` | Enable StatefulSetRecreation for changes to PVC size | | ingester.persistence.enabled | bool | `false` | Enable creating PVCs which is required when using boltdb-shipper | | ingester.persistence.inMemory | bool | `false` | use emptyDir with ramdisk instead of PVC. **Please note that all data in ingester will be lost on pod restart** | | ingester.persistence.labels | object | `{}` | Labels for ingester's persist volume claim | @@ -766,8 +767,9 @@ The memcached default args are removed and should be provided manually. 
The sett | metricsGenerator.maxUnavailable | int | `1` | Pod Disruption Budget maxUnavailable | | metricsGenerator.minReadySeconds | int | `10` | Minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing/terminating | | metricsGenerator.nodeSelector | object | `{}` | Node selector for metrics-generator pods | -| metricsGenerator.persistence | object | `{"annotations":{},"enabled":false,"labels":{},"size":"10Gi","storageClass":null}` | Persistence configuration for metrics-generator | +| metricsGenerator.persistence | object | `{"annotations":{},"enableStatefulSetRecreationForSizeChange":false,"enabled":false,"labels":{},"size":"10Gi","storageClass":null}` | Persistence configuration for metrics-generator | | metricsGenerator.persistence.annotations | object | `{}` | Annotations for metrics generator PVCs | +| metricsGenerator.persistence.enableStatefulSetRecreationForSizeChange | bool | `false` | Enable StatefulSetRecreation for changes to PVC size. This means that the StatefulSet will be deleted, recreated (with the same name) and rolled when a change to the PVC size is detected. That way the PVC can be resized without manual intervention. | | metricsGenerator.persistence.enabled | bool | `false` | Enable creating PVCs if you have kind set to StatefulSet. This disables using local disk or memory configured in walEmptyDir | | metricsGenerator.persistence.labels | object | `{}` | Labels for metrics generator PVCs | | metricsGenerator.persistence.storageClass | string | `nil` | Storage class to be used. If defined, storageClassName: . If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). 
| diff --git a/charts/tempo-distributed/templates/_helpers.tpl b/charts/tempo-distributed/templates/_helpers.tpl index 0fdf75ef5f..cf0e47bd84 100644 --- a/charts/tempo-distributed/templates/_helpers.tpl +++ b/charts/tempo-distributed/templates/_helpers.tpl @@ -310,3 +310,161 @@ Cluster name that shows up in dashboard metrics {{- define "tempo.clusterName" -}} {{ (include "tempo.calculatedConfig" . | fromYaml).cluster_name | default .Release.Name }} {{- end -}} + +{{- define "tempo.statefulset.recreateOnSizeChangeHook" -}} + {{- $renderedStatefulSets := list -}} + {{- range $renderedStatefulSet := include (print .context.Template.BasePath .pathToStatefulsetTemplate) .context | splitList "---" -}} + {{- with $renderedStatefulSet | fromYaml -}} + {{- $renderedStatefulSets = append $renderedStatefulSets . -}} + {{- end }} + {{- end -}} + {{- if $renderedStatefulSets }} + {{- range $newStatefulSet := $renderedStatefulSets -}} + {{- $currentStatefulset := dict -}} + {{- if $newStatefulSet.spec.volumeClaimTemplates }} + {{- $currentStatefulset = lookup $newStatefulSet.apiVersion $newStatefulSet.kind $newStatefulSet.metadata.namespace $newStatefulSet.metadata.name -}} + {{- $needsRecreation := false -}} + {{- $templates := dict -}} + {{- if $currentStatefulset -}} + {{- if ne (len $newStatefulSet.spec.volumeClaimTemplates) (len $currentStatefulset.spec.volumeClaimTemplates) -}} + {{- $needsRecreation = true -}} + {{- end -}} + {{- range $index, $newVolumeClaimTemplate := $newStatefulSet.spec.volumeClaimTemplates -}} + {{- $currentVolumeClaimTemplateSpec := dict -}} + {{- range $oldVolumeClaimTemplate := $currentStatefulset.spec.volumeClaimTemplates -}} + {{- if eq $oldVolumeClaimTemplate.metadata.name $newVolumeClaimTemplate.metadata.name -}} + {{- $currentVolumeClaimTemplateSpec = $oldVolumeClaimTemplate.spec -}} + {{- end -}} + {{- end }} + {{- $newVolumeClaimTemplateStorageSize := $newVolumeClaimTemplate.spec.resources.requests.storage -}} + {{- if not 
$currentVolumeClaimTemplateSpec -}} + {{- $needsRecreation = true -}} + {{- else -}} + {{- if ne $newVolumeClaimTemplateStorageSize $currentVolumeClaimTemplateSpec.resources.requests.storage -}} + {{- $needsRecreation = true -}} + {{- $templates = set $templates $newVolumeClaimTemplate.metadata.name $newVolumeClaimTemplateStorageSize -}} + {{- end -}} + {{- end -}} + {{- end -}} + {{- end -}} + {{- if $needsRecreation }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $newStatefulSet.metadata.name }}-recreate + namespace: {{ $newStatefulSet.metadata.namespace }} + labels: + {{- $newStatefulSet.metadata.labels | toYaml | nindent 4 }} + app.kubernetes.io/component: statefulset-recreate-job + annotations: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + ttlSecondsAfterFinished: 300 + template: + metadata: + name: {{ $newStatefulSet.metadata.name }}-recreate + labels: + {{- $newStatefulSet.metadata.labels | toYaml | nindent 8 }} + spec: + serviceAccountName: {{ $newStatefulSet.metadata.name }}-recreate + restartPolicy: OnFailure + containers: + - name: recreate + image: {{ printf "%s/kubectl:%s" (coalesce $.context.Values.global.image.registry "registry.k8s.io") $.context.Capabilities.KubeVersion.Version }} + command: + - kubectl + - delete + - statefulset + - {{ $newStatefulSet.metadata.name }} + - --cascade=orphan + {{- range $index := until (int $currentStatefulset.spec.replicas) }} + {{- range $template, $size := $templates }} + - name: patch-pvc-{{ $template }}-{{ $index }} + image: {{ printf "%s/kubectl:%s" (coalesce $.context.Values.global.image.registry "registry.k8s.io") $.context.Capabilities.KubeVersion.Version }} + command: + - kubectl + - patch + - --namespace={{ $newStatefulSet.metadata.namespace }} + - pvc/{{ printf "%s-%s-%d" $template $newStatefulSet.metadata.name $index }} + - --type=json + - '-p=[{"op": "replace", "path": "/spec/resources/requests/storage",
 "value": "{{ $size }}"}]' + {{- end }} + {{- end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ $newStatefulSet.metadata.name }}-recreate + namespace: {{ $newStatefulSet.metadata.namespace }} + labels: + {{- $newStatefulSet.metadata.labels | toYaml | nindent 4 }} + app.kubernetes.io/component: statefulset-recreate-job + annotations: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "-10" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ $newStatefulSet.metadata.name }}-recreate + namespace: {{ $newStatefulSet.metadata.namespace }} + labels: + {{- $newStatefulSet.metadata.labels | toYaml | nindent 4 }} + app.kubernetes.io/component: statefulset-recreate-job + annotations: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "-10" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +rules: + - apiGroups: + - apps + resources: + - statefulsets + resourceNames: + - {{ $newStatefulSet.metadata.name }} + verbs: + - delete + {{- if $templates }} + - apiGroups: + - "" + resources: + - persistentvolumeclaims + resourceNames: + {{- range $index := until (int $currentStatefulset.spec.replicas) }} + {{- range $template := $templates | keys }} + - {{ printf "%s-%s-%d" $template $newStatefulSet.metadata.name $index }} + {{- end }} + {{- end }} + verbs: + - patch + {{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $newStatefulSet.metadata.name }}-recreate + namespace: {{ $newStatefulSet.metadata.namespace }} + labels: + {{- $newStatefulSet.metadata.labels | toYaml | nindent 4 }} + app.kubernetes.io/component: statefulset-recreate-job + annotations: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "-10" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +subjects: + - kind: ServiceAccount + name: {{ $newStatefulSet.metadata.name }}-recreate + namespace: {{
$newStatefulSet.metadata.namespace }} +roleRef: + kind: Role + name: {{ $newStatefulSet.metadata.name }}-recreate + apiGroup: rbac.authorization.k8s.io +--- + {{- end }} + {{ end }} + {{ end }} + {{ end }} +{{- end -}} diff --git a/charts/tempo-distributed/templates/ingester/statefulset-ingester-recreate-job.yaml b/charts/tempo-distributed/templates/ingester/statefulset-ingester-recreate-job.yaml new file mode 100644 index 0000000000..e49f2f9201 --- /dev/null +++ b/charts/tempo-distributed/templates/ingester/statefulset-ingester-recreate-job.yaml @@ -0,0 +1,3 @@ +{{- if and .Values.ingester.persistence.enabled .Values.ingester.persistence.enableStatefulSetRecreationForSizeChange -}} + {{- include "tempo.statefulset.recreateOnSizeChangeHook" (dict "context" . "pathToStatefulsetTemplate" "/ingester/statefulset-ingester.yaml") -}} +{{- end -}} diff --git a/charts/tempo-distributed/templates/ingester/statefulset-ingester.yaml b/charts/tempo-distributed/templates/ingester/statefulset-ingester.yaml index 011e6b5be5..017e9f9d8b 100644 --- a/charts/tempo-distributed/templates/ingester/statefulset-ingester.yaml +++ b/charts/tempo-distributed/templates/ingester/statefulset-ingester.yaml @@ -49,6 +49,9 @@ spec: {{- end }} annotations: checksum/config: {{ include (print $.Template.BasePath "/configmap-tempo.yaml") . | sha256sum }} + {{- if .Values.ingester.persistence.enabled }} + storage/size: {{ .Values.ingester.persistence.size | quote }} + {{- end }} {{- with .Values.tempo.podAnnotations }} {{- toYaml . 
| nindent 8 }} {{- end }} diff --git a/charts/tempo-distributed/templates/metrics-generator/statefulset-metrics-generator-recreate-job.yaml b/charts/tempo-distributed/templates/metrics-generator/statefulset-metrics-generator-recreate-job.yaml new file mode 100644 index 0000000000..31792241cd --- /dev/null +++ b/charts/tempo-distributed/templates/metrics-generator/statefulset-metrics-generator-recreate-job.yaml @@ -0,0 +1,3 @@ +{{- if and .Values.metricsGenerator.persistence.enabled .Values.metricsGenerator.persistence.enableStatefulSetRecreationForSizeChange -}} + {{- include "tempo.statefulset.recreateOnSizeChangeHook" (dict "context" . "pathToStatefulsetTemplate" "/metrics-generator/statefulset-metrics-generator.yaml") -}} +{{- end -}} diff --git a/charts/tempo-distributed/templates/metrics-generator/statefulset-metrics-generator.yaml b/charts/tempo-distributed/templates/metrics-generator/statefulset-metrics-generator.yaml index bec1efcf0d..b1eb093420 100644 --- a/charts/tempo-distributed/templates/metrics-generator/statefulset-metrics-generator.yaml +++ b/charts/tempo-distributed/templates/metrics-generator/statefulset-metrics-generator.yaml @@ -36,6 +36,9 @@ spec: {{- end }} annotations: checksum/config: {{ include (print $.Template.BasePath "/configmap-tempo.yaml") . | sha256sum }} + {{- if .Values.metricsGenerator.persistence.enabled }} + storage/size: {{ .Values.metricsGenerator.persistence.size | quote }} + {{- end }} {{- with .Values.tempo.podAnnotations }} {{- toYaml . 
| nindent 8 }} {{- end }} diff --git a/charts/tempo-distributed/values.yaml b/charts/tempo-distributed/values.yaml index 1a58c98246..8e7c557334 100755 --- a/charts/tempo-distributed/values.yaml +++ b/charts/tempo-distributed/values.yaml @@ -231,6 +231,8 @@ ingester: persistence: # -- Enable creating PVCs which is required when using boltdb-shipper enabled: false + # -- Enable StatefulSetRecreation for changes to PVC size + enableStatefulSetRecreationForSizeChange: false # -- use emptyDir with ramdisk instead of PVC. **Please note that all data in ingester will be lost on pod restart** inMemory: false # -- Size of persistent or memory disk @@ -430,6 +432,10 @@ metricsGenerator: persistence: # -- Enable creating PVCs if you have kind set to StatefulSet. This disables using local disk or memory configured in walEmptyDir enabled: false + # -- Enable StatefulSetRecreation for changes to PVC size. + # This means that the StatefulSet will be deleted, recreated (with the same name) and rolled when a change to the + # PVC size is detected. That way the PVC can be resized without manual intervention. + enableStatefulSetRecreationForSizeChange: false size: 10Gi # -- Storage class to be used. # If defined, storageClassName: .