diff --git a/.github/workflows/runbook-reset-dev-mongo.yaml b/.github/workflows/runbook-reset-dev-mongo.yaml
new file mode 100644
index 000000000..23b47025a
--- /dev/null
+++ b/.github/workflows/runbook-reset-dev-mongo.yaml
@@ -0,0 +1,27 @@
+name: Reset Dev Mongo
+
+on:
+  workflow_dispatch:
+
+jobs:
+  reset-mongo:
+    name: SSH and Reset Dev MongoDB State
+    runs-on: ubuntu-latest
+    steps:
+      - name: SSH and Reset MongoDB
+        uses: appleboy/ssh-action@v1.2.0
+        with:
+          host: ${{ secrets.SSH_HOST }}
+          username: ${{ secrets.SSH_USERNAME }}
+          key: ${{ secrets.SSH_KEY }}
+          script: |
+            set -e # Exit immediately if a command fails
+
+            # Create a one-off Job from the reset-dev-mongo CronJob
+            kubectl create job --from=cronjob/bt-base-reset-dev-mongo bt-base-reset-dev-mongo-ga-manual -n bt
+            echo "MongoDB reset scheduled."
+
+            # Wait for the job pod to start, then stream its logs
+            job_pod=$(kubectl get pods -o custom-columns=NAME:.metadata.name --no-headers -n bt | grep 'bt-base-reset-dev-mongo-ga-manual')
+            kubectl wait --for=condition=ready "pod/$job_pod" -n bt --timeout=30s
+            kubectl logs -f "$job_pod" -n bt
diff --git a/infra/app/templates/cleanup.yaml b/infra/app/templates/cleanup.yaml
index c7ec54d6b..bda957f3a 100644
--- a/infra/app/templates/cleanup.yaml
+++ b/infra/app/templates/cleanup.yaml
@@ -9,7 +9,7 @@ metadata:
 spec:
   template:
     spec:
-      serviceAccountName: bt-app-cleanup
+      serviceAccountName: bt-k8s-role
       containers:
         - name: cleanup
           image: alpine/helm
diff --git a/infra/app/templates/datapuller.yaml b/infra/app/templates/datapuller.yaml
index c8d42123d..09b3f2207 100644
--- a/infra/app/templates/datapuller.yaml
+++ b/infra/app/templates/datapuller.yaml
@@ -19,6 +19,9 @@ spec:
     spec:
       template:
+        metadata:
+          labels:
+            {{- include "bt-app.datapullerLabels" $root | nindent 12 }}
         spec:
           containers:
             - name: datapuller-{{ $jobName }}
               image: {{ printf "%s/%s:%s" $jobConfig.image.registry $jobConfig.image.repository ( toString $jobConfig.image.tag ) }}
diff --git a/infra/base/templates/issuer.yaml b/infra/base/templates/issuer.yaml
index ba7dfb685..10890c7a5 100644
--- a/infra/base/templates/issuer.yaml
+++ b/infra/base/templates/issuer.yaml
@@ -1,4 +1,4 @@
-{{ /* https://cert-manager.io/docs/configuration/acme/dns01/cloudflare/#api-tokens */ }}
+{{- /* https://cert-manager.io/docs/configuration/acme/dns01/cloudflare/#api-tokens */ -}}
 apiVersion: cert-manager.io/v1
 kind: Issuer
 metadata:
diff --git a/infra/base/templates/cleanup-role.yaml b/infra/base/templates/k8s-role.yaml
similarity index 75%
rename from infra/base/templates/cleanup-role.yaml
rename to infra/base/templates/k8s-role.yaml
index 042a2cd30..01647981d 100644
--- a/infra/base/templates/cleanup-role.yaml
+++ b/infra/base/templates/k8s-role.yaml
@@ -1,14 +1,14 @@
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: bt-app-cleanup
+  name: bt-k8s-role
 
 ---
 
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
-  name: bt-app-cleanup
+  name: bt-k8s-role
 rules:
   - apiGroups: ["*"]
     resources: ["*"]
@@ -19,12 +19,12 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-  name: bt-app-cleanup
+  name: bt-k8s-role
 subjects:
   - kind: ServiceAccount
-    name: bt-app-cleanup
+    name: bt-k8s-role
     apiGroup: ""
 roleRef:
   kind: Role
-  name: bt-app-cleanup
+  name: bt-k8s-role
   apiGroup: "rbac.authorization.k8s.io"
diff --git a/infra/base/templates/reset-dev-mongo.yaml b/infra/base/templates/reset-dev-mongo.yaml
new file mode 100644
index 000000000..bd193241c
--- /dev/null
+++ b/infra/base/templates/reset-dev-mongo.yaml
@@ -0,0 +1,53 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: {{ .Release.Name }}-reset-dev-mongo
+  namespace: bt
+spec:
+  schedule: "0 5 * * *" # Daily at 5 AM, after the datapuller run
+  timeZone: America/Los_Angeles
+  concurrencyPolicy: Forbid
+  suspend: false
+  jobTemplate:
+    spec:
+      ttlSecondsAfterFinished: 180
+      template:
+        spec:
+          serviceAccountName: bt-k8s-role
+          containers:
+            - name: reset-dev-mongo
+              image: alpine/k8s:1.29.11
+              command:
+                - sh
+                - -c
+                - |
+                  set -e # Exit immediately if a command fails
+
+                  # Find the stage and dev MongoDB pods
+                  stage_pod=$(kubectl get pods -o custom-columns=NAME:.metadata.name --no-headers -n bt | grep 'bt-stage-mongo')
+                  dev_pod=$(kubectl get pods -o custom-columns=NAME:.metadata.name --no-headers -n bt | grep 'bt-dev-mongo')
+
+                  # Dump the staging MongoDB state
+                  echo "Dumping staging MongoDB state..."
+                  kubectl exec --namespace=bt \
+                    "$stage_pod" -- mongodump --archive=/tmp/stage_backup.gz --gzip
+                  kubectl cp --namespace=bt \
+                    "$stage_pod:/tmp/stage_backup.gz" /tmp/stage_backup.gz
+                  kubectl exec --namespace=bt \
+                    "$stage_pod" -- rm /tmp/stage_backup.gz
+
+                  # Restore the dump into dev MongoDB
+                  echo "Restoring dump into dev MongoDB..."
+                  kubectl cp --namespace=bt \
+                    /tmp/stage_backup.gz "$dev_pod:/tmp/stage_backup.gz"
+                  kubectl exec --namespace=bt \
+                    "$dev_pod" -- mongosh bt --eval "db.dropDatabase()"
+                  kubectl exec --namespace=bt \
+                    "$dev_pod" -- mongorestore --archive=/tmp/stage_backup.gz --gzip --drop
+                  kubectl exec --namespace=bt \
+                    "$dev_pod" -- rm /tmp/stage_backup.gz
+
+                  # Clean up local files
+                  rm /tmp/stage_backup.gz
+                  echo "MongoDB reset completed successfully!"
+          restartPolicy: Never
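
Usage note (not part of the patch): a minimal sketch of how an operator might exercise these changes once merged. It assumes the Helm release is named bt-base (matching the CronJob name the workflow references) and that the operator's kubectl context can reach the bt namespace; adjust names to your environment.

    # Trigger the GitHub Actions runbook manually (requires the gh CLI and repo access)
    gh workflow run "Reset Dev Mongo"

    # Or create a one-off Job directly from the CronJob, mirroring the workflow script
    # (the job name reset-dev-mongo-manual is arbitrary)
    kubectl create job --from=cronjob/bt-base-reset-dev-mongo reset-dev-mongo-manual -n bt
    kubectl logs -f job/reset-dev-mongo-manual -n bt

Otherwise, the CronJob fires on its own schedule: daily at 05:00 America/Los_Angeles.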