diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index d6ae02c147..c1134dd952 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -15,20 +15,194 @@ name: E2E Matrix Tests (bootstrap) on: + workflow_dispatch: pull_request: types: [opened, reopened, synchronize, labeled, unlabeled] branches: - main - - feat/ci-e2e-matrix - workflow_dispatch: + - feat/ci/nightly-e2e-test-nested-env + +concurrency: + group: "${{ github.workflow }}-${{ github.event.number || github.ref }}" + cancel-in-progress: true -permissions: - contents: read +defaults: + run: + shell: bash jobs: - noop: - name: Bootstrap + e2e-ceph: + name: E2E Pipeline (Ceph) + uses: ./.github/workflows/e2e-reusable-pipeline.yml + with: + storage_type: ceph + nested_storageclass_name: nested-ceph-pool-r2-csi-rbd + default_cluster_storageclass: ceph-pool-r2-csi-rbd-immediate + branch: main + virtualization_tag: main + deckhouse_tag: main + default_user: ubuntu + go_version: "1.24.6" + e2e_timeout: "3h" + secrets: + DEV_REGISTRY_DOCKER_CFG: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + VIRT_E2E_NIGHTLY_SA_TOKEN: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + PROD_IO_REGISTRY_DOCKER_CFG: ${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} + + e2e-replicated: + name: E2E Pipeline (Replicated) + uses: ./.github/workflows/e2e-reusable-pipeline.yml + with: + storage_type: replicated + nested_storageclass_name: nested-thin-r1 + default_cluster_storageclass: ceph-pool-r2-csi-rbd-immediate + branch: main + virtualization_tag: main + deckhouse_tag: main + default_user: ubuntu + go_version: "1.24.6" + e2e_timeout: "3h" + secrets: + DEV_REGISTRY_DOCKER_CFG: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + VIRT_E2E_NIGHTLY_SA_TOKEN: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + PROD_IO_REGISTRY_DOCKER_CFG: ${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} + + + report-to-channel: runs-on: ubuntu-latest + name: End-to-End tests report + needs: + - e2e-ceph + - e2e-replicated + if: ${{ always()}} steps: - - name: Say hello - run: echo "Bootstrap workflow OK" + - name: Send results to channel + run: | + # Map storage types to CSI names + get_csi_name() { + local storage_type=$1 + case "$storage_type" in + "ceph") + echo "rbd.csi.ceph.com" + ;; + "replicated") + echo "replicated.csi.storage.deckhouse.io" + ;; + *) + echo "$storage_type" + ;; + esac + } + + # Parse summary JSON and add to table + parse_summary() { + local summary_json=$1 + local storage_type=$2 + + if [ -z "$summary_json" ] || [ "$summary_json" == "null" ] || [ "$summary_json" == "" ]; then + echo "Warning: Empty summary for $storage_type" + return + fi + + # Try to parse as JSON (handle both JSON string and already parsed JSON) + if ! 
echo "$summary_json" | jq empty 2>/dev/null; then + echo "Warning: Invalid JSON for $storage_type: $summary_json" + return + fi + + # Parse JSON fields + csi_raw=$(echo "$summary_json" | jq -r '.CSI // empty' 2>/dev/null) + if [ -z "$csi_raw" ] || [ "$csi_raw" == "null" ] || [ "$csi_raw" == "" ]; then + csi=$(get_csi_name "$storage_type") + else + csi="$csi_raw" + fi + + date=$(echo "$summary_json" | jq -r '.Date // ""' 2>/dev/null) + time=$(echo "$summary_json" | jq -r '.StartTime // ""' 2>/dev/null) + branch=$(echo "$summary_json" | jq -r '.Branch // ""' 2>/dev/null) + status=$(echo "$summary_json" | jq -r '.Status // ":question: UNKNOWN"' 2>/dev/null) + passed=$(echo "$summary_json" | jq -r '.Passed // 0' 2>/dev/null) + failed=$(echo "$summary_json" | jq -r '.Failed // 0' 2>/dev/null) + pending=$(echo "$summary_json" | jq -r '.Pending // 0' 2>/dev/null) + skipped=$(echo "$summary_json" | jq -r '.Skipped // 0' 2>/dev/null) + link=$(echo "$summary_json" | jq -r '.Link // ""' 2>/dev/null) + + # Set defaults if empty + [ -z "$passed" ] && passed=0 + [ -z "$failed" ] && failed=0 + [ -z "$pending" ] && pending=0 + [ -z "$skipped" ] && skipped=0 + [ -z "$status" ] && status=":question: UNKNOWN" + + # Validate date + if [ -n "$date" ] && [ "$date" != "" ]; then + current_date=$(date +"%Y-%m-%d") + if date -d "$current_date" +%s >/dev/null 2>&1 && date -d "$date" +%s >/dev/null 2>&1; then + if [ "$(date -d "$current_date" +%s)" -gt "$(date -d "$date" +%s)" ]; then + status=":x: WRONG REPORT DATE!" + fi + fi + fi + + # Format link - use CSI name as fallback if link is empty + if [ -z "$link" ] || [ "$link" == "" ]; then + link_text="$csi" + else + link_text="[:link: $csi]($link)" + fi + + # Add row to table + markdown_table+="| $link_text | $status | $passed | $failed | $pending | $skipped | $date | $time | $branch |\n" + } + + # Initialize markdown table + markdown_table="" + header="| CSI | Status | Passed | Failed | Pending | Skipped | Date | Time | Branch|\n" + separator="|---|---|---|---|---|---|---|---|---|\n" + markdown_table+="$header" + markdown_table+="$separator" + + # Get current date for header + DATE=$(date +"%Y-%m-%d") + COMBINED_SUMMARY="## :dvp: **DVP | End-to-End tests | $DATE**\n\n" + + # Parse summaries from job outputs + # ceph_summary=${{ toJSON(needs.e2e-ceph.outputs.e2e-summary) }} + # replicated_summary=${{ toJSON(needs.e2e-replicated.outputs.e2e-summary) }} + # Save to json files + cat > /tmp/ceph.json << 'EOF' + ${{ needs.e2e-ceph.outputs.e2e-summary }} + EOF + + cat > /tmp/replicated.json << 'EOF' + ${{ needs.e2e-replicated.outputs.e2e-summary }} + EOF + + if [ -s /tmp/ceph.json ] && [ "$(cat /tmp/ceph.json)" != '""' ] && [ "$(cat /tmp/ceph.json)" != '{}' ]; then + parse_summary "$(cat /tmp/ceph.json)" "ceph" + fi + + if [ -s /tmp/replicated.json ] && [ "$(cat /tmp/replicated.json)" != '""' ] && [ "$(cat /tmp/replicated.json)" != '{}' ]; then + parse_summary "$(cat /tmp/replicated.json)" "replicated" + fi + + # Parse each summary + # if [ -n "$ceph_summary" ] && [ "$ceph_summary" != "null" ]; then + # parse_summary "$ceph_summary" "ceph" + # fi + + # if [ -n "$replicated_summary" ] && [ "$replicated_summary" != "null" ]; then + # parse_summary "$replicated_summary" "replicated" + # fi + + COMBINED_SUMMARY+="${markdown_table}\n" + + echo -e "$COMBINED_SUMMARY" + + # Send to channel if webhook is configured + if [ -n "$LOOP_WEBHOOK_URL" ]; then + curl --request POST --header 'Content-Type: application/json' --data "{\"text\": \"${COMBINED_SUMMARY}\"}" "$LOOP_WEBHOOK_URL" 
+          fi
+        env:
+          LOOP_WEBHOOK_URL: ${{ secrets.LOOP_TEST_CHANNEL }}
\ No newline at end of file
diff --git a/.github/workflows/e2e-reusable-pipeline.yml b/.github/workflows/e2e-reusable-pipeline.yml
index d9b84d26ca..3ca5528e07 100644
--- a/.github/workflows/e2e-reusable-pipeline.yml
+++ b/.github/workflows/e2e-reusable-pipeline.yml
@@ -52,8 +52,13 @@ on:
       go_version:
         required: false
         type: string
-        default: "1.24.5"
+        default: "1.24.6"
         description: "Go version"
+      e2e_timeout:
+        required: false
+        type: string
+        default: "3h"
+        description: "E2E tests timeout"
     secrets:
       DEV_REGISTRY_DOCKER_CFG:
         required: true
@@ -61,8 +66,11 @@
         required: true
       PROD_IO_REGISTRY_DOCKER_CFG:
         required: true
-      GITHUB_TOKEN:
-        required: true
+    outputs:
+      e2e-summary:
+        description: "E2E test results"
+        value: ${{ jobs.e2e-test.outputs.report-summary }}
+
 
 env:
   BRANCH: ${{ inputs.branch }}
@@ -76,9 +84,826 @@ defaults:
     shell: bash
 
 jobs:
-  noop:
-    name: Bootstrap
+  bootstrap:
+    name: Bootstrap cluster (${{ inputs.storage_type }})
+    runs-on: ubuntu-latest
+    concurrency:
+      group: "${{ github.workflow }}-${{ github.event.number || github.ref }}-${{ inputs.storage_type }}"
+      cancel-in-progress: true
+    outputs:
+      kubeconfig-content: ${{ steps.generate-kubeconfig.outputs.config }}
+      storage-type: ${{ steps.vars.outputs.storage_type }}
+      nested-storageclass-name: ${{ steps.vars.outputs.nested_storageclass_name }}
+    steps:
+      - uses: actions/checkout@v4
+        # with:
+        #   ref: ${{ env.BRANCH }}
+
+      - name: Set outputs
+        id: vars
+        run: |
+          namespace="nightly-e2e-${{ inputs.storage_type }}-$(git rev-parse --short HEAD)"
+          echo "namespace=$namespace" >> $GITHUB_OUTPUT
+          echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
+          echo "storage_type=${{ inputs.storage_type }}" >> $GITHUB_OUTPUT
+          echo "nested_storageclass_name=${{ inputs.nested_storageclass_name }}" >> $GITHUB_OUTPUT
+
+          # The dockercfg secret is base64-encoded JSON; extract the registry host and credentials from it
+          REGISTRY=$(base64 -d <<< ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} | jq '.auths | to_entries | .[] | .key' -r)
+          USERNAME=$(base64 -d <<< ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} | jq '.auths | to_entries | .[] | .value.auth' -r | base64 -d | cut -d ':' -f1)
+          PASSWORD=$(base64 -d <<< ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} | jq '.auths | to_entries | .[] | .value.auth' -r | base64 -d | cut -d ':' -f2)
+
+          echo "registry=$REGISTRY" >> $GITHUB_OUTPUT
+          echo "username=$USERNAME" >> $GITHUB_OUTPUT
+          echo "password=$PASSWORD" >> $GITHUB_OUTPUT
+
+      - name: Install htpasswd utility
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y apache2-utils
+
+      - name: Install Task
+        uses: arduino/setup-task@v2
+        with:
+          version: 3.x
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Setup d8
+        uses: ./.github/actions/install-d8
+
+      - name: Log in to private registry
+        uses: docker/login-action@v3
+        with:
+          registry: ${{ steps.vars.outputs.registry }}
+          username: ${{ steps.vars.outputs.username }}
+          password: ${{ steps.vars.outputs.password }}
+
+      - name: Configure kubectl via azure/k8s-set-context@v4
+        uses: azure/k8s-set-context@v4
+        with:
+          method: kubeconfig
+          context: e2e-cluster-nightly-e2e-virt-sa
+          kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }}
+
+      - name: Generate values.yaml
+        run: |
+          defaultStorageClass=$(kubectl get storageclass -o json \
+            | jq -r '.items[] | select(.metadata.annotations."storageclass.kubernetes.io/is-default-class" == "true") | .metadata.name')
+
+          cat <<EOF > test/dvp-over-dvp/values.yaml
+          namespace: ${{ steps.vars.outputs.namespace }}
+          storageClass: ${defaultStorageClass}
+          nfsEnabled: false
+          nfsSC: nested-nfs-${{ 
inputs.storage_type }}-${{ steps.vars.outputs.sha_short }} + defaultClusterStorageClass: ${{ inputs.default_cluster_storageclass }} + clusterConfigurationPrefix: ${{ inputs.storage_type }} + sa: dkp-sa + deckhouse: + tag: ${{ env.DECKHOUSE_TAG }} + kubernetesVersion: Automatic + registryDockerCfg: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + image: + url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img + defaultUser: ${{ env.DEFAULT_USER }} + bootloader: BIOS + ingressHosts: + - api + - grafana + - dex + - prometheus + - console + - virtualization + instances: + masterNodes: + count: 1 + cores: 8 + coreFraction: 50% + memory: 14Gi + additionalNodes: + - name: worker + count: 3 + cores: 10 + coreFraction: 25% + memory: 8Gi + nodeType: CloudEphemeral + bootloader: BIOS + EOF + + - name: Bootstrap cluster [infra-deploy] + working-directory: test/dvp-over-dvp + run: | + task infra-deploy + - name: Bootstrap cluster [dhctl-bootstrap] + id: dhctl-bootstrap + working-directory: test/dvp-over-dvp + run: | + task dhctl-bootstrap + timeout-minutes: 30 + - name: Bootstrap cluster [show-connection-info] + working-directory: test/dvp-over-dvp + run: | + task show-connection-info + + - name: Save ssh to secrets in cluster + env: + NAMESPACE: ${{ steps.vars.outputs.namespace }} + if: always() + run: | + kubectl -n $NAMESPACE create secret generic ssh-key --from-file=test/dvp-over-dvp/tmp/ssh/cloud + + - name: Get info about nested master VM + working-directory: test/dvp-over-dvp + env: + NAMESPACE: ${{ steps.vars.outputs.namespace }} + run: | + nested_master=$(kubectl -n ${NAMESPACE} get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + + echo "Pods" + kubectl get pods -n "${NAMESPACE}" + echo "" + + echo "VMs" + kubectl get vm -n "${NAMESPACE}" + echo "" + + echo "VDs" + kubectl get vd -n "${NAMESPACE}" + echo "" + + echo "login to master" + echo "os-release master" + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'cat /etc/os-release' + echo "" + + echo "hostname master" + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'hostname' + + - name: Generate nested kubeconfig + id: generate-kubeconfig + working-directory: test/dvp-over-dvp + env: + kubeConfigPath: tmp/kube.config + NAMESPACE: ${{ steps.vars.outputs.namespace }} + run: | + nested_master=$(kubectl -n $NAMESPACE get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + + d8vscp() { + local source=$1 + local dest=$2 + d8 v scp -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + $source $dest + echo "d8vscp: $source -> $dest - done" + } + + d8vssh() { + local cmd=$1 + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c "$cmd" + } + + echo "Copy script for generating kubeconfig in nested cluster" + echo "Copy nested-sa-config/gen-sa.sh to master" + d8vscp "./nested-sa-config/gen-sa.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/gen-sa.sh" + echo "" + 
d8vscp "./tools/deckhouse-queue.sh" "${DEFAULT_USER}@${nested_master}.${NAMESPACE}:/tmp/deckhouse-queue.sh" + echo "" + + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'chmod +x /tmp/{gen-sa.sh,deckhouse-queue.sh}' + echo "" + + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'ls -la /tmp/' + echo "===" + + echo "Check d8 queue" + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.${NAMESPACE} \ + -c 'sudo /tmp/deckhouse-queue.sh' + + echo "Generate kube conf in nested cluster" + echo "run nested-sa-config/gen-sa.sh" + + # "Usage: gen-sa.sh [FILE_NAME]" + echo "===" + d8 v ssh -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + ${DEFAULT_USER}@${nested_master}.$NAMESPACE \ + -c "sudo /tmp/gen-sa.sh nested-sa nested nested-e2e /${kubeConfigPath}" + + echo "'sudo /tmp/gen-sa.sh nested-sa nested nested-e2e /${kubeConfigPath}' - done" + echo "" + + echo "Copy kubeconfig to runner" + echo "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/${kubeConfigPath} ./${kubeConfigPath}" + d8vscp "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/${kubeConfigPath}" "./${kubeConfigPath}" + + echo "=== Set rights for kubeconfig ===" + echo "sudo chown 1001:1001 ${kubeConfigPath}" + sudo chown 1001:1001 ${kubeConfigPath} + echo "rights - done" + + echo "Kubeconf to github output" + CONFIG=$(cat ${kubeConfigPath} | base64 -w 0) + CONFIG=$(echo $CONFIG | base64 -w 0) + echo "config=$CONFIG" >> $GITHUB_OUTPUT + + - name: cloud-init logs + if: steps.dhctl-bootstrap.outcome == 'failure' + env: + NAMESPACE: ${{ steps.vars.outputs.namespace }} + run: | + nested_master=$(kubectl -n $NAMESPACE get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}") + + d8vscp() { + local source=$1 + local dest=$2 + d8 v scp -i ./tmp/ssh/cloud \ + --local-ssh=true \ + --local-ssh-opts="-o StrictHostKeyChecking=no" \ + --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \ + $source $dest + echo "d8vscp: $source -> $dest - done" + } + + d8vscp "${DEFAULT_USER}@${nested_master}.$NAMESPACE:/var/log/cloud-init*.log" "./test/dvp-over-dvp/tmp/" + + - name: Prepare artifact + if: always() + run: | + sudo chown -fR 1001:1001 test/dvp-over-dvp + yq e '.deckhouse.registryDockerCfg = "None"' -i ./test/dvp-over-dvp/values.yaml + yq e 'select(.kind == "InitConfiguration") .deckhouse.registryDockerCfg = "None"' -i ./test/dvp-over-dvp/tmp/config.yaml + echo "${{ steps.generate-kubeconfig.outputs.config }}" | base64 -d | base64 -d > ./test/dvp-over-dvp/kube-config + + - name: Upload generated files + uses: actions/upload-artifact@v4 + id: artifact-upload + if: always() + with: + name: generated-files-${{ inputs.storage_type }} + path: | + test/dvp-over-dvp/tmp + test/dvp-over-dvp/values.yaml + overwrite: true + include-hidden-files: true + retention-days: 1 + + - name: Upload ssh config + uses: actions/upload-artifact@v4 + id: artifact-upload-ssh + if: always() + with: + name: generated-files-ssh-${{ inputs.storage_type }} + path: test/dvp-over-dvp/tmp/ssh + overwrite: true 
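+          # NOTE: this artifact contains the cluster's private SSH key; retention-days below keeps it for one day only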
+ include-hidden-files: true + retention-days: 1 + + - name: Upload kubeconfig config + uses: actions/upload-artifact@v4 + id: artifact-upload-kubeconfig + if: always() + with: + name: generated-files-kubeconfig-${{ inputs.storage_type }} + path: test/dvp-over-dvp/kube-config + overwrite: true + include-hidden-files: true + retention-days: 1 + + configure-storage: + name: Configure storage (${{ inputs.storage_type }}) runs-on: ubuntu-latest + needs: bootstrap steps: - - name: Say hello - run: echo "Bootstrap workflow OK" + - uses: actions/checkout@v4 + + - name: Install Task + uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup d8 + uses: ./.github/actions/install-d8 + - name: Install kubectl CLI + uses: azure/setup-kubectl@v4 + + - name: Check kubeconfig + run: | + mkdir -p ~/.kube + echo "Configure kube config" + echo "${{ needs.bootstrap.outputs.kubeconfig-content }}" | base64 -d | base64 -d > ~/.kube/config + + echo "Show paths and files content" + ls -la ~/.kube + chmod 600 ~/.kube/config + + echo "kubectl get nodes" + kubectl config use-context nested-e2e-nested-sa + kubectl get nodes + + - name: Configure replicated storage + if: ${{ inputs.storage_type == 'replicated' }} + working-directory: test/dvp-over-dvp/storage/sds-replicated + run: | + kubectl apply -f mc.yaml + echo "Wait for sds-node-configurator" + kubectl wait --for=jsonpath='{.status.phase}'=Ready modules sds-node-configurator --timeout=300s + # echo "Wait for sds-replicated" + # kubectl wait --for=jsonpath='{.status.phase}'=Ready modules sds-replicated-volume --timeout=300s + + for i in {1..60}; do + sds_replicated_volume_status=$(kubectl get ns d8-sds-replicated-volume -o jsonpath='{.status.phase}' || echo "False") + + if [[ "${sds_replicated_volume_status}" = "Active" ]]; then + echo "Namespaces sds-replicated-volume are Active" + kubectl -n d8-sds-replicated-volume get pods + break + fi + + echo "Waiting 10s for sds-replicated-volume to be ready" + echo "get ns" + kubectl get ns | grep sds-replicated-volume || echo "Namespaces sds-replicated-volume are not ready" + + if (( i % 5 == 0 )); then + d8 p queue list | head -n25 || echo "No queues" + fi + sleep 10 + done + + echo "Wait bd" + workers=$(kubectl get nodes -o name | grep worker | wc -l) + bdexists=false + count=60 + for i in $(seq 1 $count); do + blockdevices=$(kubectl get blockdevice -o name | wc -l) + if [ $blockdevices -ge $workers ]; then + bdexists=true + break + fi + echo "Wait 10 sec until blockdevices is greater or equal to $workers [${i}/${count}]" + d8 p queue list | head -n25 || echo "No queues" + sleep 10 + done + + if [ $bdexists = false ]; then + echo "Blockdevices is not 3" + echo "Show blockdevice" + kubectl get blockdevice + echo "Show sds namespaces" + kubectl get ns | grep sds || echo "ns sds is not found" + echo "Show cluster nodes" + kubectl get nodes + echo "Show deckhouse logs" + d8 p logs | tail -n 100 + exit 1 + fi + + chmod +x lvg-gen.sh + ./lvg-gen.sh + + chmod +x rsc-gen.sh + ./rsc-gen.sh + + echo "====== Show nested storageclasses =======" + kubectl get sc | grep nested || echo "No nested storageclasses" + echo "Done" + - name: Configure ceph storage + if: ${{ inputs.storage_type == 'ceph' }} + run: | + d8_queue_list() { + d8 p queue list | grep -Po '([0-9]+)(?= active)' || echo "Failed to retrieve list queue" + } + + d8_queue() { + local count=90 + local list_queue_ready=false + + for i in $(seq 1 $count) ; do + if [[ "$(d8_queue_list)" == "0" ]]; then + echo "Queue list 
is clear" + list_queue_ready=true + else + echo "Show queue list" + d8 p queue list | head -n25 || echo "Failed to retrieve list queue" + fi + + if [[ "$list_queue_ready" = true ]]; then + break + fi + echo "====" + echo "Wait until queues are empty ${i}/${count}" + echo "====" + kubectl get ns | grep sds || echo "ns sds is not ready" + echo " " + sleep 10 + done + } + + cd test/dvp-over-dvp/storage/ceph + export registry=${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} + yq e '.spec.registry.dockerCfg = env(registry)' -i 00-ms.yaml + unset registry + + echo "Create prod module source" + kubectl apply -f 00-ms.yaml + kubectl get ms + + echo "Create ceph operator and csi module config" + kubectl apply -f 01-mc.yaml + + d8_queue + + echo "Start wait for ceph operator and csi" + for i in {1..60}; do + ceph_operator_status=$(kubectl get ns d8-operator-ceph -o jsonpath='{.status.phase}' || echo "False") + csi_ceph_status=$(kubectl get module csi-ceph -o jsonpath='{.status.phase}' || echo "False") + + if [[ "${ceph_operator_status}" = "Active" ]] && [[ "${csi_ceph_status}" = "Ready" ]]; then + echo "Namespaces operator-ceph and csi are Active" + break + fi + + echo "Waiting 10s for ceph operator and csi namespaces to be ready" + echo "get ns" + kubectl get ns | grep ceph || echo "Namespaces operator-ceph and csi are not ready" + + if (( i % 5 == 0 )); then + echo "Show all ns" + kubectl get ns + echo "=====" + d8 p queue list | head -n25 || echo "Failed to retrieve list queue" + fi + sleep 10 + done + + echo "Create sa" + kubectl apply -f 02-sa.yaml + echo "Create cm (patch existing for configure rbd support)" + kubectl apply -f 03-cm.yaml + echo "Create cluster" + kubectl apply -f 04-cluster.yaml + + echo "get pod in d8-operator-ceph" + kubectl -n d8-operator-ceph get po + + echo "Wait for ceph operator" + for i in {1..60}; do + echo "Check ceph pods, mon mgr osd" + ceph_mgr=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mgr | grep -c Running || echo 0) + ceph_mon=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-mon | grep -c Running || echo 0) + ceph_osd=$(kubectl -n d8-operator-ceph get po 2>/dev/null | grep ceph-osd | grep -c Running || echo 0) + + echo "check if ceph pods are ready" + if [[ "${ceph_mgr}" -ge "2" ]] && [[ "${ceph_mon}" -ge "3" ]] && [[ "${ceph_osd}" -ge "3" ]]; then + echo "Ceph cluster is ready" + break + fi + + echo "Not all pods are ready, ceph_mgr=${ceph_mgr}, ceph_mon=${ceph_mon}, ceph_osd=${ceph_osd}" + echo "Waiting 10s for ceph operator to be ready" + kubectl -n d8-operator-ceph get po || echo "Failed to retrieve pods" + if (( i % 5 == 0 )); then + echo "= Get ceph ns =" + kubectl get ns | grep ceph || echo "Failed to retrieve ceph ns" + echo "= Get mc =" + kubectl get mc | grep ceph || echo "Failed to retrieve mc" + echo "= Get modules =" + kubectl get modules -o wide | grep ceph || echo "Failed to retrieve modules" + echo "=====" + echo "Show queue" + d8 p queue list | head -n25 || echo "Failed to retrieve list queue" + echo "=====" + fi + echo "====" + echo "Wait until all necessary pods are ready ${i}/60" + echo "====" + sleep 10 + done + + echo "Show pods" + kubectl get pods -n d8-operator-ceph + + kubectl apply -f 05-blockpool.yaml + kubectl apply -f 06-toolbox.yaml + echo "Wait for rook-ceph-tools, timeout 300s" + kubectl -n d8-operator-ceph wait --for=condition=Available deployment/rook-ceph-tools --timeout=300s + + echo "-- ls ceph pool --" + kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph osd pool ls + 
echo "------" + + echo "Configure storage class" + chmod +x ./ceph-configure.sh + ./ceph-configure.sh + + configure-virtualization: + name: Configure Virtualization (${{ inputs.storage_type }}) + runs-on: ubuntu-22.04 + needs: + - bootstrap + - configure-storage + steps: + - uses: actions/checkout@v4 + - name: Install kubectl CLI + uses: azure/setup-kubectl@v4 + - name: Setup d8 + uses: ./.github/actions/install-d8 + + - name: Check kubeconfig + run: | + echo "Configure kube config" + mkdir -p ~/.kube + echo "${{ needs.bootstrap.outputs.kubeconfig-content }}" | base64 -d | base64 -d > ~/.kube/config + chmod 600 ~/.kube/config + kubectl config use-context nested-e2e-nested-sa + + - name: Configure Virtualization + run: | + echo "Apply Virtualization module config" + kubectl apply -f -< ~/.kube/config + chmod 600 ~/.kube/config + kubectl config use-context nested-e2e-nested-sa + kubectl get vmclass + + - name: Download dependencies + working-directory: ./test/e2e/ + run: | + echo "Download dependencies" + go mod download + + - name: Create vmclass for e2e tests + run: | + kubectl get vmclass/generic -o json | jq 'del(.status) | del(.metadata) | .metadata = {"name":"generic-for-e2e","annotations":{"virtualmachineclass.virtualization.deckhouse.io/is-default-class":"true"}} ' | kubectl create -f - + + - name: Run E2E + id: e2e-tests + env: + TIMEOUT: ${{ inputs.e2e_timeout }} + working-directory: ./test/e2e/ + run: | + if [[ "${{ inputs.storage_type }}" == "replicated" ]]; then + export SKIP_IMMEDIATE_SC_CHECK="yes" + fi + STORAGE_CLASS_NAME=${{ inputs.nested_storageclass_name }} FOCUS="VirtualMachineConfiguration" task run:ci -v LABELS="Slow" + + # - uses: actions/upload-artifact@v4 + # if: always() + # with: + # name: resources_from_failed_tests_${{ inputs.storage_type }} + # path: ${{ runner.temp }}/e2e_failed__* + # if-no-files-found: ignore + + - name: Save results + working-directory: ./test/e2e/ + id: report + env: + input_storage_type: ${{ inputs.storage_type }} + if: always() + run: | + if [ -z "$SUMMARY" ]; then + SUMMARY=$(jq -n \ + --arg csi "$input_storage_type" \ + --arg date "$DATE" \ + --arg startTime "$START_TIME" \ + --arg branch "$GITHUB_REF_NAME" \ + --arg status ":question: UNKNOWN" \ + --arg link "$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID/" \ + '{ + CSI: $csi, + Date: $date, + StartTime: $startTime, + Branch: $branch, + Status: $status, + Link: $link + }' + ) + fi + echo $SUMMARY | jq + echo "summary=$SUMMARY" >> $GITHUB_OUTPUT + echo $SUMMARY > "e2e_summary_${{ inputs.storage_type }}_$DATE.json" + + - name: Upload summary test results + uses: actions/upload-artifact@v4 + id: e2e-summary-artifact + if: always() + with: + name: e2e_summary_${{ inputs.storage_type }}_${{ env.DATE }} + path: test/e2e/e2e_summary_${{ inputs.storage_type }}.json + if-no-files-found: ignore + + + undeploy-cluster: + name: Undeploy cluster (${{ inputs.storage_type }}) + runs-on: ubuntu-latest + needs: + - bootstrap + - configure-storage + - configure-virtualization + - e2e-test + # if: always() + if: cancelled() || success() + steps: + - uses: actions/checkout@v4 + + - name: Install htpasswd utility + run: | + sudo apt-get update + sudo apt-get install -y apache2-utils + + - name: Setup d8 + uses: ./.github/actions/install-d8 + + - name: Install Task + uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Download artifacts + uses: actions/download-artifact@v5 + with: + name: generated-files-${{ inputs.storage_type }} + 
path: test/dvp-over-dvp/ + + - name: Configure kubectl via azure/k8s-set-context@v4 + uses: azure/k8s-set-context@v4 + with: + method: kubeconfig + context: e2e-cluster-nightly-e2e-virt-sa + kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} + + - name: infra-undeploy + working-directory: test/dvp-over-dvp + run: | + task infra-undeploy diff --git a/.gitignore b/.gitignore index ae343f44fd..63df742d42 100644 --- a/.gitignore +++ b/.gitignore @@ -46,6 +46,9 @@ local.Dockerfile # direnv .envrc +# dotenv file +.env + # logs log/ logs/ @@ -60,3 +63,6 @@ retry/ # nodejs node_modules/ package-lock.json + +# values +values.yaml diff --git a/test/dvp-over-dvp/Taskfile.yaml b/test/dvp-over-dvp/Taskfile.yaml new file mode 100644 index 0000000000..cdc348e487 --- /dev/null +++ b/test/dvp-over-dvp/Taskfile.yaml @@ -0,0 +1,244 @@ +# https://taskfile.dev + +version: "3" + +vars: + NAMESPACE: + sh: yq eval '.namespace' values.yaml + DEFAULT_USER: + sh: yq eval '.image.defaultUser' values.yaml + TMP_DIR: ./tmp + SSH_DIR: "{{ .TMP_DIR }}/ssh" + SSH_FILE_NAME: cloud + SSH_PUB_KEY_FILE: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}.pub" + SSH_PRIV_KEY_FILE: "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}" + GENERATED_VALUES_FILE: tmp/generated-values.yaml + PASSWORD_FILE: "{{ .TMP_DIR }}/password.txt" + PASSWORD_HASH_FILE: "{{ .TMP_DIR }}/password-hash.txt" +tasks: + default: + silent: true + desc: Preflight / Check if all dependencies are installed + cmds: + - | + deps=("kubectl" "jq" "yq" "docker" "helm" "htpasswd") + for dep in "${deps[@]}"; do + if ! command -v "$dep" >/dev/null 2>&1; then + echo "Required utility '$dep' not found!" + exit 1 + fi + done + echo "All dependencies are installed!" + + password-gen: + desc: Preflight / Generate password for admin@deckhouse.io user + cmds: + - mkdir -p {{ .TMP_DIR }} + - date +%s | sha256sum | base64 | head -c 10 > {{ .PASSWORD_FILE }} + - | + echo $(cat {{ .TMP_DIR }}/password.txt) | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 > {{ .PASSWORD_HASH_FILE }} + status: + - test -f "{{ .PASSWORD_FILE }}" + - test -f "{{ .PASSWORD_HASH_FILE }}" + + ssh-gen: + desc: Preflight / Generate ssh keypair for jump-host + cmds: + - mkdir -p "{{ .SSH_DIR }}" + - yes | ssh-keygen -t ed25519 -b 1024 -f {{ .SSH_PRIV_KEY_FILE }} -N "" -C "cloud" -v + - chmod 0600 "{{ .SSH_PUB_KEY_FILE }}" + - chmod 0400 "{{ .SSH_PRIV_KEY_FILE }}" + status: + - test -f "{{ .SSH_PRIV_KEY_FILE }}" + + render-infra: + desc: Preparation / Generate infra manifests + deps: + - ssh-gen + cmds: + - touch {{ .GENERATED_VALUES_FILE }} + - | + export NEW_KUBECONFIG_B64="$(cat {{ .SSH_PUB_KEY_FILE }})" + yq eval --inplace '.sshPublicKey = env(NEW_KUBECONFIG_B64)' {{ .GENERATED_VALUES_FILE }} + - | + export DOMAIN=$(kubectl get mc global -o json | jq '.spec.settings.modules.publicDomainTemplate | split(".")[1:] | join(".")') + yq eval --inplace '.domain = env(DOMAIN)' {{ .GENERATED_VALUES_FILE }} + - helm template dvp-over-dvp-infra ./charts/infra -f values.yaml -f {{ .GENERATED_VALUES_FILE }} > {{ .TMP_DIR }}/infra.yaml + + infra-deploy: + deps: + - render-infra + desc: Deploy infra (Namespace/RBAC/Jumphost) + vars: + start_time: + sh: date +%s + cmds: + - kubectl apply -f {{ .TMP_DIR }}/infra.yaml + - kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=jump-host --timeout=300s + # - kubectl -n {{ .NAMESPACE }} wait --for=condition=Ready pod -l app=nfs-server --timeout=300s + - | + export end_time=$(date +%s) + difference=$((end_time - {{.start_time}})) + date -ud "@$difference" +'%H:%M:%S' + + infra-undeploy: 
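+    # Best-effort teardown: each command tolerates missing resources ("|| true")
+    # so cleanup also works after a failed or partial deploy.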
+    desc: Destroy infra (Namespace/RBAC/Jumphost/...)
+    cmds:
+      - kubectl delete -f {{ .TMP_DIR }}/infra.yaml || true
+      - kubectl wait --for=delete namespace/{{ .NAMESPACE }} --timeout 300s || true
+
+  render-kubeconfig:
+    desc: Preparation / Generate kubeconfig (infra required)
+    vars:
+      SERVER:
+        sh: echo https://$(kubectl -n d8-user-authn get ingress kubernetes-api -o json | jq .spec.rules[0].host -r)
+      CERT:
+        sh: kubectl -n d8-user-authn get secrets kubernetes-tls -o json | jq '.data."tls.crt"' -r
+      TOKEN:
+        sh: kubectl -n {{ .NAMESPACE }} get secret dkp-sa-secret -ojson | jq -r '.data.token' | base64 -d
+    silent: true
+    cmds:
+      - |
+        cat <<EOF > {{ .TMP_DIR }}/kubeconfig.yaml
+        apiVersion: v1
+        clusters:
+          - cluster:
+              certificate-authority-data: {{ .CERT }}
+              server: {{ .SERVER }}
+            name: dvp
+        contexts:
+          - context:
+              cluster: dvp
+              namespace: {{ .NAMESPACE }}
+              user: {{ .NAMESPACE }}@dvp
+            name: {{ .NAMESPACE }}@dvp
+        current-context: {{ .NAMESPACE }}@dvp
+        kind: Config
+        preferences: {}
+        users:
+          - name: {{ .NAMESPACE }}@dvp
+            user:
+              token: {{ .TOKEN }}
+        EOF
+
+  render-cluster-config:
+    desc: Preparation / Generate cluster config (infra required)
+    deps:
+      - render-kubeconfig
+      - password-gen
+    cmds:
+      - touch {{ .GENERATED_VALUES_FILE }}
+      - |
+        export PASSWORD_HASH="$(cat {{ .PASSWORD_HASH_FILE }})"
+        yq eval --inplace '.passwordHash = env(PASSWORD_HASH)' {{ .GENERATED_VALUES_FILE }}
+      - |
+        export NEW_KUBECONFIG_B64="$(cat {{ .TMP_DIR }}/kubeconfig.yaml | base64 -w 0)"
+        yq eval --inplace '.kubeconfigDataBase64 = env(NEW_KUBECONFIG_B64)' {{ .GENERATED_VALUES_FILE }}
+      - helm template dvp-over-dvp-cluster-config ./charts/cluster-config -f values.yaml -f {{ .GENERATED_VALUES_FILE }} > {{ .TMP_DIR }}/config.yaml
+
+  dhctl-bootstrap:
+    desc: Bootstrap DKP over DVP
+    deps:
+      - render-cluster-config
+    vars:
+      start_time:
+        sh: date +%s
+      JUMPHOST_EXT_IP:
+        sh: kubectl -n {{ .NAMESPACE }} exec deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short
+      JUMPHOST_NODEPORT:
+        sh: kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq ".spec.ports[] | select(.port==2222) | .nodePort"
+    cmds:
+      - |
+        docker run --pull=always \
+          -v "{{ .TMP_DIR }}/config.yaml:/config.yaml" \
+          -v "{{ .SSH_DIR }}:/tmp/.ssh/" \
+          -v "{{ .TMP_DIR }}/dhctl:/tmp/dhctl/" \
+          dev-registry.deckhouse.io/sys/deckhouse-oss/install:{{ .DECKHOUSE_TAG }} \
+          dhctl bootstrap \
+          --config=/config.yaml \
+          --ssh-agent-private-keys=/tmp/.ssh/{{ .SSH_FILE_NAME }} \
+          --ssh-user={{ .DEFAULT_USER }} \
+          --ssh-bastion-port={{ .JUMPHOST_NODEPORT }} \
+          --ssh-bastion-host={{ .JUMPHOST_EXT_IP }} \
+          --ssh-bastion-user=user \
+          {{.CLI_ARGS}}
+      - |
+        export end_time=$(date +%s)
+        difference=$((end_time - {{.start_time}}))
+        date -ud "@$difference" +'%H:%M:%S'
+
+  show-connection-info:
+    desc: Show connection info
+    vars:
+      DOMAIN:
+        sh: yq eval '.domain' {{ .GENERATED_VALUES_FILE }}
+      PASSWORD:
+        sh: cat {{ .PASSWORD_FILE }}
+    silent: true
+    cmds:
+      - echo "Connect to master: task ssh-to-master"
+      - echo "Grafana URL https://grafana.{{ .NAMESPACE }}.{{ .DOMAIN }}"
+      - echo "Default user/password admin@deckhouse.io/{{ .PASSWORD }}"
+
+  install:
+    cmds:
+      - task: infra-deploy
+      - task: dhctl-bootstrap
+      - task: show-connection-info
+
+  ssh-to-master:
+    desc: ssh to master
+    vars:
+      MASTER_NAME:
+        sh: kubectl -n {{ .NAMESPACE }} get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}"
+    cmds:
+      - /usr/bin/ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true {{ .MASTER_NAME }}.{{ .NAMESPACE }} 22' {{ .DEFAULT_USER }}@{{ .MASTER_NAME }}
+
+  kill-dvp-resources:
+    cmds:
+      - kubectl -n {{ .NAMESPACE }} delete vm --all --force --grace-period=0
+      - kubectl -n {{ .NAMESPACE }} delete vd --all --force --grace-period=0
+      - kubectl -n {{ .NAMESPACE }} delete vmip --all --force --grace-period=0
+
+  clean:
+    cmds:
+      - task: infra-undeploy
+      - rm -rf "{{ .TMP_DIR }}"
+
+  __ssh-command:
+    silent: true
+    internal: true
+    vars:
+      MASTER_NAME:
+        sh: kubectl -n {{ .NAMESPACE }} get vm -l dvp.deckhouse.io/node-group=master -o jsonpath="{.items[0].metadata.name}"
+    cmds:
+      - /usr/bin/ssh -t -i {{ .SSH_PRIV_KEY_FILE }} -o LogLevel=ERROR -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true {{ .MASTER_NAME }}.{{ .NAMESPACE }} 22' {{ .DEFAULT_USER }}@{{ .MASTER_NAME }} {{ .CMD }}
+
+  kubectl:
+    desc: Run kubectl on master. Usage example "task kubectl -- get pods -A"
+    cmds:
+      - task: __ssh-command
+        vars:
+          CMD: sudo /opt/deckhouse/bin/kubectl {{ .CLI_ARGS }}
+
+  k9s:
+    desc: Run k9s on master. Usage example "task k9s"
+    cmds:
+      - task: __ssh-command
+        vars:
+          CMD: TERM=xterm-256color sudo /usr/local/bin/k9s {{ .CLI_ARGS }}
+
+  configure:cluster:sa:
+    desc: Configure kubeconfig for nested cluster
+    vars:
+      script: gen-sa.sh
+    cmds:
+      - rsync -azv -e "ssh -i {{ .SSH_PRIV_KEY_FILE }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o 'ProxyCommand=d8 v port-forward --stdio=true master-0.{{ .NAMESPACE }} 22'" ./nested-sa-config/{{ .script }} cloud@master-0:/tmp/
+      - task: __ssh-command
+        vars:
+          CMD: sudo chmod +x /tmp/{{ .script }}
+      - task: __ssh-command
+        vars:
+          CMD: sudo /tmp/{{ .script }}
+      - task: __ssh-command
+        vars:
+          CMD: sudo /opt/deckhouse/bin/kubectl apply -f {{ .config }}
diff --git a/test/dvp-over-dvp/charts/cluster-config/.helmignore b/test/dvp-over-dvp/charts/cluster-config/.helmignore
new file mode 100644
index 0000000000..0e8a0eb36f
--- /dev/null
+++ b/test/dvp-over-dvp/charts/cluster-config/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/test/dvp-over-dvp/charts/cluster-config/Chart.yaml b/test/dvp-over-dvp/charts/cluster-config/Chart.yaml
new file mode 100644
index 0000000000..c61a43f29a
--- /dev/null
+++ b/test/dvp-over-dvp/charts/cluster-config/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: cluster-config
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml new file mode 100644 index 0000000000..fa7fd15e14 --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/cluster-config.yaml @@ -0,0 +1,69 @@ +apiVersion: deckhouse.io/v1 +kind: ClusterConfiguration +clusterType: Cloud +cloud: + provider: DVP + prefix: {{ .Values.clusterConfigurationPrefix | default "e2e" }} +podSubnetCIDR: 10.112.0.0/16 +serviceSubnetCIDR: 10.223.0.0/16 +kubernetesVersion: "{{ .Values.deckhouse.kubernetesVersion }}" +clusterDomain: "internal.cluster.local" +--- +apiVersion: deckhouse.io/v1 +kind: InitConfiguration +deckhouse: + imagesRepo: dev-registry.deckhouse.io/sys/deckhouse-oss + registryDockerCfg: {{ .Values.deckhouse.registryDockerCfg }} + devBranch: {{ .Values.deckhouse.tag }} +--- +apiVersion: deckhouse.io/v1 +kind: DVPClusterConfiguration +layout: Standard +sshPublicKey: {{ .Values.sshPublicKey }} +masterNodeGroup: + replicas: {{ .Values.instances.masterNodes.count }} + instanceClass: + virtualMachine: + bootloader: {{ .Values.image.bootloader }} + cpu: + cores: {{ .Values.instances.masterNodes.cores }} + coreFraction: {{ .Values.instances.masterNodes.coreFraction }} + memory: + size: {{ .Values.instances.masterNodes.memory }} + ipAddresses: + - Auto + virtualMachineClassName: "{{ .Values.namespace }}-cpu" + rootDisk: + size: 50Gi + storageClass: {{ .Values.storageClass }} + image: + kind: VirtualImage + name: image + etcdDisk: + size: 15Gi + storageClass: {{ .Values.storageClass }} +nodeGroups: +{{- range .Values.instances.additionalNodes }} + - name: {{ .name }} + replicas: {{ .count }} + instanceClass: + virtualMachine: + bootloader: {{ .bootloader }} + cpu: + cores: {{ .cores }} + coreFraction: {{ .coreFraction }} + memory: + size: {{ .memory }} + virtualMachineClassName: "{{ $.Values.namespace }}-cpu" + rootDisk: + size: 50Gi + image: + kind: VirtualImage + name: image + additionalDisks: + - size: 50Gi + storageClass: {{ $.Values.storageClass }} +{{- end }} +provider: + kubeconfigDataBase64: {{ .Values.kubeconfigDataBase64 }} + namespace: {{ .Values.namespace }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml new file mode 100644 index 0000000000..2ae5da7f95 --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/disabled-modules.yaml @@ -0,0 +1,11 @@ +{{/* "local-path-provisioner" */}} +{{- $modules := list "upmeter" "pod-reloader" "secret-copier" "namespace-configurator" -}} +{{ range $modules }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: {{ . 
}} +spec: + enabled: false +{{ end }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/ingress.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/ingress.yaml new file mode 100644 index 0000000000..387a3c89bc --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/ingress.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: deckhouse.io/v1 +kind: IngressNginxController +metadata: + name: main +spec: + inlet: HostPort + enableIstioSidecar: false + ingressClass: nginx + hostPort: + httpPort: 80 + httpsPort: 443 + nodeSelector: + node-role.kubernetes.io/master: '' + tolerations: + - effect: NoSchedule + operator: Exists diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml new file mode 100644 index 0000000000..369c2eb09f --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/mc.yaml @@ -0,0 +1,96 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: deckhouse +spec: + version: 1 + enabled: true + settings: + bundle: Default + logLevel: Info +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: global +spec: + version: 2 + settings: + {{- if .Values.defaultClusterStorageClass }} + defaultClusterStorageClass: {{ .Values.defaultClusterStorageClass }} + {{- end }} + modules: + publicDomainTemplate: "%s.{{ .Values.namespace }}.{{ .Values.domain }}" + https: + certManager: + clusterIssuerName: selfsigned + # clusterIssuerName: letsencrypt-staging + mode: CertManager +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: user-authn +spec: + version: 1 + enabled: true + settings: + controlPlaneConfigurator: + dexCAMode: DoNotNeed + publishAPI: + enabled: true + https: + mode: Global + global: + kubeconfigGeneratorMasterCA: "" +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: user-authz +spec: + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: cni-cilium +spec: + version: 1 + enabled: true + settings: + tunnelMode: VXLAN +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: prompp +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: prompp +spec: + imageTag: stable + scanInterval: 15s +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: snapshot-controller +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: snapshot-controller +spec: + imageTag: main + scanInterval: 15s diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml new file mode 100644 index 0000000000..a14d46e181 --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/nfs.yaml @@ -0,0 +1,35 @@ +{{ if .Values.nfsEnabled }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: csi-nfs +spec: + source: deckhouse + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: csi-nfs +spec: + imageTag: main + scanInterval: 10m +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: NFSStorageClass +metadata: + name: {{ .Values.nfsSC }} +spec: + connection: + host: "nfs-server.{{ .Values.namespace }}.svc.cluster.local" + share: / + nfsVersion: "4.2" + mountOptions: + mountMode: hard + timeout: 60 + retransmissions: 3 + 
reclaimPolicy: Delete + volumeBindingMode: Immediate +{{ end }} diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml new file mode 100644 index 0000000000..b3006cc249 --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/ngc.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: qemu-guest-agent-install-ubuntu.sh +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["ubuntu-lts", "debian"] + content: | + bb-apt-install qemu-guest-agent + systemctl enable --now qemu-guest-agent +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: astra-d8-dm-modules.conf +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["astra", "ubuntu-lts", "debian"] + content: | + bb-sync-file /etc/modules-load.d/d8-dm-modules.conf - << "EOF" + dm_snapshot + dm_thin_pool + dm_cache + EOF + + systemctl restart systemd-modules-load.service +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: install-tools.sh +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["*"] + content: | + bb-sync-file /etc/profile.d/01-kubectl-aliases.sh - << "EOF" + source <(/opt/deckhouse/bin/kubectl completion bash) + alias k=kubectl + complete -o default -F __start_kubectl k + EOF + + if [ ! -f /usr/local/bin/k9s ]; then + K9S_URL=$(curl -s https://api.github.com/repos/derailed/k9s/releases/latest | jq '.assets[] | select(.name=="k9s_Linux_amd64.tar.gz") | .browser_download_url' -r) + curl -L "${K9S_URL}" | tar -xz -C /usr/local/bin/ "k9s" + fi + + if [ ! -f /usr/local/bin/stern ]; then + STERN_URL=$(curl -s https://api.github.com/repos/stern/stern/releases/latest | jq '.assets[].browser_download_url | select(. | test("linux_amd64"))' -r) + curl -L "${STERN_URL}" | tar -xz -C /usr/local/bin/ "stern" + fi diff --git a/test/dvp-over-dvp/charts/cluster-config/templates/rbac.yaml b/test/dvp-over-dvp/charts/cluster-config/templates/rbac.yaml new file mode 100644 index 0000000000..6b8998a1e8 --- /dev/null +++ b/test/dvp-over-dvp/charts/cluster-config/templates/rbac.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: deckhouse.io/v1 +kind: ClusterAuthorizationRule +metadata: + name: admin +spec: + subjects: + - kind: User + name: admin@deckhouse.io + accessLevel: SuperAdmin + portForwarding: true +--- +apiVersion: deckhouse.io/v1 +kind: User +metadata: + name: admin +spec: + email: admin@deckhouse.io + # echo "t3chn0l0gi4" | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 + password: {{ .Values.passwordHash }} diff --git a/test/dvp-over-dvp/charts/infra/.helmignore b/test/dvp-over-dvp/charts/infra/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/test/dvp-over-dvp/charts/infra/Chart.yaml b/test/dvp-over-dvp/charts/infra/Chart.yaml new file mode 100644 index 0000000000..e0ab20a245 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: infra +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/test/dvp-over-dvp/charts/infra/templates/ingress.yaml b/test/dvp-over-dvp/charts/infra/templates/ingress.yaml new file mode 100644 index 0000000000..b813234319 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/ingress.yaml @@ -0,0 +1,74 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: dvp-over-dvp-80 + namespace: {{ .Values.namespace }} +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + dvp.deckhouse.io/node-group: master +--- +apiVersion: v1 +kind: Service +metadata: + name: dvp-over-dvp-443 + namespace: {{ .Values.namespace }} +spec: + ports: + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + dvp.deckhouse.io/node-group: master +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: wildcard-https + namespace: {{ .Values.namespace }} + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" +spec: + ingressClassName: nginx + rules: + {{- range .Values.ingressHosts }} + - host: "{{ . 
}}.{{ $.Values.namespace }}.{{ $.Values.domain }}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: dvp-over-dvp-443 + port: + number: 443 + {{- end }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: wildcard-http + namespace: {{ .Values.namespace }} + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + ingressClassName: nginx + rules: + - host: "*.{{ .Values.namespace }}.{{ .Values.domain }}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: dvp-over-dvp-80 + port: + number: 80 diff --git a/test/dvp-over-dvp/charts/infra/templates/jump-host/deploy.yaml b/test/dvp-over-dvp/charts/infra/templates/jump-host/deploy.yaml new file mode 100644 index 0000000000..e76f76dbd0 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/jump-host/deploy.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jump-host + namespace: {{ .Values.namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: jump-host + template: + metadata: + labels: + app: jump-host + spec: + containers: + - name: jump-host + image: registry-dvp.dev.flant.dev/tools/jump-host:v0.1.2 + imagePullPolicy: Always + resources: + limits: + cpu: "200m" + memory: "200Mi" + requests: + cpu: "200m" + memory: "200Mi" + ports: + - containerPort: 2222 + env: + - name: SSH_KEY + value: "{{ .Values.sshPublicKey }}" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + securityContext: + runAsNonRoot: true + runAsUser: 1000 diff --git a/test/dvp-over-dvp/charts/infra/templates/jump-host/svc.yaml b/test/dvp-over-dvp/charts/infra/templates/jump-host/svc.yaml new file mode 100644 index 0000000000..cacb3421ab --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/jump-host/svc.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: jump-host + namespace: {{ .Values.namespace }} +spec: + type: NodePort + selector: + app: jump-host + ports: + - protocol: TCP + port: 2222 + targetPort: 2222 diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml new file mode 100644 index 0000000000..99573c35b2 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/nfs-server/deploy.yaml @@ -0,0 +1,44 @@ +{{ if .Values.nfsEnabled }} +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: nfs-server + namespace: {{ .Values.namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: nfs-server + template: + metadata: + name: nfs-server + labels: + app: nfs-server + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: nfs-server + image: itsthenetwork/nfs-server-alpine:latest + imagePullPolicy: IfNotPresent + env: + - name: SHARED_DIRECTORY + value: "/exports" + volumeMounts: + - mountPath: /exports + name: nfs-data + ports: + - name: tcp-2049 + containerPort: 2049 + protocol: TCP + - name: udp-111 + containerPort: 111 + protocol: UDP + securityContext: + privileged: true + volumes: + - name: nfs-data + persistentVolumeClaim: + claimName: nfs-data +{{ end }} diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml new file mode 100644 index 0000000000..430796d9b1 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/nfs-server/pvc.yaml @@ -0,0 +1,15 @@ +{{ if .Values.nfsEnabled }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim 
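+# Backing store for the in-cluster NFS server defined in deploy.yaml;
+# provisioned from the parent cluster's storage class.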
+metadata: + name: nfs-data + namespace: {{ .Values.namespace }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: {{ .Values.storageClass }} +{{ end }} diff --git a/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml b/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml new file mode 100644 index 0000000000..a7e850a669 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/nfs-server/svc.yaml @@ -0,0 +1,21 @@ +{{ if .Values.nfsEnabled }} +--- +kind: Service +apiVersion: v1 +metadata: + name: nfs-server + namespace: {{ .Values.namespace }} + labels: + app: nfs-server +spec: + type: ClusterIP + selector: + app: nfs-server + ports: + - name: tcp-2049 + port: 2049 + protocol: TCP + - name: udp-111 + port: 111 + protocol: UDP +{{ end }} diff --git a/test/dvp-over-dvp/charts/infra/templates/ns.yaml b/test/dvp-over-dvp/charts/infra/templates/ns.yaml new file mode 100644 index 0000000000..77db5f9f65 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Values.namespace }} diff --git a/test/dvp-over-dvp/charts/infra/templates/rbac/rbac.yaml b/test/dvp-over-dvp/charts/infra/templates/rbac/rbac.yaml new file mode 100644 index 0000000000..9dec96bfa3 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/rbac/rbac.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.sa}} + namespace: {{ .Values.namespace }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.sa}}-secret + namespace: {{ .Values.namespace }} + annotations: + kubernetes.io/service-account.name: {{ .Values.sa}} +type: kubernetes.io/service-account-token +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Values.sa}}-rb + namespace: {{ .Values.namespace }} +subjects: + - kind: ServiceAccount + name: {{ .Values.sa}} + namespace: {{ .Values.namespace }} +roleRef: + kind: ClusterRole + name: d8:use:role:manager + apiGroup: rbac.authorization.k8s.io diff --git a/test/dvp-over-dvp/charts/infra/templates/vi.yaml b/test/dvp-over-dvp/charts/infra/templates/vi.yaml new file mode 100644 index 0000000000..66034a649d --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/vi.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualImage +metadata: + name: image + namespace: {{ .Values.namespace }} +spec: + storage: ContainerRegistry + dataSource: + type: HTTP + http: + url: {{ .Values.image.url }} diff --git a/test/dvp-over-dvp/charts/infra/templates/vmc.yaml b/test/dvp-over-dvp/charts/infra/templates/vmc.yaml new file mode 100644 index 0000000000..39330ced39 --- /dev/null +++ b/test/dvp-over-dvp/charts/infra/templates/vmc.yaml @@ -0,0 +1,7 @@ +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualMachineClass +metadata: + name: "{{ .Values.namespace }}-cpu" +spec: + cpu: + type: Discovery diff --git a/test/dvp-over-dvp/nested-sa-config/gen-sa.sh b/test/dvp-over-dvp/nested-sa-config/gen-sa.sh new file mode 100644 index 0000000000..02e01b5e55 --- /dev/null +++ b/test/dvp-over-dvp/nested-sa-config/gen-sa.sh @@ -0,0 +1,184 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+get_current_date() {
+  date +"%H:%M:%S %d-%m-%Y"
+}
+
+get_timestamp() {
+  date +%s
+}
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+CYAN='\033[0;36m'
+NC='\033[0m' # No Color
+
+log_info() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "${BLUE}[INFO]${NC} $message"
+  if [ -n "$LOG_FILE" ]; then
+    echo "[$timestamp] [INFO] $message" >> "$LOG_FILE"
+  fi
+}
+
+log_success() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "${GREEN}[SUCCESS]${NC} $message"
+  if [ -n "$LOG_FILE" ]; then
+    echo "[$timestamp] [SUCCESS] $message" >> "$LOG_FILE"
+  fi
+}
+
+log_warning() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "${YELLOW}[WARNING]${NC} $message"
+  if [ -n "$LOG_FILE" ]; then
+    echo "[$timestamp] [WARNING] $message" >> "$LOG_FILE"
+  fi
+}
+
+log_error() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "${RED}[ERROR]${NC} $message"
+  if [ -n "$LOG_FILE" ]; then
+    echo "[$timestamp] [ERROR] $message" >> "$LOG_FILE"
+  fi
+}
+
+exit_trap() {
+  echo ""
+  log_info "Exiting..."
+  echo ""
+  exit 0
+}
+
+kubectl() {
+  sudo /opt/deckhouse/bin/kubectl "$@"
+}
+
+trap exit_trap SIGINT SIGTERM
+
+
+SA_NAME=$1
+CLUSTER_PREFIX=$2
+CLUSTER_NAME=$3
+FILE_NAME=$4
+
+if [[ -z "$SA_NAME" ]] || [[ -z "$CLUSTER_PREFIX" ]] || [[ -z "$CLUSTER_NAME" ]]; then
+  log_error "Usage: gen-sa.sh <SA_NAME> <CLUSTER_PREFIX> <CLUSTER_NAME> [FILE_NAME]"
+  exit 1
+fi
+
+if [[ -z "$FILE_NAME" ]]; then
+  FILE_NAME=/tmp/kube.config
+fi
+
+SA_TOKEN=virt-${CLUSTER_PREFIX}-${SA_NAME}-token
+SA_CAR_NAME=virt-${CLUSTER_PREFIX}-${SA_NAME}
+
+USER_NAME=${SA_NAME}
+CONTEXT_NAME=${CLUSTER_NAME}-${USER_NAME}
+
+if kubectl cluster-info > /dev/null 2>&1; then
+  log_success "Access to Kubernetes cluster exists."
+else
+  log_error "No access to Kubernetes cluster or configuration issue."
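+  # Everything below needs API access, so bail out early.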
+  exit 1
+fi
+
+sleep 2
+log_info "===="
+log_info "The kubeconfig will only be generated if the k8s cluster is reachable, either directly or via an SSH tunnel"
+log_info "===="
+sleep 2
+
+
+log_info "Apply SA, Secrets and ClusterAuthorizationRule"
+kubectl apply -f - <<EOF
+          cat << EOF > /etc/ceph/ceph.conf
+          [global]
+          mon_host = $(sed 's/[a-z]=//g' /etc/rook/mon-endpoints)
+          EOF
+
+          cat << EOF > /etc/ceph/ceph.client.admin.keyring
+          [$ROOK_CEPH_USERNAME]
+          key = $ROOK_CEPH_SECRET
+          EOF
+        env:
+          - name: ROOK_CEPH_USERNAME
+            valueFrom:
+              secretKeyRef:
+                key: ceph-username
+                name: rook-ceph-mon
+          - name: ROOK_CEPH_SECRET
+            valueFrom:
+              secretKeyRef:
+                key: ceph-secret
+                name: rook-ceph-mon
+        volumeMounts:
+          - mountPath: /etc/ceph
+            name: ceph-config
+          - mountPath: /etc/rook
+            name: mon-endpoint-volume
+      containers:
+        - name: ceph-tools
+          command:
+            - sleep
+            - infinity
+          image: quay.io/ceph/ceph:v18.2.2
+          terminationMessagePath: /dev/termination-log
+          terminationMessagePolicy: File
+          tty: true
+          workingDir: /var/lib/ceph
+          volumeMounts:
+            - mountPath: /etc/ceph
+              name: ceph-config
+            - mountPath: /var/lib/ceph
+              name: homedir
+          securityContext:
+            runAsGroup: 167
+            runAsNonRoot: true
+            runAsUser: 167
+      volumes:
+        - name: mon-endpoint-volume
+          configMap:
+            defaultMode: 420
+            items:
+              - key: data
+                path: mon-endpoints
+            name: rook-ceph-mon-endpoints
+        - name: ceph-config
+          emptyDir: {}
+        - name: homedir
+          emptyDir: {}
diff --git a/test/dvp-over-dvp/storage/ceph/ceph-configure.sh b/test/dvp-over-dvp/storage/ceph/ceph-configure.sh
new file mode 100644
index 0000000000..aad18a1bf5
--- /dev/null
+++ b/test/dvp-over-dvp/storage/ceph/ceph-configure.sh
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+
+# Copyright 2025 Flant JSC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
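+
+# Prepares Ceph storage for the nested cluster: creates a dedicated Ceph
+# client through the rook-ceph-tools toolbox, reads the cluster fsid and
+# monitor service IPs, and applies a CephClusterConnection built from them.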
+
+ceph_user_pool=ceph-rbd-pool-r2
+echo "Use user $ceph_user_pool"
+echo "Set permissions for user $ceph_user_pool (mon 'allow *' mgr 'allow *' osd 'allow *')"
+usr=$(kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- \
+  ceph auth get-or-create client.$ceph_user_pool mon 'allow *' mgr 'allow *' osd "allow *")
+echo "Get fsid"
+fsid=$(kubectl -n d8-operator-ceph exec deployments/rook-ceph-tools -c ceph-tools -- ceph fsid)
+
+userKey="${usr#*key = }"
+ceph_monitors_ip=$(kubectl -n d8-operator-ceph get svc | grep mon | awk '{print $3}')
+monitors_yaml=$(
+  for monitor_ip in $ceph_monitors_ip; do
+    echo " - $monitor_ip:6789"
+  done
+)
+
+# Verify we have monitors
+if [ -z "$monitors_yaml" ]; then
+  echo "ERROR: No Ceph monitors found"
+  exit 1
+fi
+
+echo "Create CephClusterConnection"
+kubectl apply -f - <<EOF
+cat << EOF >> "${manifest}"
+---
+apiVersion: storage.deckhouse.io/v1alpha1
+kind: LVMVolumeGroup
+metadata:
+  name: vg-data-${node_name}-${dev_path}
+spec:
+  actualVGNameOnTheNode: vg-thin-data
+  type: Local
+  local:
+    nodeName: ${dev_node}
+  blockDeviceSelector:
+    matchExpressions:
+      - key: kubernetes.io/metadata.name
+        operator: In
+        values:
+          - ${dev_name}
+  thinPools:
+    - name: thin-data
+      size: ${LVMVG_SIZE}
+      allocationLimit: 100%
+EOF
+
+done
+
+kubectl apply -f "${manifest}"
diff --git a/test/dvp-over-dvp/storage/sds-replicated/mc.yaml b/test/dvp-over-dvp/storage/sds-replicated/mc.yaml
new file mode 100644
index 0000000000..b7d6abda99
--- /dev/null
+++ b/test/dvp-over-dvp/storage/sds-replicated/mc.yaml
@@ -0,0 +1,32 @@
+---
+apiVersion: deckhouse.io/v1alpha1
+kind: ModuleConfig
+metadata:
+  name: sds-node-configurator
+spec:
+  version: 1
+  enabled: true
+---
+apiVersion: deckhouse.io/v1alpha1
+kind: ModuleConfig
+metadata:
+  name: sds-replicated-volume
+spec:
+  version: 1
+  enabled: true
+---
+apiVersion: deckhouse.io/v1alpha2
+kind: ModulePullOverride
+metadata:
+  name: sds-node-configurator
+spec:
+  imageTag: main
+  scanInterval: 15s
+---
+apiVersion: deckhouse.io/v1alpha2
+kind: ModulePullOverride
+metadata:
+  name: sds-replicated-volume
+spec:
+  imageTag: main
+  scanInterval: 15s
diff --git a/test/dvp-over-dvp/storage/sds-replicated/rsc-gen.sh b/test/dvp-over-dvp/storage/sds-replicated/rsc-gen.sh
new file mode 100644
index 0000000000..7d93443620
--- /dev/null
+++ b/test/dvp-over-dvp/storage/sds-replicated/rsc-gen.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+
+# Copyright 2025 Flant JSC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
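+
+# Builds sds-replicated-volume resources from the LVMVolumeGroups already in
+# the cluster: one ReplicatedStoragePool over their thin pools and three
+# ReplicatedStorageClasses (nested-thin-r2, nested-thin-r1,
+# nested-thin-r1-immediate), then sets nested-thin-r1 as the default
+# cluster storage class via the global ModuleConfig.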
+
+manifest=sds-rsp-rsc.yaml
+replicatedStoragePoolName=thin-data
+
+pools=$(kubectl get lvmvolumegroup -o json | jq '.items[] | {name: .metadata.name, thinPoolName: .spec.thinPools[0].name}' -rc)
+
+cat << EOF > "${manifest}"
+---
+apiVersion: storage.deckhouse.io/v1alpha1
+kind: ReplicatedStoragePool
+metadata:
+  name: $replicatedStoragePoolName
+spec:
+  type: LVMThin
+  lvmVolumeGroups:
+EOF
+
+for pool in ${pools}; do
+  vg_name=$(echo "$pool" | jq -r '.name')
+  thin_pool_name=$(echo "$pool" | jq -r '.thinPoolName')
+  echo "${thin_pool_name} ${vg_name}"
+cat << EOF >> "${manifest}"
+  - name: ${vg_name}
+    thinPoolName: ${thin_pool_name}
+EOF
+done
+
+cat << EOF >> "${manifest}"
+---
+apiVersion: storage.deckhouse.io/v1alpha1
+kind: ReplicatedStorageClass
+metadata:
+  name: nested-thin-r2
+spec:
+  replication: Availability
+  storagePool: $replicatedStoragePoolName
+  reclaimPolicy: Delete
+  volumeAccess: PreferablyLocal
+  topology: Ignored
+---
+apiVersion: storage.deckhouse.io/v1alpha1
+kind: ReplicatedStorageClass
+metadata:
+  name: nested-thin-r1
+spec:
+  replication: None
+  storagePool: $replicatedStoragePoolName
+  reclaimPolicy: Delete
+  volumeAccess: PreferablyLocal
+  topology: Ignored
+---
+apiVersion: storage.deckhouse.io/v1alpha1
+kind: ReplicatedStorageClass
+metadata:
+  name: nested-thin-r1-immediate
+spec:
+  replication: None
+  storagePool: $replicatedStoragePoolName
+  reclaimPolicy: Delete
+  volumeAccess: Any
+  topology: Ignored
+EOF
+
+kubectl apply -f "${manifest}"
+
+DEFAULT_STORAGE_CLASS=nested-thin-r1
+kubectl patch mc global --type='json' -p='[{"op": "replace", "path": "/spec/settings/defaultClusterStorageClass", "value": "'"$DEFAULT_STORAGE_CLASS"'"}]'
+
+sleep 2
+echo "Showing Storage Classes"
+kubectl get storageclass
+echo " "
diff --git a/test/dvp-over-dvp/tools/deckhouse-queue.sh b/test/dvp-over-dvp/tools/deckhouse-queue.sh
new file mode 100644
index 0000000000..cada5c5a46
--- /dev/null
+++ b/test/dvp-over-dvp/tools/deckhouse-queue.sh
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+
+# Copyright 2025 Flant JSC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
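+
+# Waits for Deckhouse to settle: first until deploy/deckhouse in d8-system
+# becomes Available (up to 60 x 20s waits, dumping pods and the task queue on
+# every 5th attempt), then until "d8 p queue list" reports no active tasks
+# (up to 90 checks, 10s apart). Exits non-zero if Deckhouse never gets ready.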
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+CYAN='\033[0;36m'
+NC='\033[0m' # No Color
+
+get_current_date() {
+  date +"%H:%M:%S %d-%m-%Y"
+}
+
+get_timestamp() {
+  date +%s
+}
+
+log_info() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "[$timestamp] ${BLUE}[INFO]${NC} $message"
+}
+
+log_success() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "[$timestamp] ${GREEN}[SUCCESS]${NC} $message"
+}
+
+log_warning() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "[$timestamp] ${YELLOW}[WARNING]${NC} $message"
+}
+
+log_error() {
+  local message="$1"
+  local timestamp=$(get_current_date)
+  echo -e "[$timestamp] ${RED}[ERROR]${NC} $message"
+}
+
+kubectl() {
+  /opt/deckhouse/bin/kubectl "$@"
+  # sudo /opt/deckhouse/bin/kubectl "$@"
+}
+
+d8() {
+  /opt/deckhouse/bin/d8 "$@"
+  # sudo /opt/deckhouse/bin/d8 "$@"
+}
+
+
+d8_queue_main() {
+  echo "$( d8 p queue main | grep -Po '(?<=length )([0-9]+)' )"
+}
+
+d8_queue_list() {
+  d8 p queue list | grep -Po '([0-9]+)(?= active)'
+}
+
+d8_queue() {
+  local count=90
+  # local main_queue_ready=false
+  local list_queue_ready=false
+
+  for i in $(seq 1 $count); do
+    # if [ "$(d8_queue_main)" == "0" ]; then
+    #   echo "main queue is clear"
+    #   main_queue_ready=true
+    # else
+    #   echo "Show main queue"
+    #   d8 p queue main | head -n25 || echo "Failed to retrieve main queue"
+    # fi
+
+    if [ "$(d8_queue_list)" == "0" ]; then
+      echo "queue list is clear"
+      list_queue_ready=true
+    else
+      echo "Show queue list"
+      d8 p queue list | head -n25 || echo "Failed to retrieve queue"
+    fi
+
+    if [ "$list_queue_ready" = true ]; then
+    # if [ "$main_queue_ready" = true ] && [ "$list_queue_ready" = true ]; then
+      break
+    fi
+    echo "Wait until queues are empty ${i}/${count}"
+    sleep 10
+  done
+}
+
+d8_ready() {
+  local ready=false
+  local count=60
+  common_start_time=$(get_timestamp)
+  for i in $(seq 1 $count); do
+    start_time=$(get_timestamp)
+    if kubectl -n d8-system wait deploy/deckhouse --for condition=available --timeout=20s 2>/dev/null; then
+      ready=true
+      break
+    fi
+    end_time=$(get_timestamp)
+    difference=$((end_time - start_time))
+    log_info "Wait until deckhouse is ready ${i}/${count} after ${difference}s"
+    if (( i % 5 == 0 )); then
+      kubectl -n d8-system get pods
+      d8 p queue list | head -n25 || echo "Failed to retrieve queue"
+    fi
+  done
+
+  if [ "$ready" = true ]; then
+    log_success "Deckhouse is Ready!"
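+    # The deployment is Available; also wait for the Deckhouse task queues to drain.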
+ echo "Checking queues" + d8_queue + else + common_end_time=$(get_timestamp) + common_difference=$((common_end_time - common_start_time)) + common_formatted_difference=$(date -u +'%H:%M:%S' -d "@$common_difference") + log_error "Deckhouse is not ready after ${count} attempts and ${common_formatted_difference} time, check its queue for errors:" + d8 p queue main | head -n25 + exit 1 + fi +} + +start_time=$(get_timestamp) +log_info "Checking that deckhouse is ready" +d8_ready +end_time=$(get_timestamp) +difference=$((end_time - start_time)) +log_success "Deckhouse is ready after $(date -ud "@$difference" +'%H:%M:%S')" diff --git a/test/e2e/scripts/task_run_ci.sh b/test/e2e/scripts/task_run_ci.sh index 9a3e7a8457..276b961ed6 100755 --- a/test/e2e/scripts/task_run_ci.sh +++ b/test/e2e/scripts/task_run_ci.sh @@ -21,7 +21,12 @@ echo "DATE=$DATE" >> $GITHUB_ENV START_TIME=$(date +"%H:%M:%S") echo "START_TIME=$START_TIME" >> $GITHUB_ENV -go tool ginkgo -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT +if [[ -n $FOCUS ]];then + go tool ginkgo --focus "$FOCUS" -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT +else + go tool ginkgo -v --race --timeout=$TIMEOUT | tee $GINKGO_RESULT +fi + EXIT_CODE="${PIPESTATUS[0]}" RESULT=$(sed -e "s/\x1b\[[0-9;]*m//g" $GINKGO_RESULT | grep --color=never -E "FAIL!|SUCCESS!") if [[ $RESULT == FAIL!* || $EXIT_CODE -ne "0" ]]; then