diff --git a/.automation.conf/config.sh b/.automation.conf/config.sh index 84b11dd07..76f2a59a0 100644 --- a/.automation.conf/config.sh +++ b/.automation.conf/config.sh @@ -3,7 +3,14 @@ # See: https://github.com/stackhpc/docker-rally/blob/master/bin/rally-verify-wrapper.sh for a full list of tempest parameters that can be overriden. # You can override tempest parameters like so: -export TEMPEST_CONCURRENCY=2 + +# The Tempest concurrency determines how many tests can be running at once. +# Higher values run tests faster but risk running out of resources and failing tests +# On production systems, Tempest concurrency can usually be set to a high number e.g. 16-64. It is often limited by the number of available floating IPs. +# On virtualised test environments, compute and networking speeds often limit the concurrency to 1-16 before tests begin to fail due to timeouts. +export TEMPEST_CONCURRENCY=16 + + # Specify single test whilst experimenting #export TEMPEST_PATTERN="${TEMPEST_PATTERN:-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name}" @@ -21,9 +28,7 @@ if [ ! -z ${KAYOBE_ENVIRONMENT:+x} ]; then fi if [[ "$KAYOBE_ENVIRONMENT" =~ "ci-multinode" ]]; then - # SMSLab is currently running with 1G switches. This causes tests using volumes and images to fail if - # the concurrency is set too high. - export TEMPEST_CONCURRENCY=1 + export TEMPEST_CONCURRENCY=4 # Uncomment this to perform a full tempest test # export KAYOBE_AUTOMATION_TEMPEST_LOADLIST=tempest-full # export KAYOBE_AUTOMATION_TEMPEST_SKIPLIST=ci-multinode-tempest-full diff --git a/.github/workflows/stackhpc-all-in-one.yml b/.github/workflows/stackhpc-all-in-one.yml index a1defbebd..36a1ea99c 100644 --- a/.github/workflows/stackhpc-all-in-one.yml +++ b/.github/workflows/stackhpc-all-in-one.yml @@ -88,7 +88,7 @@ jobs: - name: Install Package uses: ConorMacBride/install-package@main with: - apt: git unzip nodejs + apt: git unzip nodejs openssh-client # If testing upgrade, checkout previous release, otherwise checkout current branch - name: Checkout ${{ inputs.upgrade && 'previous release' || 'current' }} config @@ -223,6 +223,7 @@ jobs: admin_bootproto: dhcp admin_ips: controller0: "{{ access_ip_v4.value }}" + admin_zone: admin EOF - name: Write Terraform network interface config @@ -443,7 +444,7 @@ jobs: -v $(pwd)/tempest-artifacts:/stack/tempest-artifacts \ -e KAYOBE_ENVIRONMENT -e KAYOBE_VAULT_PASSWORD -e KAYOBE_AUTOMATION_SSH_PRIVATE_KEY \ $KAYOBE_IMAGE \ - /stack/kayobe-automation-env/src/kayobe-config/.automation/pipeline/tempest.sh -e ansible_user=stack -e rally_no_sensitive_log=false + /stack/kayobe-automation-env/src/kayobe-config/.automation/pipeline/tempest.sh -e ansible_user=stack env: KAYOBE_AUTOMATION_SSH_PRIVATE_KEY: ${{ steps.ssh_key.outputs.ssh_key }} diff --git a/.github/workflows/stackhpc-build-kayobe-image.yml b/.github/workflows/stackhpc-build-kayobe-image.yml index f357692c0..677be9ac8 100644 --- a/.github/workflows/stackhpc-build-kayobe-image.yml +++ b/.github/workflows/stackhpc-build-kayobe-image.yml @@ -98,3 +98,25 @@ jobs: push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} + + - name: Send message to Slack via Workflow Builder + uses: slackapi/slack-github-action@v1.26.0 + with: + payload: | + { + "channel-id": "${{ env.SLACK_CHANNEL_ID }}", + "inputs": "${{ env.INPUTS }}", + "message": "${{ env.MESSAGE }}", + "results-url": "${{ env.RESULTS_URL }}", + "workflow-url": "${{ env.WORKFLOW_URL }}" + } + env: + 
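+          # Assumed: SLACK_WEBHOOK_URL is a Slack Workflow Builder webhook; the payload keys above must match the variables defined in that workflow.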
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + # #release-train-alerts + SLACK_CHANNEL_ID: C03B28HRP53 + INPUTS: >- + branch: ${{ github.ref_name }} + MESSAGE: "SKC Build Kayobe Image workflow failed :sob:" + RESULTS_URL: "N/A" + WORKFLOW_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + if: failure() && github.event_name == 'push' diff --git a/.github/workflows/stackhpc-ci-cleanup.yml b/.github/workflows/stackhpc-ci-cleanup.yml index ed9ec327c..a14b2970c 100644 --- a/.github/workflows/stackhpc-ci-cleanup.yml +++ b/.github/workflows/stackhpc-ci-cleanup.yml @@ -75,3 +75,24 @@ jobs: OS_CLOUD: openstack OS_APPLICATION_CREDENTIAL_ID: ${{ secrets.OS_APPLICATION_CREDENTIAL_ID }} OS_APPLICATION_CREDENTIAL_SECRET: ${{ secrets.OS_APPLICATION_CREDENTIAL_SECRET }} + + - name: Send message to Slack via Workflow Builder + uses: slackapi/slack-github-action@v1.26.0 + with: + payload: | + { + "channel-id": "${{ env.SLACK_CHANNEL_ID }}", + "inputs": "${{ env.INPUTS }}", + "message": "${{ env.MESSAGE }}", + "results-url": "${{ env.RESULTS_URL }}", + "workflow-url": "${{ env.WORKFLOW_URL }}" + } + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + # #release-train-alerts + SLACK_CHANNEL_ID: C03B28HRP53 + INPUTS: "N/A" + MESSAGE: "SKC CI Cleanup workflow failed :sob:" + RESULTS_URL: "N/A" + WORKFLOW_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + if: failure() diff --git a/.github/workflows/stackhpc-container-image-build.yml b/.github/workflows/stackhpc-container-image-build.yml index 8251d8757..c737dc7b0 100644 --- a/.github/workflows/stackhpc-container-image-build.yml +++ b/.github/workflows/stackhpc-container-image-build.yml @@ -71,20 +71,21 @@ jobs: # Dynamically define job matrix. # We need a separate matrix entry for each distribution, when the relevant input is true. 
# https://stackoverflow.com/questions/65384420/how-do-i-make-a-github-action-matrix-element-conditional + # For now include only RL9 in aarch64 - name: Generate build matrix id: set-matrix run: | - echo -n "matrix={\"distro\": [" >> $GITHUB_OUTPUT + echo -n "matrix={\"include\": [" >> $GITHUB_OUTPUT comma="" - if [[ ${{ inputs.rocky-linux-9 }} == 'true' ]]; then - echo -n "$comma\"rocky\"" >> $GITHUB_OUTPUT + if [[ '${{ inputs.rocky-linux-9 }}' == 'true' ]]; then + echo -n "$comma{\"distro\": \"rocky\", \"arch\": \"amd64\"}" >> $GITHUB_OUTPUT comma=", " + echo -n "$comma{\"distro\": \"rocky\", \"arch\": \"aarch64\"}" >> $GITHUB_OUTPUT fi - if [[ ${{ inputs.ubuntu-jammy }} == 'true' ]]; then - echo -n "$comma\"ubuntu\"" >> $GITHUB_OUTPUT - comma=", " + if [[ '${{ inputs.ubuntu-jammy }}' == 'true' ]]; then + echo -n "$comma{\"distro\": \"ubuntu\", \"arch\": \"amd64\"}" >> $GITHUB_OUTPUT fi - echo "], \"arch\": [\"amd64\", \"aarch64\"]}" >> $GITHUB_OUTPUT + echo "]}" >> $GITHUB_OUTPUT - name: Display container datetime tag run: | @@ -99,10 +100,6 @@ jobs: strategy: fail-fast: false matrix: ${{ fromJson(needs.generate-tag.outputs.matrix) }} - # Exclude ubuntu aarch64 builds for now - exclude: - - distro: ubuntu - arch: aarch64 needs: - generate-tag steps: @@ -192,7 +189,7 @@ jobs: args="$args -e kolla_base_arch=${{ matrix.arch }}" fi args="$args -e kolla_base_distro=${{ matrix.distro }}" - args="$args -e kolla_tag=${{ steps.write-kolla-tag.outputs.kolla-tag }} + args="$args -e kolla_tag=${{ steps.write-kolla-tag.outputs.kolla-tag }}" args="$args -e stackhpc_repo_mirror_auth_proxy_enabled=true" source venvs/kayobe/bin/activate && source src/kayobe-config/kayobe-env --environment ci-builder && @@ -320,6 +317,107 @@ jobs: run: if [ $(wc -l < image-build-logs/image-scan-output/critical-images.txt) -gt 0 ]; then cat image-build-logs/image-scan-output/critical-images.txt && exit 1; fi if: ${{ !inputs.push-dirty && !cancelled() }} + create-manifests: + # Only for Rocky Linux for now + name: Create Docker Manifests + if: github.repository == 'stackhpc/stackhpc-kayobe-config' && inputs.push + runs-on: arc-skc-container-image-builder-runner + permissions: {} + needs: + - container-image-build + steps: + - name: Install package dependencies + run: | + sudo apt update + sudo apt install -y git unzip python3-wheel python3-pip python3-venv curl jq wget openssh-server openssh-client + + - name: Install gh + run: | + sudo mkdir -p -m 755 /etc/apt/keyrings && wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null + sudo chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null + sudo apt update + sudo apt install gh -y + + - name: Checkout Kayobe Config + uses: actions/checkout@v4 + with: + path: src/kayobe-config + + - name: Install Kayobe + run: | + mkdir -p venvs && + pushd venvs && + python3 -m venv kayobe && + source kayobe/bin/activate && + pip install -U pip && + pip install -r ../src/kayobe-config/requirements.txt + + # Required for Pulp auth proxy deployment and Docker registry login. + # Normally installed during host configure. + - name: Install Docker Python SDK + run: | + sudo pip install docker + + # See etc/kayobe/ansible/roles/pulp_auth_proxy/README.md for details. 
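+      # The proxy provides unauthenticated local access to the package
+      # repositories in Ark, mirroring the proxy setup used by the image build jobs.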
+ # NOTE: We override pulp_auth_proxy_conf_path to a path shared by the + # runner and dind containers. + - name: Deploy an authenticating package repository mirror proxy + run: | + source venvs/kayobe/bin/activate && + source src/kayobe-config/kayobe-env --environment ci-builder && + kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/pulp-auth-proxy.yml -e pulp_auth_proxy_conf_path=/home/runner/_work/pulp_proxy + env: + KAYOBE_VAULT_PASSWORD: ${{ secrets.KAYOBE_VAULT_PASSWORD }} + + - name: Download artifacts + uses: actions/download-artifact@v4 + + - name: Combine pushed images lists + run: | + find . -name 'push-attempt-images.txt' -exec cat {} + > all-pushed-images.txt + + - name: Log in to Docker registry + run: | + source venvs/kayobe/bin/activate && + source src/kayobe-config/kayobe-env --environment ci-builder && + kayobe playbook run ${KAYOBE_CONFIG_PATH}/ansible/docker-registry-login.yml + env: + KAYOBE_VAULT_PASSWORD: ${{ secrets.KAYOBE_VAULT_PASSWORD }} + + - name: Create and Push Docker Manifests + run: | + set -ex + mkdir -p logs + images=$(cat all-pushed-images.txt | sort | uniq) + # Filter out Ubuntu images + manifest_images=$(echo "$images" | grep 'rocky' | sed -E 's/-(amd64|aarch64)$//' | sort | uniq) + for base_image in $manifest_images; do + arch_images="" + for arch in amd64 aarch64; do + arch_image="${base_image}-${arch}" + # Check if the image exists in the registry + if docker manifest inspect "$arch_image" > /dev/null 2>&1; then + arch_images="$arch_images $arch_image" + fi + done + if [ -n "$arch_images" ]; then + echo "Creating manifest for $base_image with images:$arch_images" | tee -a logs/manifest-creation.log + docker manifest create "$base_image" $arch_images | tee -a logs/manifest-creation.log + docker manifest push "$base_image" | tee -a logs/manifest-creation.log + else + echo "No images found for $base_image, skipping." | tee -a logs/manifest-creation.log + fi + done + + - name: Upload manifest logs + uses: actions/upload-artifact@v4 + with: + name: manifest-logs + path: | + all-pushed-images.txt + logs/manifest-creation.log + # NOTE(mgoddard): Trigger another CI workflow in the # stackhpc-release-train repository. 
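+      # The default GITHUB_TOKEN cannot trigger workflows in another repository;
+      # see the promotion section of the CI documentation for the token that is used.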
- name: Trigger container image repository sync diff --git a/.github/workflows/stackhpc-multinode-periodic.yml b/.github/workflows/stackhpc-multinode-periodic.yml index cb94705bc..7debe8fa3 100644 --- a/.github/workflows/stackhpc-multinode-periodic.yml +++ b/.github/workflows/stackhpc-multinode-periodic.yml @@ -35,7 +35,7 @@ jobs: name: Multinode periodic needs: - generate-inputs - uses: stackhpc/stackhpc-openstack-gh-workflows/.github/workflows/multinode.yml@1.1.0 + uses: stackhpc/stackhpc-openstack-gh-workflows/.github/workflows/multinode.yml@1.2.0 with: multinode_name: mn-prdc-${{ github.run_id }} os_distribution: ${{ needs.generate-inputs.outputs.os_distribution }} diff --git a/.github/workflows/stackhpc-multinode.yml b/.github/workflows/stackhpc-multinode.yml index 5c8b67d1e..3ec055bb4 100644 --- a/.github/workflows/stackhpc-multinode.yml +++ b/.github/workflows/stackhpc-multinode.yml @@ -52,7 +52,7 @@ name: Multinode jobs: multinode: name: Multinode - uses: stackhpc/stackhpc-openstack-gh-workflows/.github/workflows/multinode.yml@1.1.0 + uses: stackhpc/stackhpc-openstack-gh-workflows/.github/workflows/multinode.yml@1.2.0 with: multinode_name: ${{ inputs.multinode_name }} os_distribution: ${{ inputs.os_distribution }} diff --git a/.github/workflows/stackhpc-promote.yml b/.github/workflows/stackhpc-promote.yml index c68094733..10c160021 100644 --- a/.github/workflows/stackhpc-promote.yml +++ b/.github/workflows/stackhpc-promote.yml @@ -43,3 +43,25 @@ jobs: - name: Display link to container image promotion workflows run: | echo "::notice Container image promote workflow: https://github.com/stackhpc/stackhpc-release-train/actions/workflows/container-promote.yml" + + - name: Send message to Slack via Workflow Builder + uses: slackapi/slack-github-action@v1.26.0 + with: + payload: | + { + "channel-id": "${{ env.SLACK_CHANNEL_ID }}", + "inputs": "${{ env.INPUTS }}", + "message": "${{ env.MESSAGE }}", + "results-url": "${{ env.RESULTS_URL }}", + "workflow-url": "${{ env.WORKFLOW_URL }}" + } + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + # #release-train-alerts + SLACK_CHANNEL_ID: C03B28HRP53 + INPUTS: >- + branch: ${{ github.ref_name }} + MESSAGE: "SKC promote workflow failed :sob:" + RESULTS_URL: "N/A" + WORKFLOW_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + if: failure() diff --git a/doc/source/configuration/cephadm.rst b/doc/source/configuration/cephadm.rst index 85f8ad394..6f44eb765 100644 --- a/doc/source/configuration/cephadm.rst +++ b/doc/source/configuration/cephadm.rst @@ -342,6 +342,8 @@ should be used in the Kolla Manila configuration e.g.: manila_cephfs_filesystem_name: manila-cephfs +.. _RGWs-With-Ceph: + RADOS Gateways -------------- diff --git a/doc/source/configuration/firewall.rst b/doc/source/configuration/firewall.rst new file mode 100644 index 000000000..2a14d8d6c --- /dev/null +++ b/doc/source/configuration/firewall.rst @@ -0,0 +1,462 @@ +.. _firewall: + +======== +Firewall +======== + +StackHPC Kayobe configuration provides a standardised firewalld configuration. +The configuration uses the :kayobe-doc:`firewall +` host configuration +functionality of Kayobe. + +The firewall configuration is provided in +``etc/kayobe/inventory/group_vars/all/firewall``. + +Enabling StackHPC firewalld rules +================================= + +The standardised firewalld configuration is not enabled by default and must be +actively opted into. 
To do so, make the following changes in +``etc/kayobe/.yml`` (or +``etc/kayobe/environments//.yml`` if environments are being +used). + +Controller firewalld Configuration +---------------------------------- + +.. code-block:: yaml + :caption: ``controllers.yml`` + + ############################################################################### + # Controller node firewalld configuration. + + # Whether to install and enable firewalld. + controller_firewalld_enabled: true + + # A list of zones to create. Each item is a dict containing a 'zone' item. + controller_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + + # A firewalld zone to set as the default. Default is unset, in which case + # the default zone will not be changed. + # Predefined zones are listed here: + # https://firewalld.org/documentation/zone/predefined-zones.html + controller_firewalld_default_zone: trusted + + # A list of firewall rules to apply. Each item is a dict containing + # arguments to pass to the firewalld module. Arguments are omitted if not + # provided, with the following exceptions: + # - offline: true + # - permanent: true + # - state: enabled + controller_firewalld_rules: "{{ stackhpc_firewalld_rules }}" + +Compute firewalld Configuration +------------------------------- + +.. code-block:: yaml + :caption: ``compute.yml`` + + ############################################################################### + # Compute node firewalld configuration. + + # Whether to install and enable firewalld. + compute_firewalld_enabled: true + + # A list of zones to create. Each item is a dict containing a 'zone' item. + compute_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + + # A firewalld zone to set as the default. Default is unset, in which case + # the default zone will not be changed. + # Predefined zones are listed here: + # https://firewalld.org/documentation/zone/predefined-zones.html + compute_firewalld_default_zone: trusted + + # A list of firewall rules to apply. Each item is a dict containing + # arguments to pass to the firewalld module. Arguments are omitted if not + # provided, with the following exceptions: + # - offline: true + # - permanent: true + # - state: enabled + compute_firewalld_rules: "{{ stackhpc_firewalld_rules }}" + +Storage firewalld Configuration +------------------------------- + +.. code-block:: yaml + :caption: ``storage.yml`` + + ############################################################################### + # storage node firewalld configuration. + + # Whether to install and enable firewalld. + storage_firewalld_enabled: true + + # A list of zones to create. Each item is a dict containing a 'zone' item. + storage_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + + # A firewalld zone to set as the default. Default is unset, in which case + # the default zone will not be changed. + # Predefined zones are listed here: + # https://firewalld.org/documentation/zone/predefined-zones.html + storage_firewalld_default_zone: trusted + + # A list of firewall rules to apply. Each item is a dict containing + # arguments to pass to the firewalld module. Arguments are omitted if not + # provided, with the following exceptions: + # - offline: true + # - permanent: true + # - state: enabled + storage_firewalld_rules: "{{ stackhpc_firewalld_rules }}" + +If using RADOS Gateway, you can customise ``stackhpc_ceph_firewalld_radosgw_port`` to match +the ``rgw_frontend_port`` as documented in :ref:`RGWs-with-Ceph`. + +Monitoring firewalld Configuration +---------------------------------- + +.. 
code-block:: yaml + :caption: ``monitoring.yml`` + + ############################################################################### + # monitoring node firewalld configuration. + + # Whether to install and enable firewalld. + monitoring_firewalld_enabled: true + + # A list of zones to create. Each item is a dict containing a 'zone' item. + monitoring_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + + # A firewalld zone to set as the default. Default is unset, in which case + # the default zone will not be changed. + # Predefined zones are listed here: + # https://firewalld.org/documentation/zone/predefined-zones.html + monitoring_firewalld_default_zone: trusted + + # A list of firewall rules to apply. Each item is a dict containing + # arguments to pass to the firewalld module. Arguments are omitted if not + # provided, with the following exceptions: + # - offline: true + # - permanent: true + # - state: enabled + monitoring_firewalld_rules: "{{ stackhpc_firewalld_rules }}" + +Infrastructure VM firewalld Configuration +----------------------------------------- + +The standard firewalld configuration has rules for wazuh-manager and Ansible +control host Infrastructure VMs. + +.. code-block:: yaml + :caption: ``infra-vms.yml`` + + ############################################################################### + # Infrastructure VM node firewalld configuration + + # Whether to install and enable firewalld. + infra_vm_firewalld_enabled: true + + # A list of zones to create. Each item is a dict containing a 'zone' item. + infra_vm_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + + # A firewalld zone to set as the default. Default is unset, in which case + # the default zone will not be changed. + # Predefined zones are listed here: + # https://firewalld.org/documentation/zone/predefined-zones.html + infra_vm_firewalld_default_zone: trusted + + # A list of firewall rules to apply. Each item is a dict containing + # arguments to pass to the firewalld module. Arguments are omitted if not + # provided, with the following exceptions: + # - offline: true + # - permanent: true + # - state: enabled + infra_vm_firewalld_rules: "{{ stackhpc_firewalld_rules }}" + +When configuring wazuh-manager, remember to set ``wazuh_dashboard_net_name`` if you have customised +the network where the Wazuh dashboard is exposed. + +Seed firewalld Configuration +---------------------------- + +.. code-block:: yaml + :caption: ``seed.yml`` + + ############################################################################### + # seed node firewalld configuration. + + # Whether to install and enable firewalld. + seed_firewalld_enabled: true + + # A list of zones to create. Each item is a dict containing a 'zone' item. + seed_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + + # A firewalld zone to set as the default. Default is unset, in which case + # the default zone will not be changed. + # Predefined zones are listed here: + # https://firewalld.org/documentation/zone/predefined-zones.html + seed_firewalld_default_zone: trusted + + # A list of firewall rules to apply. Each item is a dict containing + # arguments to pass to the firewalld module. Arguments are omitted if not + # provided, with the following exceptions: + # - offline: true + # - permanent: true + # - state: enabled + seed_firewalld_rules: "{{ stackhpc_firewalld_rules }}" + +Seed Hypervisor firewalld Configuration +--------------------------------------- + +.. 
code-block:: yaml + :caption: ``seed_hypervisor.yml`` + + ############################################################################### + # seed_hypervisor node firewalld configuration. + + # Whether to install and enable firewalld. + seed_hypervisor_firewalld_enabled: true + + # A list of zones to create. Each item is a dict containing a 'zone' item. + seed_hypervisor_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + + # A firewalld zone to set as the default. Default is unset, in which case + # the default zone will not be changed. + # Predefined zones are listed here: + # https://firewalld.org/documentation/zone/predefined-zones.html + seed_hypervisor_firewalld_default_zone: trusted + + # A list of firewall rules to apply. Each item is a dict containing + # arguments to pass to the firewalld module. Arguments are omitted if not + # provided, with the following exceptions: + # - offline: true + # - permanent: true + # - state: enabled + seed_hypervisor_firewalld_rules: "{{ stackhpc_firewalld_rules }}" + +The following workaround is needed to prevent VM network traffic from being blocked: + +.. code-block:: yaml + :caption: ``seed_hypervisor.yml`` + + seed_hypervisor_sysctl_parameters: + # By default this is 1, which causes layer 2 traffic flowing through Linux + # bridges to pass through iptables. This blocks traffic from VMs (seed, wazuh) to + # the Internet. + net.bridge.bridge-nf-call-iptables: 0 + +The hope is that in the future this can be replaced by some additional firewalld configuration. + +Kolla-Ansible configuration +--------------------------- + +Ensure Kolla Ansible opens up ports in firewalld for services on the public +API network: + +.. code-block:: yaml + :caption: ``etc/kayobe/kolla/globals.yml`` + + enable_external_api_firewalld: true + external_api_firewalld_zone: "{{ public_net_name | net_zone }}" + +Network configuration +--------------------- + +Ensure every network in ``networks.yml`` has a zone defined. The standard +configuration is to set the internal network zone to ``trusted`` and every +other zone to the name of the network. See +``etc/kayobe/environments/ci-multinode/networks.yml`` for a practical example. + +Custom rules +------------ + +Custom firewalld rules can be added to ``stackhpc_firewalld_rules_extra`` + +The variable is a list of firewall rules to apply. Each item is a dictionary +containing arguments to pass to the firewalld module. The variable can be +defined as a group var or host var in the kayobe inventory. + +The structure of custom rules is different from the default rules. Custom rules +use the firewalld Ansible module format. Arguments are omitted if not provided, +with the following exceptions: + +* ``offline: true`` +* ``permanent: true`` +* ``state: enabled`` + +The main differences are that the ``zone`` argument is mandatory, and the +``network`` argument is not. + +The example below would enable SSH in the ``provision_oc`` zone, and disable +UDP port 1000 in the ``admin_oc`` zone for the Wazuh manager Infrastructure +VM: + +.. code-block:: yaml + :caption: ``etc/kayobe/inventory/group_vars/wazuh_manager/firewall`` + + stackhpc_firewalld_rules_extra: + - service: ssh + zone: "{{ provision_oc_net_name | net_zone }}" + state: enabled + - port: 1000/udp + zone: "{{ admin_oc_net_name | net_zone }}" + state: disabled + +Extra rules have higher precedence than the default rules but are not +validated before being applied. Use with caution. 
If you need to add a custom +rule, consider adding it to the default rule list with an appropriate boolean +condition, and where possible merge your changes back into upstream SKC. + +Validation +---------- + +The ``kayobe configuration dump`` command can be used to view all the rules +that will be applied to a host. + +.. code-block:: bash + + kayobe configuration dump --var-name stackhpc_firewalld_rules --limit + +A shorter version, ``stackhpc_firewalld_rules_debug`` prints the rules in a +simplified format: + +.. code-block:: bash + + kayobe configuration dump --var-name stackhpc_firewalld_rules_debug --limit + +If the commands above print a template, rather than a list of rules, the +configuration may be invalid. The ``kayobe configuration dump`` command can be +used on other variables such as ``stackhpc_firewalld_rules_default`` or +``stackhpc_*_firewalld_rules_template`` to debug the configuration. See the +`How it works`_ section for more details. + +It can be useful to print the active ports on each type of host, to create +rules for running services. The internal network is currently left open. The +below command will print all other open ports: + +.. code-block:: bash + + ss -lntpu | grep --invert-match '' + +It is strongly recommended that you dry-run the changes using ``--diff`` and +``--check`` before applying to a production system: + +.. code-block:: bash + :caption: ``Overcloud diff example`` + + kayobe overcloud host configure -t firewall --diff --check + +Baseline checks +^^^^^^^^^^^^^^^ + +Before applying, it is a good idea to take note of any actively firing alerts +and run Tempest to gather a baseline. See the :doc:`Tempest +` page for more details. + +Applying changes +---------------- + +Before applying these changes, you should be completely sure you are not going +to lock yourself out of any hosts. If you are deploying these changes to a test +environment, consider setting a password on the stack user so that you can +access the host through a BMC or other virtual console. + +The following Kayobe command can be used to set a password on all overcloud +hosts: + +.. code-block:: bash + + kayobe overcloud host command run --command "echo 'stack:super-secret-password' | sudo chpasswd" --show-output + +The ``firewalld-watchdog.yml`` playbook can be used to set up a timer that +disables the firewalld service after a period of time (default 600s). It should +be used as follows: + +.. code-block:: bash + + # Enable the watchdog BEFORE applying the firewall configuration + kayobe playbook run etc/kayobe/ansible/firewalld-watchdog.yml -l + + # Disable the watchdog after applying the firewall configuration + kayobe playbook run etc/kayobe/ansible/firewalld-watchdog.yml -l -e firewalld_watchdog_state=absent + +If the firewall rules block connectivity, the second playbook run (disabling +the watchdog) will fail. You will still be able to get in after the watchdog +triggers. Remember to disable the watchdog when you are finished, otherwise the +firewall will be disabled! + +Changes should be applied to controllers one at a time to ensure connectivity +is not lost. + +Once you are sure you know what you are doing, use the ``kayobe * host +configure`` commands to apply the firewall changes: + +.. 
code-block:: bash + + # For Seed Hypervisor hosts + kayobe seed hypervisor host configure -t network,firewall + # For Seed hosts + kayobe seed host configure -t network,firewall + # For Infrastructure VM hosts + kayobe infra vm host configure -t network,firewall + # For the First Controller + kayobe overcloud host configure -t network,firewall -l controllers[0] + # For the Second Controller + kayobe overcloud host configure -t network,firewall -l controllers[1] + # For the Third Controller + kayobe overcloud host configure -t network,firewall -l controllers[2] + # For the rest of the Overcloud hosts + kayobe overcloud host configure -t network,firewall + +Debugging +--------- + +To test the changes, first check for any firing alerts, then try simple smoke +tests (create a VM, list OpenStack endpoints etc.), then run Tempest. + +If the firewall configuration is causing errors, it is often useful to log +blocked packets. + +.. code-block:: bash + + sudo sed -i s/LogDenied=off/LogDenied=all/g /etc/firewalld/firewalld.conf + sudo systemctl restart firewalld + +Dropped packets will be logged to ``dmesg``. + +How it works +============ + +The standard firewall rule configuration is stored in +``etc/kayobe/inventory/group_vars/all/firewall``. + +The file contains sections for different host groups. There are sections for: + +* Common (all hosts) +* Controllers +* Compute +* Storage +* Monitoring +* Wazuh Manager Infrastructure VM +* Ansible Control host Infrastructure VM +* Seed +* Seed Hypervisor + +Each of these sections contains a template. The template is made of sets of +rules. The rules can then be enabled and disabled in sets, based on properties +of the cloud. For example, if ``kolla_enable_designate`` is true, a set of +rules will be enabled in ``stackhpc_controller_firewalld_rules_template``. + +The templates are combined into a single list, +``stackhpc_firewalld_rules_template``. Templates are selected according to the +host's group membership, as well as a set of common rules, which is enabled for +all hosts. + +The rules are then formatted into a single list of the enabled default rules: +``stackhpc_firewalld_rules_default``. The Rules are manipulated to reduce +duplication. When no zone is specified in a rule template, it is inferred from +the network. They are also validated. Conflicting rules will result in an +error. Non-applicable rules are dropped. + +The default rules are combined with any extra rules defined for the deployment. +The complete set of controller firewalld rules is +``stackhpc_firewalld_rules``. diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst index d2dd4ccc6..131dc917f 100644 --- a/doc/source/configuration/index.rst +++ b/doc/source/configuration/index.rst @@ -8,15 +8,16 @@ the various features provided. .. toctree:: :maxdepth: 1 - walled-garden release-train host-images lvm cephadm monitoring - wazuh vault + wazuh + walled-garden + security-hardening + firewall magnum-capi ci-cd - security-hardening cloudkitty diff --git a/doc/source/contributor/environments/ci-aio.rst b/doc/source/contributor/environments/ci-aio.rst index 542f53ec1..953782568 100644 --- a/doc/source/contributor/environments/ci-aio.rst +++ b/doc/source/contributor/environments/ci-aio.rst @@ -10,10 +10,10 @@ automates the manual setup steps below, and is recommended for most users. The manual setup steps are provided for reference, and for users who wish to make changes to the setup process. -.. warning:: +.. 
seealso::

-   This guide was written for the Yoga release and has not been validated for
-   Caracal. Proceed with caution.
+   All-in-one GitHub CI testing using this environment is described :ref:`here
+   `.

 Prerequisites
 =============
diff --git a/doc/source/contributor/environments/ci-builder.rst b/doc/source/contributor/environments/ci-builder.rst
index a0b898ccd..ec5bbb779 100644
--- a/doc/source/contributor/environments/ci-builder.rst
+++ b/doc/source/contributor/environments/ci-builder.rst
@@ -16,6 +16,16 @@ In general it is preferable to use the `container image build CI workflow
 to build container images, but this manual approach may be useful in some
 cases.

+.. seealso::
+
+   A container image build GitHub Actions workflow using this environment is
+   described :ref:`here `.
+
+.. seealso::
+
+   An overcloud host image build GitHub Actions workflow using this environment
+   is described :ref:`here `.
+
 Prerequisites
 =============
diff --git a/doc/source/contributor/environments/ci-multinode.rst b/doc/source/contributor/environments/ci-multinode.rst
index aa33edae5..28dc94de4 100644
--- a/doc/source/contributor/environments/ci-multinode.rst
+++ b/doc/source/contributor/environments/ci-multinode.rst
@@ -19,6 +19,11 @@ beyond the defaults. This includes:
 * Magnum
 * Wazuh

+.. seealso::
+
+   On-demand and nightly GitHub Actions workflows using this
+   environment are described :ref:`here `.
+
 Manila
 ======
 The Multinode environment supports Manila with the CephFS native backend, but it
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
index 0073c48a2..f16a6cbee 100644
--- a/doc/source/contributor/index.rst
+++ b/doc/source/contributor/index.rst
@@ -13,3 +13,4 @@ This guide is for contributors of the StackHPC Kayobe configuration project.
    package-updates
    pre-commit
    ofed
+   testing-ci-automation
diff --git a/doc/source/contributor/testing-ci-automation.rst b/doc/source/contributor/testing-ci-automation.rst
new file mode 100644
index 000000000..fef94e4ae
--- /dev/null
+++ b/doc/source/contributor/testing-ci-automation.rst
@@ -0,0 +1,421 @@
+========================
+Testing, CI & Automation
+========================
+
+CI hosting clouds
+=================
+
+Several GitHub Actions workflows run on public runners, but some require
+private runners. For these, we use a CI hosting cloud.
+
+Leafcloud
+---------
+
+`Leafcloud `_ is currently the only supported cloud
+for hosting CI workloads.
+
+Workloads run in the ``stackhpc-ci`` project, and CI workflows authenticate
+using a ci+skc@stackhpc.com user account. This is an alias for the
+`ci@stackhpc.com Google group
+`_, which may be subscribed to
+by multiple StackHPC Engineers. Credentials for this account should be shared
+amongst a few StackHPC Engineers.
+
+An autoscaling `Actions Runner Controller (ARC)
+`_
+cluster also lives in the ``stackhpc-ci`` project, and runs several jobs that
+require access to the cloud or benefit from data-locality with Ark.
+
+SMS Lab
+-------
+
+SMS lab will soon be added as a supported CI hosting cloud. Several
+considerations must be made when porting CI to SMS Lab.
+
+Many of our CI and other workflows require access to large volumes of data
+stored in Ark. This includes package repositories, container images, disk
+images, etc. Naively accessing this data from SMS lab will result in high
+Internet usage and slow jobs. The previous incarnation of SMS lab hosted a
+"Test Pulp" instance that acted as a local mirror of package repositories and
+container images. 
This worked, but required explicit syncing with Ark when +content is updated, and was a bit brittle. + +For SMS lab 2.0, we propose a different approach. Package repository data is +smaller than container images, but we might still benefit from the use of a +Squid caching proxy. For container images we will use a `Docker registry mirror +`_ as a pull-through cache. + +Container and host image build jobs require significant data uploads, and may +still need to run on Leafcloud to avoid long delays while transferring data to +Ark. + +CI for pull requests (PRs) +========================== + +Continuous Integration (CI) is used within StackHPC Kayobe Configuration to +perform various tests against pull requests. The top-level workflow is in +``.github/workflows/stackhpc-pull-request.yml``. It includes the following +jobs: + +``check-changes`` + Determines which other jobs need to run, based on files that are changed in + the PR. The ``.github/path-filters.yml`` file contains the paths. +``Tox pep8 with Python 3.10`` + Runs the Tox ``pep8`` environment. +``Tox releasenotes with Python 3.10`` + Builds the release notes using the Tox ``releasenotes`` environment. The + separate release notes are not really used - rather they are integrated into + the main documentation. +``Tox docs with Python 3.10`` + Builds the documentation using the Tox ``docs`` environment. +``Build Kayobe Image`` + Builds a Kayobe container image for the pull request and pushes it to GHCR. + Uses the ``.github/workflows/stackhpc-build-kayobe-image.yml`` reusable + workflow. +``Check container image tags`` + Checks that: + + - the image to container mapping in ``tools/kolla-images.py`` matches Kolla + Ansible. + - the container tag hierarchy in ``tools/kolla-images.py`` matches Kolla + Ansible. + - the container image tags defined in ``etc/kayobe/kolla-image-tags.yml`` are + present in the ``stackhpc-dev`` namespace in Ark. + + Uses the ``.github/workflows/stackhpc-check-tags.yml`` reusable workflow, + which runs the ``etc/kayobe/ansible/check-tags.yml`` and + ``etc/kayobe/ansible/check-kolla-images-py.yml`` playbooks. +``aio [upgrade] ( )`` + Runs an all-in-one OpenStack deployment test. + Various jobs are run using different parameters. + Uses the ``.github/workflows/stackhpc-all-in-one.yml`` reusable workflow. + See :ref:`below ` for further details. + +.. _testing-ci-aio: + +All in one testing +------------------ + +The ``.github/workflows/stackhpc-all-in-one.yml`` reusable workflow accepts +various parameters, and the following are used to create a test matrix for PRs: + + - Operating System (Rocky 9, Ubuntu Jammy) + - Neutron plugin (OVS, OVN) + - Upgrade or no upgrade + +The workflow runs on an autoscaling `Actions Runner Controller (ARC) +`_ +cluster, and the GitHub runner acts as both a Terraform client and an Ansible +control host. Kayobe is executed using kayobe-automation within another +container, using the Kayobe container image built in the ``Build Kayobe Image`` +job. + +The workflow performs the following high-level steps: + +#. Deploy a VM on an OpenStack cloud using the `aio + `_ + Terraform configuration. +#. Deploy OpenStack in the VM using Kayobe and the :doc:`ci-aio + ` environment. If this is an upgrade job, the previous + OpenStack release is deployed. +#. Register test resources in the cloud under test (images, flavors, networks, + subnets, routers, etc.). +#. If this is an upgrade job, upgrade the cloud under test to the target + release. +#. 
Run Tempest and `StackHPC OpenStack Tests + `_ to test the cloud. +#. Collect diagnostic information. +#. Upload results as an artifact. +#. Destroy the VM using Terraform. + +In order to create VMs on the cloud hosting the CI, we need a few things: + +- an OpenStack project with sufficient quota to run CI jobs for several PRs + concurrently +- an OpenStack user account +- a ``clouds.yaml`` file +- an application credential to authenticate with the cloud +- a flavor for the VM (minimum 8GiB RAM) +- a set of images for the VM +- a network and subnet for the VM +- SSH connectivity from the GitHub runner to the VM +- access from the VM to the Internet + +This information is provided to GitHub Actions using `secrets +`_ +and `variables +`_. +`GitHub environments +`_ +are used to allow running jobs on different clouds. + +``KAYOBE_VAULT_PASSWORD`` is a repository-scoped GitHub secret containing the +Ansible Vault password for the ``ci-aio`` Kayobe environment. + +The following GitHub secrets are defined in each GitHub environment: + +- ``CLOUDS_YAML`` +- ``OS_APPLICATION_CREDENTIAL_ID`` +- ``OS_APPLICATION_CREDENTIAL_SECRET`` + +The following GitHub variables are defined in each GitHub environment: + +- ``AIO_FLAVOR`` +- ``AIO_NETWORK`` +- ``AIO_SUBNET`` +- ``OS_CLOUD`` + +Glance images for all-in-one VMs are not configured using GitHub variables. +Instead we use the overcloud host images that are built and uploaded to Ark. +These are also uploaded to clouds running CI, with well-known names using the +versions defined in ``etc/kayobe/pulp-host-image-versions.yml``. + +.. _ci-promotion: + +Promotion +========= + +The ``.github/workflows/stackhpc-promote.yml`` workflow runs on a push to any +release branch of StackHPC Kayobe Configuration. It triggers other workflows in +StackHPC Release Train to promote the `package repositories +`_ +and `container images +`_ +referenced in the configuration. + +The standard GitHub API token available in the workflow (``GITHUB_TOKEN``) is +not allowed to trigger a workflow in another repository. To do this, we use a +`fine-grained PAT token +`_ +owned by the ``stackhpc-ci`` GitHub user. This token has read/write permissions +on actions for the ``stackhpc/stackhpc-release-train`` repository. The token is +stored as the ``STACKHPC_RELEASE_TRAIN_TOKEN`` GitHub secret in the StackHPC +Kayobe Configuration repository. The token expires periodically and must be +regenerated, after which the secret must be updated. + +Tag and release +=============== + +The ``.github/workflows/tag-and-release.yml`` workflow runs on a push to any +release branch of StackHPC Kayobe Configuration. It generates a Git tag and +an accompanying GitHub release. See also the `Release Train documentation +`__. + +CI cleanup +========== + +The ``.github/workflows/stackhpc-ci-cleanup.yml`` workflow runs periodically +(currently every 2 hours). It checks for all-in-one CI VMs older than 3 hours +and deletes them, to avoid excess cloud resource consumption. + +.. _testing-container-images: + +Container images +================ + +The ``.github/workflows/stackhpc-container-image-build.yml`` workflow runs on +demand, and is used to build Kolla container images. The process for building +images and updating the configuration to use them is described in the `Release +Train documentation +`__. + +The workflow runs as a matrix, with a job for each supported container OS +distribution. 
The workflow runs on an autoscaling `Actions Runner Controller +(ARC) +`_ +cluster, and the GitHub runner acts as both the Ansible control host and +container image build host. + +A Pulp authentication proxy container is deployed on the runner that provides +unauthenticated access to the package repositories in Ark. This avoids leaking +Ark credentials into the built container images. + +Once built, images are scanned for vulnerabilities using `Trivy +`_. Any critical vulnerabilities will break the build, +unless the ``push-dirty`` input is true. + +If the ``push`` input is true, images are pushed to Ark, and a `container sync +`_ +workflow is triggered in the StackHPC Release Train repository. See +:ref:`here ` for information on triggering workflows in another repository. + +An artifact containing image build logs is uploaded on completion. + +.. _testing-host-images: + +Overcloud host images +===================== + +The ``.github/workflows/overcloud-host-image-build.yml`` workflow runs on +demand, and is used to build overcloud host images. + +The workflow runs as a single job, building each supported container OS +distribution sequentially. The workflow runs on an autoscaling `Actions Runner +Controller (ARC) +`_ +cluster, and the GitHub runner acts as both a Terraform client and an Ansible +control host. Similarly to the all-in-one CI testing, Terraform is used to +create a VM on a cloud that is then used for building images. + +The following steps are taken for each supported image: + +#. Build an image using Kayobe +#. Upload the image to Ark +#. Upload the image to clouds hosting CI + +At the end of the job, build logs are uploaded as an artifact and the VM is +destroyed. + +In order to create a VM on the cloud hosting the CI, we need a few things: + +- an OpenStack project with sufficient quota to run at least one build VM +- an OpenStack user account +- a ``clouds.yaml`` file +- an application credential to authenticate with the cloud +- a flavor for the VM (minimum 8GiB RAM) +- a Rocky Linux 9 image for the VM +- a network and subnet for the VM +- SSH connectivity from the GitHub runner to the VM +- access from the VM to the Internet + +This information is provided to GitHub Actions using `secrets +`_ +and `variables +`_. +`GitHub environments +`_ +are used to allow running jobs on different clouds. + +``KAYOBE_VAULT_PASSWORD`` is a repository-scoped GitHub secret containing the +Ansible Vault password for the ``ci-builder`` Kayobe environment. + +The following GitHub secrets are defined in each GitHub environment: + +- ``CLOUDS_YAML`` +- ``OS_APPLICATION_CREDENTIAL_ID`` +- ``OS_APPLICATION_CREDENTIAL_SECRET`` + +The following GitHub variables are defined in each GitHub environment: + +- ``HOST_IMAGE_BUILD_FLAVOR`` +- ``HOST_IMAGE_BUILD_IMAGE`` +- ``HOST_IMAGE_BUILD_NETWORK`` +- ``HOST_IMAGE_BUILD_SUBNET`` +- ``OS_CLOUD`` + +The ``.github/workflows/overcloud-host-image-promote.yml`` workflow runs on +demand and is used to promote overcloud host images. Unlike package +repositories and container images, host image promotion is still an manual +step. + +The ``.github/workflows/overcloud-host-image-upload.yml`` workflow runs on +demand and is used to upload images to clouds hosting CI. It is mainly used +when this step failed in a previous host image build job. + +.. _testing-multinode: + +Multinode test clusters +======================= + +The ``.github/workflows/stackhpc-multinode.yml`` workflow runs on demand and is +used to create a multinode test cluster. 
The +``.github/workflows/stackhpc-multinode-periodic.yml`` workflow runs +periodically (currently nightly) and runs a random test configuration +(generated by ``.github/workflows/multinode-inputs.py``). + +Both workflows use a `reusable workflow +`_ +in the StackHPC OpenStack GitHub Workflows repository. Note that since this +workflow is in a different repository and we reference it with a tag, changes +to the reusable workflow are not picked up until the tag is bumped. + +The workflow runs on an autoscaling `Actions Runner Controller (ARC) +`_ +cluster, and the GitHub runner acts as a Terraform client. Kayobe is executed +on another VM that acts as the Ansible control host. + +The workflow performs the following high-level steps: + +#. Deploy a set of VMs on an OpenStack cloud using the `Terraform Kayobe + Multinode `_ + Terraform configuration. +#. Configure one of the VMs as an Ansible control host for Kayobe. +#. Deploy OpenStack in the other VMs using Kayobe and the :doc:`ci-multinode + ` environment. If this is an upgrade job, the + previous OpenStack release is deployed. +#. Register test resources in the cloud under test (images, flavors, networks, + subnets, routers, etc.). +#. Run Tempest and `StackHPC OpenStack Tests + `__ to test the cloud. +#. If this is an upgrade job, upgrade the cloud under test to the target + release. +#. Run Tempest and `StackHPC OpenStack Tests + `__ to test the cloud. +#. Collect diagnostic information. +#. Upload results as an artifact. +#. Destroy the VMs using Terraform. +#. For nightly jobs, send a Slack alert to ``#release-train-alerts`` on + failure. + +In order to create VMs on the cloud hosting the CI, we need a few things: + +- an OpenStack project with sufficient quota to create several clusters + concurrently +- an OpenStack user account +- a ``clouds.yaml`` file +- an application credential to authenticate with the cloud +- flavors for each type of VM +- a set of images for the VMs +- a network and subnet for the VMs +- a floating IP pool or external network for the Ansible control host (optional) +- SSH connectivity from the GitHub runner to the Ansible control host VM +- access from the VMs to the Internet + +This information is provided to GitHub Actions using `secrets +`_ +and `variables +`_. +`GitHub environments +`_ +are used to allow running jobs on different clouds. + +``KAYOBE_VAULT_PASSWORD_CI_MULTINODE`` is a repository-scoped GitHub secret +containing the Ansible Vault password for the ``ci-multinode`` Kayobe +environment. + +The following GitHub secrets are defined in each GitHub environment: + +- ``CLOUDS_YAML`` +- ``OS_APPLICATION_CREDENTIAL_ID`` +- ``OS_APPLICATION_CREDENTIAL_SECRET`` + +The following GitHub variables are defined in each GitHub environment: + +- ``MULTINODE_ANSIBLE_CONTROL_VM_FLAVOR`` +- ``MULTINODE_FIP_POOL`` +- ``MULTINODE_FLAVOR`` +- ``MULTINODE_INFRA_VM_FLAVOR`` +- ``MULTINODE_NETWORK`` +- ``MULTINODE_SEED_VM_FLAVOR`` +- ``MULTINODE_STORAGE_FLAVOR`` +- ``MULTINODE_SUBNET`` +- ``OS_CLOUD`` + +Glance images for multinode VMs are not configured using GitHub variables. +Instead we use the overcloud host images that are built and uploaded to Ark. +These are also uploaded to clouds running CI, with well-known names using the +versions defined in ``etc/kayobe/pulp-host-image-versions.yml``. + +For multinode clusters created on demand, it is possible to pause the workflow +execution on certain conditions and gain access to the cluster for a limited +period of time. 
This can be used to interact with the system to investigate +faults, debug, etc. To do this, use the ``break_on`` and ``break_duration`` +workflow inputs. + +Slack alerts +============ + +Slack alerts are sent when certain automatically-triggered workflows fail. See +the `Release Train documentation +`__ for +more details. diff --git a/doc/source/operations/secret-rotation.rst b/doc/source/operations/secret-rotation.rst index 6e685f4db..dfdf25000 100644 --- a/doc/source/operations/secret-rotation.rst +++ b/doc/source/operations/secret-rotation.rst @@ -46,7 +46,7 @@ Full method the state of the cloud before any changes are made 2. Edit your Kolla-Ansible checkout to include changes not yet included - upstream. + upstream. .. _kolla-change: @@ -70,7 +70,7 @@ Full method .. code:: bash git fetch https://review.opendev.org/openstack/kolla-ansible refs/changes/78/903178/2 && git cherry-pick FETCH_HEAD - + 3. Re-install Kolla-Ansible from source in your Kolla-Ansible Python environment @@ -101,7 +101,7 @@ Full method ^redis_master_password ^memcache_secret_key _ssh_key - + private_key public_key ^$ @@ -194,17 +194,32 @@ Full method 2. Update the value of ``grafana_admin_password`` in ``passwords.yml`` - 3. Exec into the Grafana container on a controller + 3. Exec into the MariaDB container on a controller then login to MariaDB + + .. code:: bash + + sudo docker exec -u 0 -it mariadb bash + (mariadb) mysql grafana -p + # Enter database password when prompted + + 4. Query for the ID of ``grafana_local_admin`` + + .. code:: sql + + SELECT id,login FROM user WHERE login = "grafana_local_admin"; + # Take a note of this ID + + 5. Exec into the Grafana container on a controller .. code:: bash sudo docker exec -it grafana bash - 4. Run the password reset command, then enter the new password + 6. Run the password reset command, then enter the new password .. code:: bash - grafana-cli admin reset-admin-password --password-from-stdin + grafana-cli admin reset-admin-password --user-id --password-from-stdin 12. Update the MariaDB database password diff --git a/doc/source/operations/upgrading-openstack.rst b/doc/source/operations/upgrading-openstack.rst index c4e1bb6f0..0bcba45e5 100644 --- a/doc/source/operations/upgrading-openstack.rst +++ b/doc/source/operations/upgrading-openstack.rst @@ -113,6 +113,14 @@ The domain set should be something that is not use anywhere else such as The Neuron DNS integration can be disabled by setting ``neutron_dns_integration: false`` in ``kolla/globals.yml`` +Redis Default User +------------------ + +The ``redis_connection_string`` has changed the username used from ``admin`` +to ``default``. Whilst this does not have any negative impact on services +that utilise Redis it will feature prominently in any preview of the overcloud +configuration. + Known issues ============ @@ -486,7 +494,7 @@ Save the old configuration locally. .. code-block:: console - kayobe overcloud service configuration save --node-config-dir /etc/kolla --output-dir ~/kolla-diff/old --limit controllers[0],compute[0],storage[0] + kayobe overcloud service configuration save --node-config-dir /etc/kolla --output-dir ~/kolla-diff/old --limit controllers[0],compute[0],storage[0] --exclude ironic-agent.initramfs,ironic-agent.kernel Generate the new configuration to a tmpdir. @@ -498,7 +506,7 @@ Save the new configuration locally. .. 
code-block:: console - kayobe overcloud service configuration save --node-config-dir /tmp/kolla --output-dir ~/kolla-diff/new --limit controllers[0],compute[0],storage[0] + kayobe overcloud service configuration save --node-config-dir /tmp/kolla --output-dir ~/kolla-diff/new --limit controllers[0],compute[0],storage[0] --exclude ironic-agent.initramfs,ironic-agent.kernel The old and new configuration will be saved to ``~/kolla-diff/old`` and ``~/kolla-diff/new`` respectively on the Ansible control host. diff --git a/etc/kayobe/ansible/firewalld-watchdog.yml b/etc/kayobe/ansible/firewalld-watchdog.yml new file mode 100644 index 000000000..874992df7 --- /dev/null +++ b/etc/kayobe/ansible/firewalld-watchdog.yml @@ -0,0 +1,69 @@ +--- +# This playbook can be applied in advance of rolling out a firewall +# configuration. It sets up a timer that disables the firewalld service after a +# period of time (default 600s). It should be used as follows: +# 1. Enable firewalld-watchdog +# kayobe playbook run etc/kayobe/ansible/firewalld-watchdog.yml -l +# 2. Apply firewall config +# kayobe host configure -l -t network,firewall +# 3. Disable watchdog +# kayobe playbook run etc/kayobe/ansible/firewalld-watchdog.yml -l -e firewalld_watchdog_state=absent +# If the firewall changes result in being locked out of the system, the +# watchdog will disable the firewall after the timeout. +# Remember to disable the watchdog, otherwise the firewall will be disabled! + +- name: Create a systemd timer to stop firewalld + hosts: seed:seed-hypervisor:overcloud:infra-vms + tags: + - firewalld-watchdog + vars: + # Watchdog state: present or absent. + firewalld_watchdog_state: present + # Watchdog timeout in seconds. + firewalld_watchdog_timeout_s: 600 + become: true + tasks: + - when: firewalld_watchdog_state == 'present' + block: + - name: Create firewalld-watchdog service unit file + ansible.builtin.copy: + dest: /etc/systemd/system/firewalld-watchdog.service + content: | + [Unit] + Description=Firewalld watchdog service + + [Service] + Type=oneshot + ExecStart=/usr/bin/systemctl stop firewalld + register: service_result + + - name: Create firewalld-watchdog timer unit file + ansible.builtin.copy: + dest: /etc/systemd/system/firewalld-watchdog.timer + content: | + [Unit] + Description=Firewalld watchdog timer + + [Timer] + OnActiveSec={{ firewalld_watchdog_timeout_s }} + Unit=firewalld-watchdog.service + + [Install] + WantedBy=timers.target + register: timer_result + + - name: Enable or disable firewalld-watchdog timer + ansible.builtin.systemd_service: + name: firewalld-watchdog.timer + daemon_reload: "{{ service_result is changed or timer_result is changed }}" + enabled: false + state: "{{ 'started' if firewalld_watchdog_state == 'present' else 'stopped' }}" + + - name: Remove firewalld-watchdog unit files + ansible.builtin.file: + path: "/etc/systemd/system/{{ item }}" + state: absent + loop: + - firewalld-watchdog.service + - firewalld-watchdog.timer + when: firewalld_watchdog_state == 'absent' diff --git a/etc/kayobe/apt.yml b/etc/kayobe/apt.yml index 08db21910..cb24d7484 100644 --- a/etc/kayobe/apt.yml +++ b/etc/kayobe/apt.yml @@ -33,6 +33,8 @@ apt_config: apt_keys: - url: "https://download.docker.com/linux/ubuntu/gpg" filename: docker.asc + - url: "https://download.ceph.com/keys/release.asc" + filename: ceph.asc # A list of Apt repositories. Each item is a dict with the following keys: # * types: whitespace-separated list of repository types, e.g. 
deb or deb-src @@ -70,6 +72,12 @@ stackhpc_apt_repositories: signed_by: docker.asc architecture: amd64 required: true + - url: "{{ stackhpc_repo_ceph_reef_debian_url }}" + suites: "{{ ansible_facts.distribution_release }}" + components: main + signed_by: ceph.asc + architecture: amd64 + required: true # Do not replace apt configuration for non-overcloud hosts. This can result in # errors if apt reconfiguration is performed before local repository mirrors diff --git a/etc/kayobe/environments/ci-aio/controllers.yml b/etc/kayobe/environments/ci-aio/controllers.yml index b34536705..8972187df 100644 --- a/etc/kayobe/environments/ci-aio/controllers.yml +++ b/etc/kayobe/environments/ci-aio/controllers.yml @@ -10,3 +10,11 @@ controller_bootstrap_user: "{{ os_distribution if os_distribution == 'ubuntu' el # for the exact configuration. controller_lvm_groups: - "{{ stackhpc_lvm_group_rootvg }}" + +# Controller firewalld configuration. See inventory/group_vars/all/firewall for +# the exact configuration. +controller_firewalld_zones: "{{ stackhpc_firewalld_zones }}" +controller_firewalld_rules: "{{ stackhpc_firewalld_rules }}" +# FIXME(wszumski): Firewall disabled in OVS temporarily until someone has a change to fix it. +controller_firewalld_enabled: "{{ kolla_enable_ovn | bool }}" +controller_firewalld_default_zone: "drop" diff --git a/etc/kayobe/environments/ci-aio/hooks/overcloud-host-configure/post.d/10-debug-firewall.yml b/etc/kayobe/environments/ci-aio/hooks/overcloud-host-configure/post.d/10-debug-firewall.yml new file mode 100644 index 000000000..f0fa29978 --- /dev/null +++ b/etc/kayobe/environments/ci-aio/hooks/overcloud-host-configure/post.d/10-debug-firewall.yml @@ -0,0 +1,9 @@ +--- + +- hosts: overcloud + gather_facts: false + tasks: + - name: Configure firewalld to log blocked traffic + command: firewall-cmd --set-log-denied=all + become: true + when: firewalld_enabled | bool diff --git a/etc/kayobe/environments/ci-aio/kolla/globals.yml b/etc/kayobe/environments/ci-aio/kolla/globals.yml index 3967a5075..a54a5468f 100644 --- a/etc/kayobe/environments/ci-aio/kolla/globals.yml +++ b/etc/kayobe/environments/ci-aio/kolla/globals.yml @@ -14,3 +14,6 @@ opensearch_heap_size: 200m # Increase Grafana timeout grafana_start_first_node_retries: 20 + +# Open up ports in firewalld for services on the public API network. +enable_external_api_firewalld: "{{ controller_firewalld_enabled | bool }}" diff --git a/etc/kayobe/environments/ci-aio/networks.yml b/etc/kayobe/environments/ci-aio/networks.yml index 216696eaa..e3cc4d43d 100644 --- a/etc/kayobe/environments/ci-aio/networks.yml +++ b/etc/kayobe/environments/ci-aio/networks.yml @@ -89,6 +89,7 @@ aio_neutron_allocation_pool_end: 192.168.33.127 aio_inspection_allocation_pool_start: 192.168.33.128 aio_inspection_allocation_pool_end: 192.168.33.254 aio_vip_address: 192.168.33.2 +aio_zone: aio ############################################################################### # Network virtual patch link configuration. 
diff --git a/etc/kayobe/environments/ci-aio/stackhpc-ci.yml b/etc/kayobe/environments/ci-aio/stackhpc-ci.yml index 3e4ca8fee..50af5d160 100644 --- a/etc/kayobe/environments/ci-aio/stackhpc-ci.yml +++ b/etc/kayobe/environments/ci-aio/stackhpc-ci.yml @@ -30,6 +30,7 @@ stackhpc_repo_ubuntu_jammy_security_version: "{{ stackhpc_pulp_repo_ubuntu_jammy stackhpc_repo_ubuntu_jammy_cve_2024_6387_version: "" stackhpc_repo_ubuntu_cloud_archive_version: "{{ stackhpc_pulp_repo_ubuntu_cloud_archive_version }}" stackhpc_repo_docker_ce_ubuntu_jammy_version: "{{ stackhpc_pulp_repo_docker_ce_ubuntu_jammy_version }}" +stackhpc_repo_ceph_reef_debian_version: "{{ stackhpc_pulp_repo_ceph_reef_debian_version }}" stackhpc_repo_centos_stream_9_nfv_openvswitch_version: "{{ stackhpc_pulp_repo_centos_stream_9_nfv_openvswitch_version }}" stackhpc_repo_centos_stream_9_openstack_caracal_version: "{{ stackhpc_pulp_repo_centos_stream_9_openstack_caracal_version }}" stackhpc_repo_centos_stream_9_opstools_version: "{{ stackhpc_pulp_repo_centos_stream_9_opstools_version }}" diff --git a/etc/kayobe/environments/ci-aio/tempest.yml b/etc/kayobe/environments/ci-aio/tempest.yml new file mode 100644 index 000000000..766776417 --- /dev/null +++ b/etc/kayobe/environments/ci-aio/tempest.yml @@ -0,0 +1,3 @@ +--- +# Show Rally output on failure. +rally_no_sensitive_log: false diff --git a/etc/kayobe/environments/ci-multinode/compute.yml b/etc/kayobe/environments/ci-multinode/compute.yml index 7e7701cf0..a00207b41 100644 --- a/etc/kayobe/environments/ci-multinode/compute.yml +++ b/etc/kayobe/environments/ci-multinode/compute.yml @@ -4,3 +4,26 @@ compute_bootstrap_user: "{{ os_distribution if os_distribution == 'ubuntu' else # format. compute_lvm_groups: - "{{ stackhpc_lvm_group_rootvg }}" + +############################################################################### +# Compute node firewalld configuration. + +# Whether to install and enable firewalld. +compute_firewalld_enabled: true + +# A list of zones to create. Each item is a dict containing a 'zone' item. +compute_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + +# A firewalld zone to set as the default. Default is unset, in which case +# the default zone will not be changed. +# Predefined zones are listed here: +# https://firewalld.org/documentation/zone/predefined-zones.html +compute_firewalld_default_zone: trusted + +# A list of firewall rules to apply. Each item is a dict containing +# arguments to pass to the firewalld module. Arguments are omitted if not +# provided, with the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +compute_firewalld_rules: "{{ stackhpc_firewalld_rules }}" diff --git a/etc/kayobe/environments/ci-multinode/controllers.yml b/etc/kayobe/environments/ci-multinode/controllers.yml index 73c31c27f..173bcc371 100644 --- a/etc/kayobe/environments/ci-multinode/controllers.yml +++ b/etc/kayobe/environments/ci-multinode/controllers.yml @@ -4,3 +4,27 @@ controller_bootstrap_user: "{{ os_distribution if os_distribution == 'ubuntu' el # format. controller_lvm_groups: - "{{ stackhpc_lvm_group_rootvg }}" + + +############################################################################### +# Controller node firewalld configuration. + +# Whether to install and enable firewalld. +controller_firewalld_enabled: true + +# A list of zones to create. Each item is a dict containing a 'zone' item. +controller_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + +# A firewalld zone to set as the default. 
Default is unset, in which case +# the default zone will not be changed. +# Predefined zones are listed here: +# https://firewalld.org/documentation/zone/predefined-zones.html +controller_firewalld_default_zone: trusted + +# A list of firewall rules to apply. Each item is a dict containing +# arguments to pass to the firewalld module. Arguments are omitted if not +# provided, with the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +controller_firewalld_rules: "{{ stackhpc_firewalld_rules }}" diff --git a/etc/kayobe/environments/ci-multinode/globals.yml b/etc/kayobe/environments/ci-multinode/globals.yml index 9fe415ecd..de497cb84 100644 --- a/etc/kayobe/environments/ci-multinode/globals.yml +++ b/etc/kayobe/environments/ci-multinode/globals.yml @@ -60,6 +60,9 @@ os_release: >- stackhpc_write_barbican_role_id_to_file: true stackhpc_barbican_role_id_file_path: "/tmp/barbican-role-id" +# Enable rebooting to update SELinux state +selinux_do_reboot: true + ############################################################################### # Dummy variable to allow Ansible to accept this file. workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/environments/ci-multinode/infra-vms.yml b/etc/kayobe/environments/ci-multinode/infra-vms.yml index c388a3c73..33bb0b91e 100644 --- a/etc/kayobe/environments/ci-multinode/infra-vms.yml +++ b/etc/kayobe/environments/ci-multinode/infra-vms.yml @@ -4,3 +4,26 @@ infra_vm_bootstrap_user: "{{ os_distribution if os_distribution == 'ubuntu' else # format. infra_vm_lvm_groups: - "{{ stackhpc_lvm_group_rootvg }}" + +############################################################################### +# Infrastructure VM node firewalld configuration + +# Whether to install and enable firewalld. +infra_vm_firewalld_enabled: true + +# A list of zones to create. Each item is a dict containing a 'zone' item. +infra_vm_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + +# A firewalld zone to set as the default. Default is unset, in which case +# the default zone will not be changed. +# Predefined zones are listed here: +# https://firewalld.org/documentation/zone/predefined-zones.html +infra_vm_firewalld_default_zone: trusted + +# A list of firewall rules to apply. Each item is a dict containing +# arguments to pass to the firewalld module. Arguments are omitted if not +# provided, with the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +infra_vm_firewalld_rules: "{{ stackhpc_firewalld_rules }}" diff --git a/etc/kayobe/environments/ci-multinode/inventory/group_vars/all/firewall.yml b/etc/kayobe/environments/ci-multinode/inventory/group_vars/all/firewall.yml new file mode 100644 index 000000000..ca1f9b475 --- /dev/null +++ b/etc/kayobe/environments/ci-multinode/inventory/group_vars/all/firewall.yml @@ -0,0 +1,7 @@ +--- + +stackhpc_firewalld_rules_extra: + - port: "{{ vxlan_dstport }}/udp" + network: "{{ admin_oc_net_name }}" + zone: "{{ admin_oc_net_name | net_zone }}" + state: enabled diff --git a/etc/kayobe/environments/ci-multinode/kolla/globals.yml b/etc/kayobe/environments/ci-multinode/kolla/globals.yml index c74f385e4..2fe551d8d 100644 --- a/etc/kayobe/environments/ci-multinode/kolla/globals.yml +++ b/etc/kayobe/environments/ci-multinode/kolla/globals.yml @@ -63,3 +63,7 @@ designate_ns_record: designate_backend: "bind9" designate_recursion: "yes" designate_forwarders_addresses: "1.1.1.1; 8.8.8.8" + +# Open up ports in firewalld for services on the public API network. 
+enable_external_api_firewalld: true +external_api_firewalld_zone: "{{ public_net_name | net_zone }}" diff --git a/etc/kayobe/environments/ci-multinode/monitoring.yml b/etc/kayobe/environments/ci-multinode/monitoring.yml new file mode 100644 index 000000000..fa9bbf0be --- /dev/null +++ b/etc/kayobe/environments/ci-multinode/monitoring.yml @@ -0,0 +1,23 @@ +--- +############################################################################### +# monitoring node firewalld configuration. + +# Whether to install and enable firewalld. +monitoring_firewalld_enabled: true + +# A list of zones to create. Each item is a dict containing a 'zone' item. +monitoring_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + +# A firewalld zone to set as the default. Default is unset, in which case +# the default zone will not be changed. +# Predefined zones are listed here: +# https://firewalld.org/documentation/zone/predefined-zones.html +monitoring_firewalld_default_zone: trusted + +# A list of firewall rules to apply. Each item is a dict containing +# arguments to pass to the firewalld module. Arguments are omitted if not +# provided, with the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +monitoring_firewalld_rules: "{{ stackhpc_firewalld_rules }}" diff --git a/etc/kayobe/environments/ci-multinode/networks.yml b/etc/kayobe/environments/ci-multinode/networks.yml index c0a7ff69e..59d3760b9 100644 --- a/etc/kayobe/environments/ci-multinode/networks.yml +++ b/etc/kayobe/environments/ci-multinode/networks.yml @@ -71,12 +71,16 @@ storage_mgmt_net_name: storage_mgmt ############################################################################### # Network definitions. +# Admin overcloud network +admin_oc_zone: "admin_oc" + # Internal network internal_cidr: 192.168.37.0/24 internal_mtu: "{{ ansible_facts.default_ipv4.mtu - 50 }}" internal_allocation_pool_start: 192.168.37.3 internal_allocation_pool_end: 192.168.37.254 internal_vlan: 101 +internal_zone: "trusted" # External network external_cidr: 192.168.38.0/24 @@ -84,6 +88,7 @@ external_mtu: "{{ ansible_facts.default_ipv4.mtu - 50 }}" external_allocation_pool_start: 192.168.38.3 external_allocation_pool_end: 192.168.38.128 external_vlan: 102 +external_zone: "external" # Public network public_cidr: 192.168.39.0/24 @@ -91,6 +96,7 @@ public_mtu: "{{ ansible_facts.default_ipv4.mtu - 50 }}" public_allocation_pool_start: 192.168.39.3 public_allocation_pool_end: 192.168.39.254 public_vlan: 103 +public_zone: "public" # Tunnel network tunnel_cidr: 192.168.40.0/24 @@ -98,6 +104,7 @@ tunnel_mtu: "{{ ansible_facts.default_ipv4.mtu - 50 }}" tunnel_allocation_pool_start: 192.168.40.3 tunnel_allocation_pool_end: 192.168.40.254 tunnel_vlan: 104 +tunnel_zone: "tunnel" # Storage network storage_cidr: 192.168.41.0/24 @@ -105,6 +112,7 @@ storage_mtu: "{{ ansible_facts.default_ipv4.mtu - 50 }}" storage_allocation_pool_start: 192.168.41.3 storage_allocation_pool_end: 192.168.41.254 storage_vlan: 105 +storage_zone: "storage" # Storage management network # NOTE: Skipping the .42 subnet to avoid a collision with a popular number. 
@@ -113,6 +121,7 @@ storage_mgmt_mtu: "{{ ansible_facts.default_ipv4.mtu - 50 }}" storage_mgmt_allocation_pool_start: 192.168.43.3 storage_mgmt_allocation_pool_end: 192.168.43.254 storage_mgmt_vlan: 106 +storage_mgmt_zone: "storage_mgmt" # Provision overcloud network provision_oc_cidr: 192.168.33.0/24 @@ -120,6 +129,7 @@ provision_oc_mtu: "{{ ansible_facts.default_ipv4.mtu - 50 }}" provision_oc_allocation_pool_start: 192.168.33.128 provision_oc_allocation_pool_end: 192.168.33.254 provision_oc_vlan: 107 +provision_oc_zone: "provision_oc" ############################################################################### # Network virtual patch link configuration. diff --git a/etc/kayobe/environments/ci-multinode/seed-hypervisor.yml b/etc/kayobe/environments/ci-multinode/seed-hypervisor.yml new file mode 100644 index 000000000..d64b776fc --- /dev/null +++ b/etc/kayobe/environments/ci-multinode/seed-hypervisor.yml @@ -0,0 +1,23 @@ +--- +############################################################################### +# seed_hypervisor node firewalld configuration. + +# Whether to install and enable firewalld. +seed_hypervisor_firewalld_enabled: true + +# A list of zones to create. Each item is a dict containing a 'zone' item. +seed_hypervisor_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + +# A firewalld zone to set as the default. Default is unset, in which case +# the default zone will not be changed. +# Predefined zones are listed here: +# https://firewalld.org/documentation/zone/predefined-zones.html +seed_hypervisor_firewalld_default_zone: trusted + +# A list of firewall rules to apply. Each item is a dict containing +# arguments to pass to the firewalld module. Arguments are omitted if not +# provided, with the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +seed_hypervisor_firewalld_rules: "{{ stackhpc_firewalld_rules }}" diff --git a/etc/kayobe/environments/ci-multinode/seed.yml b/etc/kayobe/environments/ci-multinode/seed.yml index bb9e3c6bf..630f9a849 100644 --- a/etc/kayobe/environments/ci-multinode/seed.yml +++ b/etc/kayobe/environments/ci-multinode/seed.yml @@ -27,3 +27,26 @@ snat_rules_manila: source_ip: "{{ ansible_facts[storage_interface].ipv4.address | default }}" # Only add the storage snat rule if we are using manila-cephfs. snat_rules: "{{ snat_rules_default + snat_rules_manila if (kolla_enable_manila | bool and kolla_enable_manila_backend_cephfs_native | bool) else snat_rules_default }}" + +############################################################################### +# seed node firewalld configuration. + +# Whether to install and enable firewalld. +seed_firewalld_enabled: "{{ kolla_enable_ovn | bool }}" + +# A list of zones to create. Each item is a dict containing a 'zone' item. +seed_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + +# A firewalld zone to set as the default. Default is unset, in which case +# the default zone will not be changed. +# Predefined zones are listed here: +# https://firewalld.org/documentation/zone/predefined-zones.html +seed_firewalld_default_zone: trusted + +# A list of firewall rules to apply. Each item is a dict containing +# arguments to pass to the firewalld module. 
Arguments are omitted if not +# provided, with the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +seed_firewalld_rules: "{{ stackhpc_firewalld_rules }}" diff --git a/etc/kayobe/environments/ci-multinode/stackhpc-ci.yml b/etc/kayobe/environments/ci-multinode/stackhpc-ci.yml index e75b7589b..6e10b63ea 100644 --- a/etc/kayobe/environments/ci-multinode/stackhpc-ci.yml +++ b/etc/kayobe/environments/ci-multinode/stackhpc-ci.yml @@ -30,6 +30,7 @@ stackhpc_repo_ubuntu_jammy_security_version: "{{ stackhpc_pulp_repo_ubuntu_jammy stackhpc_repo_ubuntu_jammy_cve_2024_6387_version: "" stackhpc_repo_ubuntu_cloud_archive_version: "{{ stackhpc_pulp_repo_ubuntu_cloud_archive_version }}" stackhpc_repo_docker_ce_ubuntu_jammy_version: "{{ stackhpc_pulp_repo_docker_ce_ubuntu_jammy_version }}" +stackhpc_repo_ceph_reef_debian_version: "{{ stackhpc_pulp_repo_ceph_reef_debian_version }}" stackhpc_repo_centos_stream_9_nfv_openvswitch_version: "{{ stackhpc_pulp_repo_centos_stream_9_nfv_openvswitch_version }}" stackhpc_repo_centos_stream_9_openstack_caracal_version: "{{ stackhpc_pulp_repo_centos_stream_9_openstack_caracal_version }}" stackhpc_repo_centos_stream_9_opstools_version: "{{ stackhpc_pulp_repo_centos_stream_9_opstools_version }}" diff --git a/etc/kayobe/environments/ci-multinode/storage.yml b/etc/kayobe/environments/ci-multinode/storage.yml index b152af472..11f7cf71e 100644 --- a/etc/kayobe/environments/ci-multinode/storage.yml +++ b/etc/kayobe/environments/ci-multinode/storage.yml @@ -4,3 +4,26 @@ storage_bootstrap_user: "{{ os_distribution if os_distribution == 'ubuntu' else # format. storage_lvm_groups: - "{{ stackhpc_lvm_group_rootvg }}" + +############################################################################### +# storage node firewalld configuration. + +# Whether to install and enable firewalld. +storage_firewalld_enabled: true + +# A list of zones to create. Each item is a dict containing a 'zone' item. +storage_firewalld_zones: "{{ stackhpc_firewalld_zones }}" + +# A firewalld zone to set as the default. Default is unset, in which case +# the default zone will not be changed. +# Predefined zones are listed here: +# https://firewalld.org/documentation/zone/predefined-zones.html +storage_firewalld_default_zone: trusted + +# A list of firewall rules to apply. Each item is a dict containing +# arguments to pass to the firewalld module. Arguments are omitted if not +# provided, with the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +storage_firewalld_rules: "{{ stackhpc_firewalld_rules }}" diff --git a/etc/kayobe/environments/ci-multinode/tempest.yml b/etc/kayobe/environments/ci-multinode/tempest.yml index 93a7cdfe2..0657946bb 100644 --- a/etc/kayobe/environments/ci-multinode/tempest.yml +++ b/etc/kayobe/environments/ci-multinode/tempest.yml @@ -1,3 +1,6 @@ --- +# Show Rally output on failure. +rally_no_sensitive_log: false + # Add the Vault CA certificate to the rally container when running tempest. tempest_cacert: "{{ kayobe_env_config_path }}/kolla/certificates/ca/vault.crt" diff --git a/etc/kayobe/inventory/group_vars/all/firewall b/etc/kayobe/inventory/group_vars/all/firewall new file mode 100644 index 000000000..488e95b65 --- /dev/null +++ b/etc/kayobe/inventory/group_vars/all/firewall @@ -0,0 +1,330 @@ +--- +######################################### +# StackHPC default firewall configuration +######################################### +# This file contains the reference StackHPC firewalld rule implementation. 
It +# is designed to cover as many services and use cases as possible, but may +# not be compatible with all deployments. For more information, see the +# firewall page in the configuration section of the SKC docs. + +# A templated list of firewalld zones, according to the host's network +# interfaces. +# This variable can be used to set any *_firewalld_zones kayobe variable e.g. +# compute_firewalld_zones: "{{ stackhpc_firewalld_zones }}" in compute.yml +stackhpc_firewalld_zones: | + {% set network_zones = [] %} + {% for network in network_interfaces %} + {% if network | net_zone %} + {% set _ = network_zones.append({'zone': network | net_zone }) %} + {% endif %} + {% endfor %} + {{ network_zones | unique | list }} + +# A templated list of firewalld rules, according to the enabled services, +# host's group membership, and host's network configuration. +# This variable can be used to set any *_firewalld_rules kayobe variable e.g. +# compute_firewalld_rules: "{{ stackhpc_firewalld_rules }}" in compute.yml +stackhpc_firewalld_rules: | + {% set stackhpc_firewalld_rules_verified = stackhpc_firewalld_rules_extra %} + {% for rule in stackhpc_firewalld_rules_default | unique %} + {% if rule | ansible.utils.remove_keys('state') not in stackhpc_firewalld_rules_verified | map('ansible.utils.remove_keys', 'state') | default([]) %} + {% if rule.network is not defined %} + {% set _ = stackhpc_firewalld_rules_verified.append(rule) %} + {% elif rule.network in network_interfaces and rule.network | net_zone %} + {% set _ = stackhpc_firewalld_rules_verified.append(rule) %} + {% endif %} + {% endif %} + {% endfor %} + {{ stackhpc_firewalld_rules_verified | list }} + +# A single list of default firewall rules, combining the enabled rules from +# stackhpc_firewalld_rules_template. This variable is designed to fail to +# template if any conflicting rules are found.
+stackhpc_firewalld_rules_default: | + {% set stackhpc_firewalld_rules_formatted = [] %} + {% for ruleset in stackhpc_firewalld_rules_template %} + {% if ruleset.enabled | bool %} + {% for rule in ruleset.rules %} + {% if rule.zone is not defined %} + {% set rule = rule | combine({'zone': rule.network | net_zone }) %} + {% endif %} + {% if rule not in stackhpc_firewalld_rules_formatted %} + {% if rule | ansible.utils.remove_keys('state') in stackhpc_firewalld_rules_formatted | map('ansible.utils.remove_keys', 'state') %} + {% set _ = stackhpc_firewalld_rules_formatted.append({'state':'failure'}) %} + {% elif rule.network is not defined %} + {% set _ = stackhpc_firewalld_rules_formatted.append(rule) %} + {% elif rule.network in network_interfaces and rule.network | net_zone %} + {% set _ = stackhpc_firewalld_rules_formatted.append(rule) %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endfor %} + {{ undef(hint='ERROR: Conflicting firewall rules found') if ({'state':'failure'} in stackhpc_firewalld_rules_formatted) else stackhpc_firewalld_rules_formatted }} + +stackhpc_firewalld_rules_template: | + {{ stackhpc_common_firewalld_rules_template + + (stackhpc_controller_firewalld_rules_template if 'controllers' in group_names else []) + + (stackhpc_compute_firewalld_rules_template if 'compute' in group_names else []) + + (stackhpc_ceph_firewalld_rules_template if 'ceph' in group_names else []) + + (stackhpc_monitoring_firewalld_rules_template if 'monitoring' in group_names else []) + + (stackhpc_seed_firewalld_rules_template if 'seed' in group_names else []) + + (stackhpc_seed_hypervisor_firewalld_rules_template if 'seed-hypervisor' in group_names else []) + + (stackhpc_wazuh_manager_infra_vm_firewalld_rules_template if 'wazuh-manager' in group_names else []) + + (stackhpc_ansible_control_infra_vm_firewalld_rules_template if inventory_hostname == 'localhost' else []) }} + +############################################################################### +# Debug Vars + +# This variable is not applied anywhere. It exists for debugging purposes +# only. Print it with: +# kayobe configuration dump --var-name stackhpc_firewalld_rules_debug +stackhpc_firewalld_rules_debug: | + {% set stackhpc_firewalld_services_debug = [] %} + {% for rule in stackhpc_firewalld_rules %} + {% if rule.service is defined %} + {% set _ = stackhpc_firewalld_services_debug.append(rule.service + ' ' + rule.state + ' ' + rule.zone | default()) %} + {% else %} + {% set _ = stackhpc_firewalld_services_debug.append(rule.port + ' ' + rule.state + ' ' + rule.zone | default()) %} + {% endif %} + {% endfor %} + {{ stackhpc_firewalld_services_debug | list }} + +############################################################################### +# Extra firewalld rules + +# Extra firewalld rules. Each item is a dict containing arguments to pass to +# the firewalld module. +# These rules have higher precedence than the +# default rules and are not validated before being applied. Use with caution. +# NOTE: The format of this variable is NOT the same as the +# stackhpc_*_firewalld_rules_template variables found elsewhere in this file.
+stackhpc_firewalld_rules_extra: [] + +############################################################################### +# Common firewalld rules + +stackhpc_common_firewalld_rules_template: + # Common + - rules: + - service: ssh + network: "{{ admin_oc_net_name }}" + state: enabled + enabled: true + # Cockpit, dhcpv6-client, and SSH are enabled by default in firewalld + - rules: + - service: cockpit + zone: public + state: disabled + - service: dhcpv6-client + zone: public + state: disabled + - service: ssh + zone: public + state: disabled + enabled: "{{ admin_oc_net_name | net_zone != 'public' }}" + +############################################################################### +# Controller firewalld rules + +stackhpc_controller_firewalld_rules_template: + # Overcloud Ironic + - rules: + # Ironic inspector API + - port: 5050/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + # Ironic API + - port: 6385/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + - port: 8089/tcp + network: "{{ provision_wl_net_name }}" + state: enabled + - service: dhcp + network: "{{ provision_wl_net_name }}" + state: enabled + - service: ntp + network: "{{ provision_wl_net_name }}" + state: enabled + - service: tftp + network: "{{ provision_wl_net_name }}" + state: enabled + enabled: "{{ kolla_enable_ironic | bool }}" + # Designate + - rules: + - port: 53/tcp + network: "{{ public_net_name }}" + state: enabled + - port: 53/udp + network: "{{ public_net_name }}" + state: enabled + - port: 5354/tcp + network: "{{ public_net_name }}" + state: enabled + - port: 5354/udp + network: "{{ public_net_name }}" + state: enabled + enabled: "{{ kolla_enable_designate | bool }}" + # GENEVE + - rules: + - port: 6081/udp + network: "{{ tunnel_net_name }}" + state: enabled + enabled: "{{ 'geneve' in (kolla_neutron_ml2_type_drivers + kolla_neutron_ml2_tenant_network_types) and 'network' in group_names }}" + # VXLAN + - rules: + - port: 4789/udp + network: "{{ tunnel_net_name }}" + state: enabled + enabled: "{{ 'vxlan' in (kolla_neutron_ml2_type_drivers + kolla_neutron_ml2_tenant_network_types) and 'network' in group_names }}" + # Octavia + - rules: + - port: 5555/udp + network: "{{ octavia_net_name | default() }}" + state: enabled + enabled: "{{ kolla_enable_octavia | bool and octavia_net_name is not none }}" + # DHCP. Required to create VMs at a customer site; the exact condition still needs to be worked out. + - rules: + - port: 67/udp + network: "{{ provision_oc_net_name }}" + state: enabled + enabled: true + +############################################################################### +# Compute firewalld rules + +stackhpc_compute_firewalld_rules_template: + # GENEVE + - rules: + - port: 6081/udp + network: "{{ tunnel_net_name }}" + state: enabled + enabled: "{{ ('geneve' in (kolla_neutron_ml2_type_drivers + kolla_neutron_ml2_tenant_network_types)) | bool }}" + # VXLAN + - rules: + - port: 4789/udp + network: "{{ tunnel_net_name }}" + state: enabled + enabled: "{{ ('vxlan' in (kolla_neutron_ml2_type_drivers + kolla_neutron_ml2_tenant_network_types)) | bool }}" + +############################################################################### +# Ceph firewalld rules + +# Port on which radosgw is exposed.
+# See: https://stackhpc-kayobe-config.readthedocs.io/en/stackhpc-2024.1/configuration/cephadm.html#rados-gateways +stackhpc_ceph_firewalld_radosgw_port: 8100 + +stackhpc_ceph_firewalld_rules_template: + # Ceph Prometheus exporter + - rules: + - port: 9283/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + enabled: "{{ kolla_enable_prometheus_ceph_mgr_exporter | default(false) | bool and 'mgrs' in group_names }}" + # Ceph + - rules: + - service: ceph + network: "{{ storage_net_name }}" + state: enabled + - service: ceph + network: "{{ storage_mgmt_net_name }}" + state: enabled + - service: ceph-mon + network: "{{ storage_net_name }}" + state: "{{ 'enabled' if 'mons' in group_names else 'disabled' }}" + - port: "{{ stackhpc_ceph_firewalld_radosgw_port }}/tcp" + network: "{{ storage_net_name }}" + state: "{{ 'enabled' if 'rgws' in group_names else 'disabled' }}" + enabled: "{{ 'ceph' in group_names }}" + +############################################################################### +# Monitoring firewalld rules + +stackhpc_monitoring_firewalld_rules_template: [] + +############################################################################### +# Infra VM firewalld rules (Wazuh Manager) + +stackhpc_wazuh_manager_infra_vm_firewalld_rules_template: + - rules: + - port: 1514/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + - port: 1514/udp + network: "{{ provision_oc_net_name }}" + state: enabled + - port: 1515/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + - port: 443/tcp + network: "{{ wazuh_dashboard_net_name | default(provision_oc_net_name) }}" + state: enabled + - port: 9200/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + - port: 9300-9400/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + - port: 55000/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + enabled: true + +############################################################################### +# Infra VM firewalld rules (Ansible Control) + +stackhpc_ansible_control_infra_vm_firewalld_rules_template: [] + +############################################################################### +# Seed firewalld rules + +stackhpc_seed_firewalld_rules_template: + # Pulp server + - rules: + - port: "{{ pulp_port }}/tcp" + network: "{{ provision_oc_net_name }}" + state: enabled + enabled: "{{ seed_pulp_container_enabled | bool }}" + # Squid proxy + - rules: + - service: squid + network: "{{ provision_oc_net_name }}" + state: enabled + enabled: "{{ seed_squid_container_enabled | bool }}" + # Ironic + - rules: + # nginx + - port: 8080/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + # Ironic inspector API + - port: 5050/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + # Ironic API + - port: 6385/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + - service: dhcp + network: "{{ provision_oc_net_name }}" + state: enabled + - service: tftp + network: "{{ provision_oc_net_name }}" + state: enabled + - service: ntp + network: "{{ provision_oc_net_name }}" + state: enabled + enabled: true #FIXME: Make rules conditional on Bifrost deployment + # Redfish exporter + - rules: + - port: 9610/tcp + network: "{{ provision_oc_net_name }}" + state: enabled + enabled: "{{ stackhpc_enable_redfish_exporter | default(false) }}" + # TODO: Rules if SNAT enabled on seed + +############################################################################### +# Seed Hypervisor firewalld rules + +stackhpc_seed_hypervisor_firewalld_rules_template: [] 
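The ``stackhpc_firewalld_rules_extra`` hook defined above is the intended place for site-specific additions, as the ci-multinode VXLAN rule earlier in this change demonstrates. A minimal sketch of another such override, placed in an environment's ``inventory/group_vars/all/firewall.yml``; the node exporter port and the use of ``internal_net_name`` are illustrative assumptions rather than part of the reference rules:

    ---
    stackhpc_firewalld_rules_extra:
      # Allow Prometheus node exporter scrapes on the internal API network.
      - port: 9100/tcp
        network: "{{ internal_net_name }}"
        zone: "{{ internal_net_name | net_zone }}"
        state: enabled

The merged rule set can be inspected before it is applied using the debug variable described above, e.g. ``kayobe configuration dump --var-name stackhpc_firewalld_rules_debug``.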
diff --git a/etc/kayobe/inventory/group_vars/cis-hardening/cis b/etc/kayobe/inventory/group_vars/cis-hardening/cis index a80571f85..37d01492b 100644 --- a/etc/kayobe/inventory/group_vars/cis-hardening/cis +++ b/etc/kayobe/inventory/group_vars/cis-hardening/cis @@ -55,7 +55,7 @@ rhel9cis_no_world_write_adjust: false rhel9cis_auditd: space_left_action: syslog action_mail_acct: root - admin_space_left_action: halt + admin_space_left_action: syslog max_log_file: 10 max_log_file_action: rotate @@ -157,7 +157,7 @@ ubtu22cis_suid_adjust: false ubtu22cis_auditd: action_mail_acct: root space_left_action: syslog - admin_space_left_action: halt + admin_space_left_action: syslog max_log_file_action: rotate # Max size of audit logs (MB) diff --git a/etc/kayobe/networks.yml b/etc/kayobe/networks.yml index 17c9028c4..e91d2c3bb 100644 --- a/etc/kayobe/networks.yml +++ b/etc/kayobe/networks.yml @@ -59,6 +59,12 @@ # hosts #cleaning_net_name: +# Name of the network used to manage Octavia load balancers +#octavia_net_name: + +# Name of the network where the Wazuh dashboard is exposed +#wazuh_dashboard_net_name: + ############################################################################### # Network definitions. diff --git a/etc/kayobe/pulp-repo-versions.yml b/etc/kayobe/pulp-repo-versions.yml index c58ee6d24..b11007172 100644 --- a/etc/kayobe/pulp-repo-versions.yml +++ b/etc/kayobe/pulp-repo-versions.yml @@ -59,3 +59,4 @@ stackhpc_pulp_repo_rocky_9_sig_security_common_aarch64_version: 20240927T073838 stackhpc_pulp_repo_ubuntu_cloud_archive_version: 20240911T041957 stackhpc_pulp_repo_ubuntu_jammy_security_version: 20240911T063424 stackhpc_pulp_repo_ubuntu_jammy_version: 20240911T063424 +stackhpc_pulp_repo_ceph_reef_debian_version: 20240925T152022 diff --git a/etc/kayobe/pulp.yml b/etc/kayobe/pulp.yml index aff4d0fdc..fc45f3d0b 100644 --- a/etc/kayobe/pulp.yml +++ b/etc/kayobe/pulp.yml @@ -122,6 +122,14 @@ stackhpc_pulp_deb_repos: components: "stable" required: "{{ stackhpc_pulp_sync_ubuntu_jammy | bool }}" + - name: "Ceph Reef for Debian" + url: "{{ stackhpc_release_pulp_content_url }}/ceph/debian-reef/{{ stackhpc_pulp_repo_ceph_reef_debian_version }}" + distribution_name: "ceph-reef-debian-" + base_path: "ceph/debian-reef/" + distributions: "jammy" + components: "main" + required: "{{ stackhpc_pulp_sync_ubuntu_jammy | bool }}" + # Publication format is a subset of distribution.
stackhpc_pulp_publication_deb_development: "{{ stackhpc_pulp_distribution_deb_development }}" diff --git a/etc/kayobe/stackhpc.yml b/etc/kayobe/stackhpc.yml index 2f53da724..090eaf3a9 100644 --- a/etc/kayobe/stackhpc.yml +++ b/etc/kayobe/stackhpc.yml @@ -49,6 +49,10 @@ stackhpc_repo_ubuntu_cloud_archive_version: "{{ stackhpc_repo_distribution }}" stackhpc_repo_docker_ce_ubuntu_jammy_url: "{{ stackhpc_repo_mirror_url }}/pulp/content/docker-ce/ubuntu-jammy/{{ stackhpc_repo_docker_ce_ubuntu_jammy_version }}" stackhpc_repo_docker_ce_ubuntu_jammy_version: "{{ stackhpc_repo_distribution }}" +# Ceph Reef for Debian +stackhpc_repo_ceph_reef_debian_url: "{{ stackhpc_repo_mirror_url }}/pulp/content/ceph/debian-reef/{{ stackhpc_repo_ceph_reef_debian_version }}" +stackhpc_repo_ceph_reef_debian_version: "{{ stackhpc_repo_distribution }}" + ############################################################################### # RPMs diff --git a/releasenotes/notes/adds-networking-mlnx-05fdc10ea6132145.yaml b/releasenotes/notes/adds-networking-mlnx-05fdc10ea6132145.yaml index 5f9e3a15b..695ab035c 100644 --- a/releasenotes/notes/adds-networking-mlnx-05fdc10ea6132145.yaml +++ b/releasenotes/notes/adds-networking-mlnx-05fdc10ea6132145.yaml @@ -2,5 +2,5 @@ features: - | Adds the ``networking-mlnx`` mechanism driver to the Neutron Server - container and ``ebrctl``utility to the Nova Compute container. This allows + container and ``ebrctl`` utility to the Nova Compute container. This allows you to use the ``kolla_enable_neutron_mlnx`` feature flag. diff --git a/releasenotes/notes/automated-firewalld-a95e7322fd457259.yaml b/releasenotes/notes/automated-firewalld-a95e7322fd457259.yaml new file mode 100644 index 000000000..4a06e6705 --- /dev/null +++ b/releasenotes/notes/automated-firewalld-a95e7322fd457259.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + A default firewall configuration is now included on an opt-in basis. The + rules are defined under ``etc/kayobe/inventory/group_vars/all/firewall``. + More information can be found `here + `__ diff --git a/releasenotes/notes/fix-neutron-fip-ovn-lb-attach-1457b0fcc2757ed9.yaml b/releasenotes/notes/fix-neutron-fip-ovn-lb-attach-1457b0fcc2757ed9.yaml new file mode 100644 index 000000000..ff8c55bc6 --- /dev/null +++ b/releasenotes/notes/fix-neutron-fip-ovn-lb-attach-1457b0fcc2757ed9.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Bumps Neutron container image tags to fix `bug 2068644 + `__ which could prevent + associating floating IPs with OVN-based load balancers. diff --git a/releasenotes/notes/fixes-file-descriptor-leak-3d5c9340e5983e9c.yaml b/releasenotes/notes/fixes-file-descriptor-leak-3d5c9340e5983e9c.yaml new file mode 100644 index 000000000..415b4d76d --- /dev/null +++ b/releasenotes/notes/fixes-file-descriptor-leak-3d5c9340e5983e9c.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixes a file descriptor leak in networking-mlnx which prevented VMs using + InfiniBand virtual functions from provisioning after a period of time. + - | + Fixes ``KeyError: ip_version`` in networking-mlnx when used in conjunction + with the OVN mechanism driver. diff --git a/releasenotes/notes/tempest-concurrency-1c86c0ef90e104f6.yaml b/releasenotes/notes/tempest-concurrency-1c86c0ef90e104f6.yaml new file mode 100644 index 000000000..db9658694 --- /dev/null +++ b/releasenotes/notes/tempest-concurrency-1c86c0ef90e104f6.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The default Tempest concurrency has been increased from 2 to 16. + This level of concurrency is usually achievable on production systems.
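The networking-mlnx release note above refers to the ``kolla_enable_neutron_mlnx`` feature flag. A minimal sketch of enabling it follows; the file location is the usual Kayobe convention for Kolla feature flags and is an assumption rather than something the note prescribes:

    ---
    # etc/kayobe/kolla.yml (assumed location for Kolla feature flags)
    kolla_enable_neutron_mlnx: true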