diff --git a/Dockerfile.ansible b/Dockerfile.ansible
new file mode 100644
index 00000000000..abd061a22f8
--- /dev/null
+++ b/Dockerfile.ansible
@@ -0,0 +1,44 @@
+ARG REGISTRY
+ARG VERSION
+
+###############################################################################
+# final is our slim image with minimal layers and tools
+###############################################################################
+FROM ${REGISTRY}/ubi9/python-311:1-66 AS final
+ARG PIPX_VERSION=1.5.0 \
+    ANSIBLE_VERSION=9.5.1 \
+    AZURE_CLI_VERSION=2.60.0 \
+    ANSIBLE_AZCOLLECTION_VERSION=2.3.0
+
+# Have Ansible print task timing information
+ENV ANSIBLE_CALLBACKS_ENABLED=profile_tasks
+USER root
+COPY ansible /ansible
+WORKDIR /ansible
+
+# Using pipx here because ansible and azure-cli require different core Azure modules;
+# each needs its own venv to avoid collisions
+RUN ${APP_ROOT}/bin/pip install "pipx==${PIPX_VERSION}" && \
+    ${APP_ROOT}/bin/pipx install "azure-cli==${AZURE_CLI_VERSION}" && \
+    ${APP_ROOT}/bin/pipx install "ansible==${ANSIBLE_VERSION}" --include-deps && \
+    ${APP_ROOT}/bin/pipx runpip ansible install -r "/ansible/ansible-requirements.txt" && \
+    ${HOME}/.local/bin/ansible-galaxy collection install "azure.azcollection==${ANSIBLE_AZCOLLECTION_VERSION}" && \
+    ${APP_ROOT}/bin/pipx runpip ansible install -r "${HOME}/.ansible/collections/ansible_collections/azure/azcollection/requirements-azure.txt" && \
+    ${APP_ROOT}/bin/pipx list && \
+    rm -rf ${HOME}/.ansible ${HOME}/.azure
+
+###############################################################################
+# linter takes the final image and injects ansible-lint. ansible-lint needs
+# ansible itself, plus all Ansible collections and Python modules, installed to lint correctly
+###############################################################################
+FROM final AS linter
+ARG ANSIBLE_LINT_VERSION=24.6.0
+RUN ${APP_ROOT}/bin/pipx inject ansible --include-apps "ansible-lint==${ANSIBLE_LINT_VERSION}" && \
+    ${HOME}/.local/bin/ansible-lint -v -c /ansible/.ansible_lint.yaml --project-dir /ansible --format sarif | tee /opt/app-root/src/sarif.txt
+
+###############################################################################
+# Re-FROM final so that it's the output container
+###############################################################################
+FROM final
+COPY --from=linter /opt/app-root/src/sarif.txt /opt/app-root/src/sarif.txt
+ENTRYPOINT ["/opt/app-root/src/.local/bin/ansible-playbook"]
diff --git a/Makefile b/Makefile
index 032896fca46..08cd043e1c7 100644
--- a/Makefile
+++ b/Makefile
@@ -5,6 +5,7 @@ ARO_IMAGE_BASE = ${RP_IMAGE_ACR}.azurecr.io/aro
 E2E_FLAGS ?= -test.v --ginkgo.v --ginkgo.timeout 180m --ginkgo.flake-attempts=2 --ginkgo.junit-report=e2e-report.xml
 GO_FLAGS ?= -tags=containers_image_openpgp,exclude_graphdriver_btrfs,exclude_graphdriver_devicemapper
 NO_CACHE ?= true
+PODMAN_VOLUME_OVERLAY=$(shell if [[ $$(getenforce) == "Enforcing" ]]; then echo ":O"; else echo ""; fi 2>/dev/null)
 
 export GOFLAGS=$(GO_FLAGS)
 
@@ -285,4 +286,20 @@ vendor:
 install-go-tools:
 	go install ${GOTESTSUM}
 
-.PHONY: admin.kubeconfig aks.kubeconfig aro az ci-rp ci-clean clean client deploy dev-config.yaml discoverycache fix-macos-vendor generate image-aro-multistage image-fluentbit image-proxy init-contrib lint-go runlocal-rp proxy publish-image-aro-multistage publish-image-fluentbit publish-image-proxy secrets secrets-update e2e.test tunnel test-e2e test-go test-python vendor build-all validate-go unit-test-go coverage-go validate-fips 
install-go-tools
+ansible-image:
+	docker image exists aro-ansible:$(VERSION) || docker build . -f Dockerfile.ansible --build-arg REGISTRY=$(REGISTRY) --build-arg VERSION=$(VERSION) --no-cache=$(NO_CACHE) --tag aro-ansible:$(VERSION)
+
+LOCATION := eastus
+CLUSTERPREFIX := $(USER)
+CLUSTERPATTERN := basic
+CLEANUP := False
+SSH_CONFIG_DIR := $(HOME)/.ssh/
+SSH_KEY_BASENAME := id_rsa
+# Note: When running this from a pipeline, don't mount the ansible directory; use the one baked into the ansible image, for reproducibility. (See the usage note at the end of this diff.)
+cluster: ansible-image
+	docker run --rm -it -v $${AZURE_CONFIG_DIR:-~/.azure}:/opt/app-root/src/.azure$(PODMAN_VOLUME_OVERLAY) -v ./ansible:/ansible$(PODMAN_VOLUME_OVERLAY) -v $(SSH_CONFIG_DIR):/root/.ssh$(PODMAN_VOLUME_OVERLAY) aro-ansible:$(VERSION) -i hosts.yaml -l $(CLUSTERPATTERN) -e location=$(LOCATION) -e CLUSTERPREFIX=$(CLUSTERPREFIX) -e CLEANUP=$(CLEANUP) -e SSH_KEY_BASENAME=$(SSH_KEY_BASENAME) clusters.yaml
+
+lint-ansible:
+	cd ansible; ansible-lint -c .ansible_lint.yaml
+
+.PHONY: admin.kubeconfig aks.kubeconfig aro az ci-rp ci-clean clean client deploy dev-config.yaml discoverycache fix-macos-vendor generate image-aro-multistage image-fluentbit image-proxy init-contrib lint-go runlocal-rp proxy publish-image-aro-multistage publish-image-fluentbit publish-image-proxy secrets secrets-update e2e.test tunnel test-e2e test-go test-python vendor build-all validate-go unit-test-go coverage-go validate-fips install-go-tools cluster ansible-image cluster-dev lint-ansible
diff --git a/ansible/.ansible_lint.yaml b/ansible/.ansible_lint.yaml
new file mode 100644
index 00000000000..14f287c194c
--- /dev/null
+++ b/ansible/.ansible_lint.yaml
@@ -0,0 +1,10 @@
+profile: production
+exclude_paths: []
+use_default_rules: true
+skip_list:
+  - no-changed-when
+enable_list:
+  - args
+  - empty-string-compare
+  - no-same-owner
+  - name[prefix]
diff --git a/ansible/ansible-requirements.txt b/ansible/ansible-requirements.txt
new file mode 100644
index 00000000000..eca5e2e4ead
--- /dev/null
+++ b/ansible/ansible-requirements.txt
@@ -0,0 +1 @@
+kubernetes==29.0.0
diff --git a/ansible/clusters.yaml b/ansible/clusters.yaml
new file mode 100644
index 00000000000..e9afeef512c
--- /dev/null
+++ b/ansible/clusters.yaml
@@ -0,0 +1,19 @@
+---
+- name: Deploy simple clusters
+  hosts: simple_clusters
+  gather_facts: false
+  serial: "{{ max_simultaneous_clusters | default(1) }}"
+  environment:
+    AZURE_CORE_SURVEY_MESSAGE: "false"
+  roles:
+    - simple_cluster
+    - cleanup
+- name: Deploy bring-your-own-key (BYOK) disk encryption clusters
+  hosts: byok_clusters
+  gather_facts: false
+  serial: "{{ max_simultaneous_clusters | default(1) }}"
+  environment:
+    AZURE_CORE_SURVEY_MESSAGE: "false"
+  roles:
+    - byok_cluster
+    - cleanup
diff --git a/ansible/group_vars/all.yaml b/ansible/group_vars/all.yaml
new file mode 100644
index 00000000000..f9b15eae4d0
--- /dev/null
+++ b/ansible/group_vars/all.yaml
@@ -0,0 +1 @@
+delegation: localhost
diff --git a/ansible/hosts.yaml b/ansible/hosts.yaml
new file mode 100644
index 00000000000..698de91dbe8
--- /dev/null
+++ b/ansible/hosts.yaml
@@ -0,0 +1,93 @@
+---
+simple_clusters:
+  hosts:
+    basic:
+      # The simplest possible cluster
+      name: aro
+      resource_group: "{{ CLUSTERPREFIX }}-basic-{{ location }}"
+    enc:
+      # Basic cluster with encryption-at-host enabled
+      name: aro-414
+      resource_group: "{{ CLUSTERPREFIX }}-enc-{{ location }}-414"
+      version: 4.14.16
+      master_size: Standard_E8s_v5
+      master_encryption_at_host: true
+      worker_size: Standard_D4s_v5
+      
worker_encryption_at_host: true + sre-shared-cluster: + name: sre-shared-cluster + resource_group: eastus + version: 4.12.25 + cluster_resource_group: aro-a + vars: + network_prefix_cidr: 10.0.0.0/22 + master_cidr: 10.0.0.0/23 + master_size: Standard_D8s_v3 + worker_cidr: 10.0.2.0/23 + worker_size: Standard_D4s_v3 + delegation: localhost + children: + udr_clusters: + private_clusters: + +udr_clusters: + hosts: + udr414: + name: aro-414 + resource_group: "{{ CLUSTERPREFIX }}-udr-{{ location }}-414" + version: "4.14.16" + routes: + - name: Blackhole + address_prefix: 0.0.0.0/0 + next_hop_type: none + udr_no_null414: + name: aro-414 + resource_group: "{{ CLUSTERPREFIX }}-udrnonull-{{ location }}-414" + version: "4.14.16" + routes: + - name: The Interwebs + address_prefix: 0.0.0.0/0 + next_hop_type: internet + udr413: + name: aro-413 + resource_group: "{{ CLUSTERPREFIX }}-udr-{{ location }}-413" + version: "4.13.23" + routes: + - name: Blackhole + address_prefix: 0.0.0.0/0 + next_hop_type: none + vars: + network_prefix_cidr: 10.0.0.0/22 + master_cidr: 10.0.0.0/23 + worker_cidr: 10.0.2.0/23 + apiserver_visibility: Private + ingress_visibility: Private + outbound_type: UserDefinedRouting + +private_clusters: + hosts: + private: + # Private cluster + name: aro + resource_group: "{{ CLUSTERPREFIX }}-private-{{ location }}" + apiserver_visibility: Private + ingress_visibility: Private + network_prefix_cidr: 10.0.0.0/22 + master_cidr: 10.0.0.0/23 + master_size: Standard_D8s_v3 + worker_cidr: 10.0.2.0/23 + worker_size: Standard_D4s_v3 + +byok_clusters: + hosts: + byok: + # Cluster with customer-managed disk encryption key + # https://learn.microsoft.com/en-us/azure/openshift/howto-byok + name: aro-414 + resource_group: "{{ CLUSTERPREFIX }}-byok-{{ location }}-414" + version: 4.14.16 + network_prefix_cidr: 10.0.0.0/22 + master_cidr: 10.0.0.0/23 + master_size: Standard_E8s_v5 + worker_cidr: 10.0.2.0/23 + worker_size: Standard_D4s_v5 diff --git a/ansible/roles/byok_cluster/tasks/main.yaml b/ansible/roles/byok_cluster/tasks/main.yaml new file mode 100644 index 00000000000..2c2f3807595 --- /dev/null +++ b/ansible/roles/byok_cluster/tasks/main.yaml @@ -0,0 +1,103 @@ +- name: Create resource group + ansible.builtin.include_tasks: + file: ../tasks/create_resourcegroup.yaml +- name: Create vnet and subnets + ansible.builtin.include_tasks: + file: ../tasks/create_vnet.yaml + +- name: Generate keyvault name + ansible.builtin.set_fact: + keyvault_name: "byok-{{ lookup('password', '/dev/null chars=ascii_letters,digits') | to_uuid | replace('-', '') | truncate(24 - 5, end='') }}" +- name: Debug keyvault_name + ansible.builtin.debug: + var: keyvault_name +- name: Byok keyvault + delegate_to: localhost + register: byok_keyvault_status + azure.azcollection.azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "{{ keyvault_name }}" + location: "{{ location }}" + enable_purge_protection: true + vault_tenant: "{{ sub_info.tenant_id }}" + sku: + name: standard + family: "A" + access_policies: + - tenant_id: "{{ sub_info.tenant_id }}" + object_id: "{{ currentuser_info.id }}" + keys: ["encrypt", "decrypt", "wrapkey", "unwrapkey", "sign", "verify", "get", "list", "create", "update", "import", "delete", "backup", + "restore", "recover", "purge"] + tags: + createdby: "{{ currentuser_info.userPrincipalName }}" + createdwith: "ansible" + purge: "true" +- name: Debug byok_keyvault_status + ansible.builtin.debug: + var: byok_keyvault_status +- name: Get byok keyvault + delegate_to: localhost + register: 
byok_keyvault_info
+  azure.azcollection.azure_rm_keyvault_info:
+    resource_group: "{{ resource_group }}"
+    name: "{{ keyvault_name }}"
+- name: Debug byok_keyvault_info
+  ansible.builtin.debug:
+    var: byok_keyvault_info
+
+- name: Byok key
+  delegate_to: localhost
+  register: byok_keyvault_key_status
+  azure.azcollection.azure_rm_keyvaultkey:
+    key_name: "{{ name }}-key"
+    keyvault_uri: "{{ byok_keyvault_info.keyvaults[0].vault_uri }}"
+- name: Debug byok_keyvault_key_status
+  ansible.builtin.debug:
+    var: byok_keyvault_key_status
+- name: Get byok key
+  delegate_to: localhost
+  register: byok_keyvault_key_info
+  azure.azcollection.azure_rm_keyvaultkey_info:
+    vault_uri: "{{ byok_keyvault_info.keyvaults[0].vault_uri }}"
+    name: "{{ name }}-key"
+- name: Debug byok_keyvault_key_info
+  ansible.builtin.debug:
+    var: byok_keyvault_key_info
+
+- name: Byok disk encryption set
+  delegate_to: localhost
+  register: byok_des_status
+  azure.azcollection.azure_rm_diskencryptionset:
+    resource_group: "{{ resource_group }}"
+    name: "{{ name }}-des"
+    location: "{{ location }}"
+    source_vault: "{{ keyvault_name }}"
+    key_url: "{{ byok_keyvault_key_info['keys'][0].kid }}"
+- name: Debug byok_des_status
+  ansible.builtin.debug:
+    var: byok_des_status
+
+- name: Grant byok keyvault access to the disk encryption set identity
+  delegate_to: localhost
+  register: byok_keyvault_status
+  azure.azcollection.azure_rm_keyvault:
+    resource_group: "{{ resource_group }}"
+    vault_name: "{{ keyvault_name }}"
+    location: "{{ location }}"
+    enable_purge_protection: true
+    vault_tenant: "{{ sub_info.tenant_id }}"
+    sku:
+      name: standard
+      family: "A"
+    access_policies:
+      - tenant_id: "{{ sub_info.tenant_id }}"
+        object_id: "{{ currentuser_info.id }}"
+        keys: ["encrypt", "decrypt", "wrapkey", "unwrapkey", "sign", "verify", "get", "list", "create", "update", "import", "delete", "backup",
+               "restore", "recover", "purge"]
+      - tenant_id: "{{ byok_des_status.state.identity.tenant_id }}"
+        object_id: "{{ byok_des_status.state.identity.principal_id }}"
+        keys: ["wrapkey", "unwrapkey", "get"]
+
+- name: Create aro cluster
+  ansible.builtin.include_tasks:
+    file: ../tasks/create_aro_cluster.yaml
diff --git a/ansible/roles/cleanup/tasks/main.yaml b/ansible/roles/cleanup/tasks/main.yaml
new file mode 100644
index 00000000000..578a2af0641
--- /dev/null
+++ b/ansible/roles/cleanup/tasks/main.yaml
@@ -0,0 +1,7 @@
+---
+- name: Delete aro cluster
+  ansible.builtin.include_tasks:
+    file: ../tasks/delete_aro_cluster.yaml
+- name: Delete resource group
+  ansible.builtin.include_tasks:
+    file: ../tasks/delete_resourcegroup.yaml
diff --git a/ansible/roles/pucm/tasks/main.yaml b/ansible/roles/pucm/tasks/main.yaml
new file mode 100644
index 00000000000..5c3d3bea8ac
--- /dev/null
+++ b/ansible/roles/pucm/tasks/main.yaml
@@ -0,0 +1,14 @@
+---
+- name: Az aro update
+  ansible.builtin.command:
+    argv: [az, aro, update, "--name={{ name }}", "--resource-group={{ resource_group }}", -o=yaml]
+  delegate_to: localhost
+- name: Wait for provisioningState to become Succeeded
+  azure.azcollection.azure_rm_openshiftmanagedcluster_info:
+    name: "{{ name }}"
+    resource_group: "{{ resource_group }}"
+  delegate_to: localhost
+  register: cluster_status
+  until: cluster_status.clusters.properties.provisioningState == 'Succeeded'
+  retries: 6
+  delay: 10
diff --git a/ansible/roles/refresh_credentials/tasks/main.yaml b/ansible/roles/refresh_credentials/tasks/main.yaml
new file mode 100644
index 00000000000..d317ea28dcd
--- /dev/null
+++ b/ansible/roles/refresh_credentials/tasks/main.yaml
@@ -0,0 +1,14 @@
+---
+- 
name: Az aro update refresh-credentials + ansible.builtin.command: + argv: [az, aro, update, "--name={{ name }}", "--resource-group={{ resource_group }}", --refresh-credentials, -o=yaml] + delegate_to: localhost +- name: Wait for provisioningState to become Succeeded + azure.azcollection.azure_rm_openshiftmanagedcluster_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + delegate_to: localhost + register: cluster_status + until: cluster_status.clusters.properties.provisioningState == 'Succeeded' + retries: 6 + delay: 10 diff --git a/ansible/roles/simple_cluster/tasks/main.yaml b/ansible/roles/simple_cluster/tasks/main.yaml new file mode 100644 index 00000000000..d0c91c80994 --- /dev/null +++ b/ansible/roles/simple_cluster/tasks/main.yaml @@ -0,0 +1,9 @@ +- name: Create resource group + ansible.builtin.include_tasks: + file: ../tasks/create_resourcegroup.yaml +- name: Create vnet and subnets + ansible.builtin.include_tasks: + file: ../tasks/create_vnet.yaml +- name: Create aro cluster + ansible.builtin.include_tasks: + file: ../tasks/create_aro_cluster.yaml diff --git a/ansible/tasks/create_aro_cluster.yaml b/ansible/tasks/create_aro_cluster.yaml new file mode 100644 index 00000000000..7ede35b07d3 --- /dev/null +++ b/ansible/tasks/create_aro_cluster.yaml @@ -0,0 +1,134 @@ +--- +- name: create_aro_cluster | Set up a jumphost for private clusters + when: apiserver_visibility is defined and apiserver_visibility == "Private" + ansible.builtin.include_tasks: + file: ../tasks/create_jumphost.yaml + +# TODO: Create clusters with azure_rm_openshiftmanagedcluster if possible +# - name: Create cluster service principal +# azure.azcollection.azure_rm_adserviceprincipal: +# delegate_to: localhost +# app_id: "{{ name | to_uuid }}" +# tenant: "{{ sub_info.tenant_id }}" +# register: csp_info +# - ansible.builtin.debug: var=csp_info +# - name: Create cluster service principal +# ansible.builtin.command: +# delegate_to: localhost +# argv: [ +# "az", "ad", "sp", "create-for-rbac", +# "-n", "{{ name }}-sp", +# "--role", "contributor", +# "--scopes", "{{ rg_info.state.id }}" +# ] +# register: sp_info +# - ansible.builtin.debug: var=sp_info +# - name: Create aro cluster +# azure.azcollection.azure_rm_openshiftmanagedcluster: +# delegate_to: localhost +# name: "{{ name }}" +# resource_group: "{{ resource_group }}" +# location: "{{ location }}" +# service_principal_profile: +# client_id: "{{ csp_info.state.client_id }}" +# client_secret: "{{ csp_info.state.client_secret }}" +# worker_profiles: +# - name: "worker" +# vm_size: "Standard_D4s_v3" +# subnet_id: "{{ worker_subnet_state.state.id }}" +# master_profile: +# vm_size: "Standard_D4s_v3" +# subnet_id: "{{ master_subnet_state.state.id }}" +- name: create_aro_cluster | Check if cluster already exists + ansible.builtin.command: + argv: ["az", "aro", "show", "--name={{ name }}", "--resource-group={{ resource_group }}", "-o=yaml"] + failed_when: "'provisioningState: Succeeded' not in aro_existing_cluster.stdout" + delegate_to: localhost + register: aro_existing_cluster + ignore_errors: true +- name: create_aro_cluster | Create aro cluster + when: aro_existing_cluster is not success + ansible.builtin.command: + argv: "{{ argv | reject('equalto', omit) | list }}" + vars: + argv: + - az + - aro + - create + - --name={{ name }} + - --resource-group={{ resource_group }} + - --location={{ location }} + - --master-subnet=master + - --subscription={{ sub_info.subscription_id }} + - --vnet=aro-vnet + - --worker-subnet=worker + - "{% if apiserver_visibility 
is defined %}--apiserver-visibility={{ apiserver_visibility }}{% else %}{{ omit }}{% endif %}"
+      - "{% if byok_des_status is defined and byok_des_status.state.provisioning_state == 'Succeeded'
+        %}--disk-encryption-set={{ byok_des_status.state.id }}{% else %}{{ omit }}{% endif %}"
+      - "{% if cluster_resource_group is defined %}--cluster-resource-group={{ cluster_resource_group }}{% else %}{{ omit }}{% endif %}"
+      - "{% if domain is defined %}--domain={{ domain }}{% else %}{{ omit }}{% endif %}"
+      - "{% if enable_preconfigured_nsg is defined %}--enable-preconfigured-nsg={{ enable_preconfigured_nsg }}{% else %}{{ omit }}{% endif %}"
+      - "{% if fips_validated_modules is defined %}--fips-validated-modules={{ fips_validated_modules }}{% else %}{{ omit }}{% endif %}"
+      - "{% if ingress_visibility is defined %}--ingress-visibility={{ ingress_visibility }}{% else %}{{ omit }}{% endif %}"
+      - "{% if master_encryption_at_host is defined %}--master-encryption-at-host={{ master_encryption_at_host }}{% else %}{{ omit }}{% endif %}"
+      - "{% if master_size is defined %}--master-vm-size={{ master_size }}{% else %}{{ omit }}{% endif %}"
+      - "{% if outbound_type is defined %}--outbound-type={{ outbound_type }}{% else %}{{ omit }}{% endif %}"
+      - "{% if pod_cidr is defined %}--pod-cidr={{ pod_cidr }}{% else %}{{ omit }}{% endif %}"
+      - "{% if service_cidr is defined %}--service-cidr={{ service_cidr }}{% else %}{{ omit }}{% endif %}"
+      - "{% if version is defined %}--version={{ version }}{% else %}{{ omit }}{% endif %}"
+      - "{% if worker_encryption_at_host is defined %}--worker-encryption-at-host={{ worker_encryption_at_host }}{% else %}{{ omit }}{% endif %}"
+      - "{% if worker_count is defined %}--worker-count={{ worker_count }}{% else %}{{ omit }}{% endif %}"
+      - "{% if worker_size is defined %}--worker-vm-size={{ worker_size }}{% else %}{{ omit }}{% endif %}"
+      - --tags=createdby='{{ currentuser_info.userPrincipalName }}' createdwith=ansible purge=true
+      - -o=yaml
+  delegate_to: localhost
+  register: aro_create_result
+- name: create_aro_cluster | Set fact aro_cluster_state from create output
+  when: aro_existing_cluster is not success
+  ansible.builtin.set_fact:
+    aro_cluster_state: "{{ aro_create_result.stdout | from_yaml }}"
+- name: create_aro_cluster | Set fact aro_cluster_state from existing cluster
+  when: aro_existing_cluster is success
+  ansible.builtin.set_fact:
+    aro_cluster_state: "{{ aro_existing_cluster.stdout | from_yaml }}"
+
+- name: create_aro_cluster | Debug aro_cluster_state
+  ansible.builtin.debug:
+    var: aro_cluster_state
+- name: create_aro_cluster | Get cluster kubeconfig
+  ansible.builtin.command:
+    argv: ["az", "aro", "get-admin-kubeconfig", "--name={{ name }}", "--resource-group={{ resource_group }}", "-f=/tmp/{{ name }}.kubeconfig"]
+  delegate_to: localhost
+  register: aro_credentials_result
+- name: create_aro_cluster | Copy cluster kubeconfig to jumphost
+  when: delegation != "localhost"
+  ansible.builtin.copy:
+    src: /tmp/{{ name }}.kubeconfig
+    dest: /tmp/{{ name }}.kubeconfig
+    owner: root
+    group: root
+    mode: '0644'
+  become: true
+  delegate_to: "{{ delegation }}"
+
+- name: create_aro_cluster | Cluster info
+  # Wait for the cluster API to become reachable
+  kubernetes.core.k8s_cluster_info:
+    kubeconfig: /tmp/{{ name }}.kubeconfig
+  delegate_to: "{{ delegation }}"
+  register: k8s_cluster_info
+  retries: 10
+  delay: 60
+- name: create_aro_cluster | Get clusterversion
+  kubernetes.core.k8s_info:
+    kubeconfig: /tmp/{{ name }}.kubeconfig
+    api_version: config.openshift.io/v1
+    kind: ClusterVersion
+    name: 
version
+  delegate_to: "{{ delegation }}"
+  register: oc_get_clusterversion
+  retries: 10 # Need retries to wait for the real ingress cert to deploy
+  delay: 60
+- name: create_aro_cluster | Debug clusterversion status conditions
+  ansible.builtin.debug:
+    var: oc_get_clusterversion.resources[0].status.conditions
diff --git a/ansible/tasks/create_jumphost.yaml b/ansible/tasks/create_jumphost.yaml
new file mode 100644
index 00000000000..824b030325b
--- /dev/null
+++ b/ansible/tasks/create_jumphost.yaml
@@ -0,0 +1,125 @@
+- name: create_jumphost | Jumphost network security group
+  azure.azcollection.azure_rm_securitygroup:
+    name: jumphost-nsg
+    resource_group: "{{ resource_group }}"
+    rules:
+      - name: ssh
+        protocol: Tcp
+        destination_port_range: 22
+        access: Allow
+        priority: 100
+        direction: Inbound
+    tags:
+      createdby: "{{ currentuser_info.userPrincipalName }}"
+      createdwith: "ansible"
+      purge: "true"
+  delegate_to: localhost
+  register: jumphost_nsg_state
+- name: create_jumphost | Debug jumphost_nsg_state
+  ansible.builtin.debug:
+    var: jumphost_nsg_state
+
+- name: create_jumphost | Jumphost subnet
+  azure.azcollection.azure_rm_subnet:
+    name: jumphost
+    virtual_network_name: "{{ vnet_name | default('aro-vnet') }}"
+    address_prefix_cidr: "{{ jumphost_cidr | default('192.168.254.240/28') }}"
+    resource_group: "{{ resource_group }}"
+    security_group: jumphost-nsg
+  delegate_to: localhost
+  register: jumphost_subnet_state
+- name: create_jumphost | Debug jumphost_subnet_state
+  ansible.builtin.debug:
+    var: jumphost_subnet_state
+
+- name: create_jumphost | Set ansible private key file
+  ansible.builtin.set_fact:
+    ansible_ssh_private_key_file: "/root/.ssh/{{ SSH_KEY_BASENAME }}.pub"
+- name: create_jumphost | Create jumphost vm
+  azure.azcollection.azure_rm_virtualmachine:
+    name: "jumphost"
+    resource_group: "{{ resource_group }}"
+    image:
+      publisher: Debian # RedHat
+      offer: debian-11-daily # rh-rhel
+      sku: 11-gen2 # rh-rhel9
+      version: latest
+    vm_size: "{{ jumphost_vm_size | default('Standard_B1ls') }}"
+    admin_username: arosre
+    ssh_password_enabled: false
+    ssh_public_keys:
+      - path: /home/arosre/.ssh/authorized_keys
+        # Get the SSH public key contents from "/root/.ssh/${SSH_KEY_BASENAME}.pub" (mounted from SSH_CONFIG_DIR)
+        key_data: "{{ lookup('ansible.builtin.file', ansible_ssh_private_key_file) }}"
+    virtual_network_name: aro-vnet
+    subnet_name: jumphost
+    managed_disk_type: Standard_LRS
+    tags:
+      createdby: "{{ currentuser_info.userPrincipalName }}"
+      createdwith: "ansible"
+      purge: "true"
+  delegate_to: localhost
+  register: jumphost_state
+- name: create_jumphost | Debug jumphost_state
+  ansible.builtin.debug:
+    var: jumphost_state
+
+- name: create_jumphost | Get jumphost public IP
+  azure.azcollection.azure_rm_publicipaddress_info:
+    resource_group: "{{ resource_group }}"
+    name: jumphost01
+  delegate_to: localhost
+  register: jumphost_ip_info
+- name: create_jumphost | Debug jumphost_ip_info
+  ansible.builtin.debug:
+    var: jumphost_ip_info
+- name: create_jumphost | Create jumphost in inventory
+  ansible.builtin.add_host:
+    name: jumphost
+    ansible_user: arosre
+    ansible_ssh_host: "{{ jumphost_ip_info.publicipaddresses[0].ip_address }}"
+- name: create_jumphost | Change delegation to jumphost
+  ansible.builtin.set_fact:
+    delegation: jumphost
+
+- name: create_jumphost | SSH Host Key Magic
+  # Extract the ssh host keys from the jumphost VM via az cli, then populate
+  # the ansible container's known_hosts file with the proper keys
+  # to avoid getting unknown host key errors
+  block:
+    - name: 
create_jumphost | Extract host keys from jumphost VM + ansible.builtin.command: + argv: [ + "az", "vm", "run-command", "invoke", + "--name", "jumphost", + "--resource-group", "{{ resource_group }}", + "--command-id", "RunShellScript", + "--scripts", "cat /etc/ssh/ssh_host*.pub", + "-o=yaml"] + delegate_to: localhost + register: jumphost_cat_hostkeys_result + retries: 10 # Wait for jumphost VM to boot + delay: 10 + - name: create_jumphost | Set fact jumphost_cat_hostkeys + ansible.builtin.set_fact: + jumphost_cat_hostkeys: "{{ jumphost_cat_hostkeys_result.stdout | from_yaml }}" + - name: create_jumphost | Write jumphost hostkeys + ansible.builtin.command: + argv: + - "bash" + - "-c" + - "umask 077; mkdir /root/.ssh; echo \"{{ jumphost_cat_hostkeys.value[0].message }}\" | + sed -e \"/^$/d\" -e \"/^[E\\[]/d\" -e \"s/^/{{ jumphost_ip_info.publicipaddresses[0].ip_address }} /\" -e \"s/ root.*//\" + > /root/.ssh/known_hosts" + delegate_to: localhost + # register: d +- name: create_jumphost | Gather facts + ansible.builtin.setup: + delegate_to: jumphost + register: jumphost_facts +- name: create_jumphost | Install required python packages + become: true + ansible.builtin.apt: + pkg: + - python3-kubernetes + delegate_to: jumphost diff --git a/ansible/tasks/create_resourcegroup.yaml b/ansible/tasks/create_resourcegroup.yaml new file mode 100644 index 00000000000..25ffcea0956 --- /dev/null +++ b/ansible/tasks/create_resourcegroup.yaml @@ -0,0 +1,40 @@ +--- +- name: create_resourcegroup | Get subscription info + azure.azcollection.azure_rm_subscription_info: + delegate_to: localhost + register: sub_status +- name: create_resourcegroup | Debug sub_status + ansible.builtin.debug: + # msg: Using subscription {{ sub_status.subscriptions[0].display_name }} {{ sub_info.subscriptions[0].subscription_id }} + var: sub_status +- name: create_resourcegroup | Select subscription + ansible.builtin.set_fact: + sub_info: "{{ sub_status.subscriptions[0] }}" +- name: create_resourcegroup | Debug sub_info + ansible.builtin.debug: + var: sub_info +- name: create_resourcegroup | Get current user info + delegate_to: localhost + register: signedinuser_output + ansible.builtin.command: + argv: ["az", "ad", "signed-in-user", "show", "-o=yaml"] +- name: create_resourcegroup | Set fact currentuser_info + ansible.builtin.set_fact: + currentuser_info: "{{ signedinuser_output.stdout | from_yaml }}" +- name: create_resourcegroup | Debug currentuser_info + ansible.builtin.debug: + var: currentuser_info + +- name: create_resourcegroup | Resource group + azure.azcollection.azure_rm_resourcegroup: + name: "{{ resource_group }}" + location: "{{ location }}" + tags: + createdby: "{{ currentuser_info.userPrincipalName }}" + createdwith: "ansible" + purge: "true" + delegate_to: localhost + register: rg_info +- name: create_resourcegroup | Debug rg_info + ansible.builtin.debug: + var: rg_info diff --git a/ansible/tasks/create_vnet.yaml b/ansible/tasks/create_vnet.yaml new file mode 100644 index 00000000000..e1ff5d0c068 --- /dev/null +++ b/ansible/tasks/create_vnet.yaml @@ -0,0 +1,81 @@ +--- +- name: create_vnet | Vnet + azure.azcollection.azure_rm_virtualnetwork: + name: "{{ vnet_name | default('aro-vnet') }}" + resource_group: "{{ resource_group }}" + address_prefixes_cidr: + - "{{ network_prefix_cidr }}" + - "{% if apiserver_visibility is defined and apiserver_visibility == 'Private' + %}{{ jumphost_cidr | default('192.168.254.240/28') }}{% else %}{{ omit }}{% endif %}" + location: "{{ location }}" + tags: + createdby: "{{ 
currentuser_info.userPrincipalName }}" + createdwith: "ansible" + purge: "true" + delegate_to: localhost + register: vnet_state +- name: create_vnet | Debug vnet_state + ansible.builtin.debug: + var: vnet_state + +- name: create_vnet | Create route table + when: "routes is defined" + block: + - name: create_vnet | Create route table + azure.azcollection.azure_rm_routetable: + name: "{{ vnet_name | default('aro-vnet') }}-rt" + resource_group: "{{ resource_group }}" + location: "{{ location }}" + tags: + createdby: "{{ currentuser_info.userPrincipalName }}" + createdwith: "ansible" + purge: "true" + delegate_to: localhost + register: route_table_state + - name: create_vnet | Debug route_table_state + ansible.builtin.debug: + var: route_table_state + - name: create_vnet | Create routes + azure.azcollection.azure_rm_route: + resource_group: "{{ resource_group }}" + name: "{{ item.name }}" + address_prefix: "{{ item.address_prefix }}" + next_hop_type: "{{ item.next_hop_type }}" + route_table_name: "{{ vnet_name | default('aro-vnet') }}-rt" + loop: "{{ routes }}" + delegate_to: localhost + register: route_entry_state + - name: create_vnet | Debug route_entry_state + ansible.builtin.debug: + var: route_entry_state + +- name: create_vnet | Master subnet + azure.azcollection.azure_rm_subnet: + name: master + virtual_network_name: "{{ vnet_name | default('aro-vnet') }}" + address_prefix_cidr: "{{ master_cidr }}" + resource_group: "{{ resource_group }}" + # private_link_service_network_policies: + # "{% if outbound_type is defined and outbound_type == 'UserDefinedRouting' %}Disabled{% else %}{{ omit }}{% endif %}" + # Setting this statically to "Disabled" because otherwise updating the subnet fails with + # Error creating or updating subnet master - (PrivateLinkServiceNetworkPoliciesCannotBeEnabledOnPrivateLinkServiceSubnet) Private link service network + # policies cannot be enabled on private link service subnet + private_link_service_network_policies: Disabled + route_table: "{% if routes is defined %}{{ vnet_name | default('aro-vnet') }}-rt{% else %}{{ omit }}{% endif %}" + delegate_to: localhost + register: master_subnet_state +- name: create_vnet | Debug master_subnet_state + ansible.builtin.debug: + var: master_subnet_state +- name: create_vnet | Worker subnet + azure.azcollection.azure_rm_subnet: + name: worker + virtual_network_name: "{{ vnet_name | default('aro-vnet') }}" + address_prefix_cidr: "{{ worker_cidr }}" + resource_group: "{{ resource_group }}" + route_table: "{% if routes is defined %}{{ vnet_name | default('aro-vnet') }}-rt{% else %}{{ omit }}{% endif %}" + delegate_to: localhost + register: worker_subnet_state +- name: create_vnet | Debug worker_subnet_state + ansible.builtin.debug: + var: worker_subnet_state diff --git a/ansible/tasks/delete_aro_cluster.yaml b/ansible/tasks/delete_aro_cluster.yaml new file mode 100644 index 00000000000..e10c98aa5a3 --- /dev/null +++ b/ansible/tasks/delete_aro_cluster.yaml @@ -0,0 +1,9 @@ +--- +- name: delete_aro_cluster | Delete aro cluster + when: CLEANUP is defined and CLEANUP == "True" + azure.azcollection.azure_rm_openshiftmanagedcluster: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + location: "{{ location }}" + state: absent + delegate_to: localhost diff --git a/ansible/tasks/delete_resourcegroup.yaml b/ansible/tasks/delete_resourcegroup.yaml new file mode 100644 index 00000000000..a2b67dc666a --- /dev/null +++ b/ansible/tasks/delete_resourcegroup.yaml @@ -0,0 +1,9 @@ +--- +- name: delete_resourcegroup | Delete resource 
group + when: CLEANUP is defined and CLEANUP == "True" + azure.azcollection.azure_rm_resourcegroup: + name: "{{ resource_group }}" + location: "{{ location }}" + force_delete_nonempty: true + state: absent + delegate_to: localhost
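
Usage note: roughly how the new Make targets are expected to be driven locally. The REGISTRY/VERSION placeholders and the prerequisites (an existing `az login` session, since the container mounts your Azure CLI config, and an SSH keypair at $(SSH_CONFIG_DIR)$(SSH_KEY_BASENAME)) are assumptions, not taken from the diff; the variable defaults shown match the Makefile.

    # Build the aro-ansible image (skipped when the tag already exists)
    make ansible-image REGISTRY=<registry-host> VERSION=<tag>
    # Deploy the "basic" inventory pattern to eastus; CLEANUP=True would tear it down again afterwards
    make cluster CLUSTERPATTERN=basic LOCATION=eastus CLUSTERPREFIX=$USER CLEANUP=False
    # Lint the playbooks without building the image
    make lint-ansible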
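On the optional-argument pattern in create_aro_cluster.yaml: each optional flag renders to `{{ omit }}` when its variable is undefined, and the surrounding `reject('equalto', omit)` filter then drops those placeholders from the argv list before the command runs. A minimal standalone sketch of the same technique, where `subscription` is a hypothetical variable standing in for the optional inputs:

    - name: Show a resource group, optionally pinning the subscription
      ansible.builtin.command:
        argv: "{{ argv | reject('equalto', omit) | list }}"
      vars:
        argv:
          - az
          - group
          - show
          - --name=my-rg
          - "{% if subscription is defined %}--subscription={{ subscription }}{% else %}{{ omit }}{% endif %}"
      delegate_to: localhost
      changed_when: false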
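On the byok_cluster role: it follows the BYOK flow from the howto-byok doc linked in hosts.yaml — create a purge-protected key vault, create a key, create a disk encryption set against that key, grant the set's identity wrapkey/unwrapkey/get on the vault, then pass the set to `az aro create --disk-encryption-set`. A rough az CLI equivalent of what the role automates; the resource names are placeholders and the exact flag spellings are an assumption, not taken from the diff:

    # Placeholder names: $RG (resource group), $VAULT (key vault); location comes from the inventory
    az keyvault create --resource-group "$RG" --name "$VAULT" --location eastus --enable-purge-protection true
    az keyvault key create --vault-name "$VAULT" --name aro-byok-key
    KID=$(az keyvault key show --vault-name "$VAULT" --name aro-byok-key --query key.kid -o tsv)
    az disk-encryption-set create --resource-group "$RG" --name aro-des --source-vault "$VAULT" --key-url "$KID"
    DES_ID=$(az disk-encryption-set show --resource-group "$RG" --name aro-des --query identity.principalId -o tsv)
    az keyvault set-policy --name "$VAULT" --object-id "$DES_ID" --key-permissions wrapKey unwrapKey get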