From 6078bdc8c97790f6fd66941b15471ff0708f0d44 Mon Sep 17 00:00:00 2001
From: hdamecharla <71097261+hdamecharla@users.noreply.github.com>
Date: Thu, 21 Dec 2023 22:40:34 +0530
Subject: [PATCH] Reconciliation of the branches (#517)

Fix configuration script and enable user-specific service installation.
Update Ansible version variable name.
Enable accelerated networking for SCS VM.
Update management DNS resource group value.
Fix file download in sync_deployer.sh.
Update password validation in 0.1-passwords role.
Clear errors during STONITH device enablement.
Update package versions for Red Hat 8.9.
Check required variable in sap-installation-media-storage-details role.
Fix package version verification in 1.17.0-set_runtime_facts role.
Enable systemd-based options for HA cluster configuration.
---
 .../ParameterDetails/LandscapeTemplate.txt    |   2 +-
 .../playbook_00_validate_parameters.yaml      |  98 ++++++++--------
 .../roles-misc/0.1-passwords/tasks/main.yaml  |   6 +-
 .../roles-misc/0.2-kv-secrets/tasks/main.yaml |   7 +-
 .../tasks/main.yaml                           |   4 +-
 .../tasks/1.17.0-set_runtime_facts.yml        |   2 +-
 .../tasks/1.17.2.0-cluster-RedHat.yml         |   3 +
 .../roles-sap/5.1-dbload/tasks/main.yaml      |   8 +-
 .../roles-sap/5.2-pas-install/tasks/main.yaml |  34 +++++-
 .../roles-sap/5.3-app-install/tasks/main.yaml |  30 +++++
 .../tasks/5.5.4.1-cluster-RedHat.yml          |  73 ++++++++++++
 .../tasks/5.6.1-set_runtime_facts.yml         |  27 +++--
 .../tasks/5.6.4.1-scsersprofile.yaml          |  30 +++--
 .../tasks/5.6.4.2-sap-resources-RedHat.yml    |  50 ++++++--
 ...5.6.4.2-sap-resources-Suse-SimpleMount.yml |  81 +++++++++----
 .../tasks/5.6.4.2-sap-resources-Suse.yml      |  81 +++++++++----
 .../tasks/5.6.7-config-systemd-sap-start.yml  | 109 ++++++++++++++++++
 .../pipelines/05-DB-and-SAP-installation.yaml |   8 +-
 deploy/scripts/configure_deployer.sh          |  87 +++++++++-----
 deploy/scripts/installer.sh                   |   8 ++
 deploy/scripts/setup_ado.sh                   |   9 +-
 deploy/scripts/sync_deployer.sh               |   8 +-
 deploy/terraform/run/sap_system/module.tf     |   8 +-
 deploy/terraform/run/sap_system/output.tf     |   2 +-
 deploy/terraform/run/sap_system/providers.tf  |   2 +-
 .../templates/configure_deployer.sh.tmpl      |   3 +-
 .../sap_system/app_tier/infrastructure.tf     |  14 +--
 .../sap_system/app_tier/variables_local.tf    |   4 +-
 .../modules/sap_system/app_tier/vm-app.tf     |   7 +-
 .../modules/sap_system/app_tier/vm-scs.tf     |   2 +-
 30 files changed, 592 insertions(+), 215 deletions(-)
 create mode 100644 deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml

diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
index 6b1035f48f..a656a56a02 100644
--- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
+++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
@@ -489,7 +489,7 @@ $$utility_vm_os_disk_size$$
 # Defines the type of the OS disk for the Virtual Machine
 $$utility_vm_os_disk_type$$
 
-
+
 # Defines if the utility virtual machine uses DHCP
 $$utility_vm_useDHCP$$
 
diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml
index a6760e35ae..e8ee37e142 100644
--- a/deploy/ansible/playbook_00_validate_parameters.yaml
+++ b/deploy/ansible/playbook_00_validate_parameters.yaml
@@ -62,20 +62,20 @@
         - db_high_availability is defined
         - database_high_availability is not defined

-    - name: "0.0 Validations - Check required variables are present and not empty"
+    - name: "0.0 Validations - Check required variables are present and not empty"
       ansible.builtin.assert:
         that:
-          - "{{ item_to_check.parameter 
is defined }}" # Has the variable been defined - - "{{ item_to_check.parameter | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ item_to_check.parameter | trim | length > 1 }}" - fail_msg: "{{ item_to_check.error }}" + - item_to_check.parameter is defined # Has the variable been defined + - item_to_check.parameter | type_debug != 'NoneType' # Is the variable not empty" + - item_to_check.parameter | trim | length > 1 + fail_msg: "{{ item_to_check.error }}" loop: - - { parameter: "database_high_availability", error: "database_high_availability has no value assigned" } - - { parameter: "database_cluster_type", error: "database_cluster_type has no value assigned" } - - { parameter: "scs_high_availability", error: "scs_high_availability has no value assigned" } - - { parameter: "scs_cluster_type", error: "scs_cluster_type has no value assigned" } - - { parameter: "use_msi_for_clusters", error: "use_msi_for_clusters has no value assigned" } - - { parameter: "platform", error: "platform has no value assigned" } + - { parameter: "database_high_availability", error: "database_high_availability has no value assigned" } + - { parameter: "database_cluster_type", error: "database_cluster_type has no value assigned" } + - { parameter: "scs_high_availability", error: "scs_high_availability has no value assigned" } + - { parameter: "scs_cluster_type", error: "scs_cluster_type has no value assigned" } + - { parameter: "use_msi_for_clusters", error: "use_msi_for_clusters has no value assigned" } + - { parameter: "platform", error: "platform has no value assigned" } loop_control: loop_var: item_to_check @@ -84,16 +84,16 @@ # Show parameters used for cluster aware coding # # -------------------------------------+---------------------------------------8 - - name: "Cluster aware code..." + - name: "Cluster aware code..." 
ansible.builtin.debug: - msg: # Best method for formatting output with Azure Devops Logs - - "database_high_availability: {{ database_high_availability }}" - - "database_cluster_type: {{ database_cluster_type }}" - - "scs_high_availability: {{ scs_high_availability }}" - - "scs_cluster_type: {{ scs_cluster_type }}" - - "use_msi_for_clusters: {{ use_msi_for_clusters }}" - - "platform: {{ platform | upper }}" - verbosity: 2 + msg: # Best method for formatting output with Azure Devops Logs + - "database_high_availability: {{ database_high_availability }}" + - "database_cluster_type: {{ database_cluster_type }}" + - "scs_high_availability: {{ scs_high_availability }}" + - "scs_cluster_type: {{ scs_cluster_type }}" + - "use_msi_for_clusters: {{ use_msi_for_clusters }}" + - "platform: {{ platform | upper }}" + verbosity: 2 # -------------------------------------+ # Fencing support is only needed when: @@ -118,17 +118,17 @@ # when: (database_high_availability and database_cluster_type == "AFA") or # (scs_high_availability and scs_cluster_type == "AFA") - - name: "0.0 Validations - Retrieve the Fencing SPN details" + - name: "0.0 Validations - Retrieve the Fencing SPN details" ansible.builtin.include_role: - name: roles-misc/0.2-kv-secrets + name: roles-misc/0.2-kv-secrets vars: - operation: fencing + operation: fencing when: - - (database_high_availability and database_cluster_type == "AFA") or - (scs_high_availability and scs_cluster_type == "AFA") - - platform != "ORACLE" + - (database_high_availability and database_cluster_type == "AFA") or + (scs_high_availability and scs_cluster_type == "AFA") + - platform != "ORACLE" tags: - - kv-secrets + - kv-secrets # -------------------------------------+ @@ -152,9 +152,9 @@ - name: "0.0 Validations - Check required variables are present and not empty" ansible.builtin.assert: that: - - "{{ item_to_check.parameter is defined }}" # Has the variable been defined - - "{{ item_to_check.parameter | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ item_to_check.parameter | trim | length > 1 }}" + - item_to_check.parameter is defined # Has the variable been defined + - item_to_check.parameter | type_debug != 'NoneType' # Is the variable not empty" + - item_to_check.parameter | trim | length > 1 fail_msg: "{{ item_to_check.error }}" loop: - { @@ -187,9 +187,9 @@ - name: "0.0 Validations - Check required SCS HA variables are present and not empty" ansible.builtin.assert: that: - - "{{ item_to_check.parameter is defined }}" # Has the variable been defined - - "{{ item_to_check.parameter | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ item_to_check.parameter | trim | length > 1 }}" + - item_to_check.parameter is defined # Has the variable been defined + - item_to_check.parameter | type_debug != 'NoneType' # Is the variable not empty" + - item_to_check.parameter | trim | length > 1 fail_msg: "{{ item_to_check.error }}" loop: - { @@ -217,9 +217,9 @@ - name: "0.0 Validations - Check required SCS HA fencing variables are present and not empty" ansible.builtin.assert: that: - - "{{ item_to_check.parameter is defined }}" # Has the variable been defined - - "{{ item_to_check.parameter | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ item_to_check.parameter | trim | length > 1 }}" + - item_to_check.parameter is defined # Has the variable been defined + - item_to_check.parameter | type_debug != 'NoneType' # Is the variable not empty" + - item_to_check.parameter | trim | length > 1 fail_msg: "{{ item_to_check.error }}" 
loop: - { @@ -248,9 +248,9 @@ - name: "0.0 Validations - Check required Database HA variables are present and not empty" ansible.builtin.assert: that: - - "{{ item_to_check.parameter is defined }}" # Has the variable been defined - - "{{ item_to_check.parameter | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ item_to_check.parameter | trim | length > 1 }}" + - item_to_check.parameter is defined # Has the variable been defined + - item_to_check.parameter | type_debug != 'NoneType' # Is the variable not empty" + - item_to_check.parameter | trim | length > 1 fail_msg: "{{ item_to_check.error }}" loop: - { @@ -369,8 +369,8 @@ - name: "0.0 Validations - Check for free disk space on deployer" ansible.builtin.assert: - that: "{{ (mnt_free_diskspace | first | int / (1024 * 1024 * 1024) | int) > (deployer_free_temp_disk_space | int) }}" - fail_msg: "The deployer needs at least {{ deployer_free_temp_disk_space }} GB of free disk space in /mnt" + that: (mnt_free_diskspace | first | int / (1024 * 1024 * 1024) | int) > (deployer_free_temp_disk_space | int) + fail_msg: "The deployer needs at least {{ deployer_free_temp_disk_space }} GB of free disk space in /mnt" when: - mnt_free_diskspace | length > 0 tags: @@ -473,7 +473,7 @@ - name: Validate SCS and PAS instance numbers ansible.builtin.assert: that: - - "scs_instance_number != pas_instance_number" + - scs_instance_number != pas_instance_number fail_msg: "Please ensure that the pas_instance_number is different from the scs_instance_number when installing PAS on ASCS" when: (ansible_play_hosts_all | length) == 2 tags: @@ -482,7 +482,7 @@ - name: "0.0 Validations - Validate SCS and PAS instance numbers" ansible.builtin.assert: that: - - "scs_instance_number != pas_instance_number" + - scs_instance_number != pas_instance_number fail_msg: "Please ensure that the pas_instance_number is different from the scs_instance_number on standalone installation" when: (ansible_play_hosts_all | length) == 1 tags: @@ -491,7 +491,7 @@ - name: "0.0 Validations - Validate DB and PAS instance numbers" ansible.builtin.assert: that: - - "db_instance_number != pas_instance_number" + - db_instance_number != pas_instance_number fail_msg: "Please ensure that the pas_instance_number is different from the db_instance_number on standalone installation" when: (ansible_play_hosts_all | length) == 1 tags: @@ -611,12 +611,12 @@ - ansible_os_family != "Windows" - name: "0.0 Validations - Create validation-done flag" - delegate_to: localhost + delegate_to: localhost become: false ansible.builtin.file: - path: "{{ _workspace_directory }}/.progress/validation-done" - state: touch - mode: 0755 + path: "{{ _workspace_directory }}/.progress/validation-done" + state: touch + mode: 0755 - name: "0.0 Validations - Netmask" ansible.builtin.debug: @@ -671,7 +671,7 @@ - name: "0.0 Validations - Check for free disk space on SCS" ansible.builtin.assert: that: - - "{{ (free_diskspace | first | int / (1024 * 1024 * 1024) | int) > (scs_free_diskspace | int) }}" + - (free_diskspace | first | int / (1024 * 1024 * 1024) | int) > (scs_free_diskspace | int) fail_msg: "The SCS server needs at least {{ scs_free_diskspace }} GB of free disk space in /mnt" tags: - 0.0-scs-diskspace diff --git a/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml b/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml index 79f1967c1f..b53c029d68 100644 --- a/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml @@ -97,9 +97,9 @@ - name: "0.1 
Password: - Ensure the password is set" ansible.builtin.assert: that: - - "{{ sap_password is defined }}" # Has the variable been defined - - "{{ sap_password | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ sap_password | trim | length > 8 }}" + - sap_password is defined # Has the variable been defined + - sap_password | type_debug != 'NoneType' # Is the variable not empty" + - sap_password | trim | length > 8 fail_msg: "The SAP main password was not set in key vault" - name: "0.1 Password: - Show SAP Password" diff --git a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml index 54840b167b..85940d854d 100644 --- a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml @@ -32,9 +32,7 @@ - "use_msi_for_clusters: {{ use_msi_for_clusters }}" - "platform: {{ platform | upper }}" verbosity: 2 - when: - - operation is defined - - operation == "fencing" + when: operation == "fencing" # -------------------------------------+---------------------------------------8 # @@ -42,7 +40,6 @@ - name: "0.2 Key Vault: - Import S User tasks" ansible.builtin.import_tasks: "s_user.yaml" when: - - operation is defined - operation == "SoftwareAcquisition" @@ -52,7 +49,6 @@ - name: "0.2 Key Vault: - Import Fencing secrets" ansible.builtin.import_tasks: "fencing.yaml" when: - - operation is defined - operation == "fencing" - (database_high_availability and database_cluster_type == "AFA") or (scs_high_availability and scs_cluster_type == "AFA") # AFA (Azure Fencing Agent) @@ -66,7 +62,6 @@ ansible.builtin.import_tasks: "wincluster-witness.yaml" # TODO: update when clause more appropriately when: - - operation is defined - operation == "fencing" - (scs_high_availability or database_high_availability) - not use_msi_for_clusters diff --git a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml index 3f69f6df37..ad2a99d7c0 100644 --- a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml @@ -167,8 +167,8 @@ - name: "0.0 Validations - Check required variables are present and not empty" ansible.builtin.assert: that: - - "{{ sapbits_sas_token is defined }}" # Has the variable been defined - - "{{ sapbits_sas_token | trim | length > 1 }}" # Does the variable have a value + - sapbits_sas_token is defined # Has the variable been defined + - sapbits_sas_token | trim | length > 1 # Does the variable have a value fail_msg: >- "The variable 'sapbits_sas_token' is not defined or is empty. Please provide it in the deployer key vault, sap-parameters file or pass it in as a parameter." 
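Reviewer note (annotation, not part of the patch): the next hunk fixes the same
templating problem inside a version test. Nested moustaches such as
"ansible_facts.packages['{{ packages_list.name }}'][0]..." template a string that
is itself already being templated; the rewrite indexes the dict directly instead.
A minimal sketch of the resulting task, assuming package_facts has already been
gathered and the loop variable packages_list carries name/version/version_type keys:

    - name: "Verify that the packages are the right version"
      ansible.builtin.assert:
        that:
          - ansible_facts.packages[packages_list.name][0].version is
            version(packages_list.version, '>=', version_type=packages_list.version_type)
        fail_msg: "{{ packages_list.name }} version is not greater than {{ packages_list.version }}"
        success_msg: "{{ packages_list.name }} version is greater than {{ packages_list.version }}"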
diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml index 3da499c9ff..d08271213f 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml @@ -251,7 +251,7 @@ - name: "Verify that the packages are the right version" ansible.builtin.assert: that: - - "ansible_facts.packages['{{ packages_list.name }}'][0].version is version('{{ packages_list.version }}', '>=', version_type='{{ packages_list.version_type }}')" + - 'ansible_facts.packages[ packages_list.name ][0].version is version( packages_list.version , ">=", version_type= packages_list.version_type )' fail_msg: "{{ packages_list.name }} version is not greater than {{ packages_list.version }}" success_msg: "{{ packages_list.name }} version is greater than {{ packages_list.version }}" register: package_version_results diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index 959f85fe84..a8aa2c3db3 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -205,6 +205,9 @@ - name: "1.17 Generic Pacemaker - Ensure the STONITH device is enabled" ansible.builtin.command: pcs property set stonith-enabled=true + - name: "1.17 Generic Pacemaker - Clear any errors during enablement of STONITH device" + ansible.builtin.command: pcs resource cleanup + # /*---------------------------------------------------------------------------8 # | | # | Fencing - END | diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 94193865cc..14d34a4f28 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -60,13 +60,17 @@ public: true when: bom.InstanceType is defined +- name: "DBLoad: register application type" + ansible.builtin.set_fact: + application_type: "{% if instance_type == 'SCS' %}JAVA{% else %}ABAP{% endif %}" + - name: "DBLoad: - Set the product ID" ansible.builtin.set_fact: product_id: "{{ bom.product_ids.dbl }}" - name: "DBLoad: - Set dbload path" ansible.builtin.set_fact: - db_load_path: "{{ tmp_directory }}/{{ sid_to_be_deployed.sid | upper }}/sapinst_instdir/{{ product_id.replace('.', '/').replace('/' + instance_type, '').split(':')[1] }}/INSTALL/DISTRIBUTED/{{ instance_type }}/DB" + db_load_path: "{{ tmp_directory }}/{{ sid_to_be_deployed.sid | upper }}/sapinst_instdir/{{ product_id.replace('.', '/').replace('/' + application_type, '').split(':')[1] }}/INSTALL/DISTRIBUTED/{{ application_type }}/DB" - name: "DBLoad: Check if DBLoad has been tried on this server" ansible.builtin.stat: @@ -92,7 +96,7 @@ - name: "DBLoad: flag" ansible.builtin.file: path: "/etc/sap_deployment_automation/{{ sid_to_be_deployed.sid | upper }}/sap_deployment_dbload.txt" - state: touch + state: touch mode: 0755 when: - dbload_performed_according_to_sapinst is defined diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 8a52f09193..18338621f0 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ 
b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -33,8 +33,8 @@ sap_inifile: "{{ bom_base_name }}-pas-{{ sid_to_be_deployed.sid | lower }}-{{ ansible_hostname }}.params" sap_inifile_template: "{{ bom_base_name }}{{ bom_suffix }}-pas-inifile-param.j2" dir_params: "{{ tmp_directory }}/.{{ sid_to_be_deployed.sid | lower }}-params" - db_lb_virtual_host_HANA: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" - db_lb_virtual_host_AnyDB: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ db_server_temp }}{% endif %}" + db_lb_virtual_host_HANA: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" + db_lb_virtual_host_AnyDB: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}{% else %}{{ db_server_temp }}{% endif %}" - name: "PAS Install: Set BOM facts db host" ansible.builtin.set_fact: @@ -90,6 +90,36 @@ pas_bom_instance_type: "{% if bom.InstanceType is defined %}{{ bom.InstanceType }}{% else %}ABAP{% endif %}" public: true +- name: "PAS Install: Check if the DB load balancer port is available and listening" + ansible.builtin.wait_for: + host: "{{ db_lb_virtual_host }}" + port: "3{{ db_instance_number }}13" + state: started + timeout: 30 + msg: 'INSTALL:0026:PAS Install failed, database is unreachable.' + register: db_port_open + failed_when: false + when: + - database_high_availability + - platform == "HANA" + +- name: "PAS Install: DEBUG - DB Loadbalancer check" + ansible.builtin.debug: + msg: "{{ db_port_open.msg }}" + verbosity: 2 + when: + - database_high_availability + - platform == "HANA" + - db_port_open.msg is defined + +- name: "ErrorHandling" + ansible.builtin.fail: + msg: "INSTALL:0026:PAS Install failed, database is unreachable." + when: + - database_high_availability + - platform == "HANA" + - db_port_open.msg is defined + - name: "PAS Install: Set schema_name variable for HANA" when: platform == "HANA" block: diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index 6b82e2f747..c0526aa27f 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -73,6 +73,36 @@ ansible.builtin.set_fact: app_bom_id: "{{ bom.product_ids.app }}" +- name: "APP Install: Check if the DB load balancer port is available and listening" + ansible.builtin.wait_for: + host: "{{ db_lb_virtual_host }}" + port: "3{{ db_instance_number }}13" + state: started + timeout: 30 + msg: 'INSTALL:0026:APP Install failed, database is unreachable.' + register: db_port_open + failed_when: false + when: + - database_high_availability + - platform == "HANA" + +- name: "APP Install: DEBUG - DB Loadbalancer check" + ansible.builtin.debug: + msg: "{{ db_port_open.msg }}" + verbosity: 2 + when: + - database_high_availability + - platform == "HANA" + - db_port_open.msg is defined + +- name: "ErrorHandling" + ansible.builtin.fail: + msg: "INSTALL:0026:APP Install failed, database is unreachable." 
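# Annotation (reviewer note, not part of the patch): this hunk mirrors the PAS
# change above - a soft probe followed by explicit error handling. wait_for runs
# with failed_when: false so an unreachable port only records a msg, the debug
# task surfaces that msg at verbosity 2, and this fail task turns it into a hard
# stop. Port "3{{ db_instance_number }}13" is, by SAP convention, the SQL port
# of the HANA system database (3<NN>13), so the probe verifies that the HA load
# balancer is actually forwarding to the active HANA node before the installer
# is started.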
+ when: + - database_high_availability + - platform == "HANA" + - db_port_open.msg is defined + - name: "APP Install: Set schema_name variable for HANA" when: platform == "HANA" block: diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index de5df63a15..711587ae0b 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -231,3 +231,76 @@ when: ansible_hostname == primary_instance_name # End of HANA clustering resources + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ +# Follow steps described in https://access.redhat.com/articles/6884531 + +- name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + +- name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + become: true + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + block: + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml index d2481bdd39..c587ccce48 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml +++ 
b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml @@ -36,7 +36,7 @@ verbosity: 2 # Returns bom object -- name: "5.6 SCSERS: SCS HA Install - Register BoM" +- name: "5.6 SCSERS - SCS HA Install - Register BoM" ansible.builtin.include_role: name: roles-sap/3.3.1-bom-utility tasks_from: bom-register @@ -46,12 +46,12 @@ sa_enabled: true when: bom is not defined -- name: "5.6 SCS HA Install: Default instance type" +- name: "5.6 SCSERS - HA Install: Default instance type" ansible.builtin.set_fact: instance_type: "ASCS" when: bom.InstanceType is not defined -- name: "5.6 SCS HA Install: register instance type" +- name: "5.6 SCSERS - HA Install: register instance type" ansible.builtin.set_fact: instance_type: >- {%- set _instance_type = 'ASCS' -%} @@ -66,7 +66,7 @@ public: true when: bom.InstanceType is defined -- name: "5.6 SCS HA Install: Populate InstanceName for cluster resource" +- name: "5.6 SCSERS - HA Install: Populate InstanceName for cluster resource" ansible.builtin.set_fact: instance_name: "{{ sap_sid }}_{{ instance_type }}{{ scs_instance_number }}_{{ scs_virtual_hostname }}" start_profile_path: "/sapmnt/{{ sap_sid }}/profile/{{ sap_sid }}_{{ instance_type }}{{ scs_instance_number }}_{{ scs_virtual_hostname }}" @@ -74,7 +74,7 @@ # Set fact for the timeout value of monitor operation for filesystem and ASCS/ERS resources # Since we only configure NFS4.1, the timeout value is set to 40 seconds for non-ANF and 105 seconds for ANF -- name: "5.6 SCS HA install calculate filesystem timeout" +- name: "5.6 SCSERS - HA install calculate filesystem timeout" ansible.builtin.set_fact: clus_fs_mon_timeout: >- {%- set _timeoutvalue = 40 -%} @@ -91,7 +91,7 @@ when: - scs_high_availability -- name: "5.6 SCS HA install calculate SAP resource monitor timeout" +- name: "5.6 SCSERS - HA install calculate SAP resource monitor timeout" ansible.builtin.set_fact: clus_sap_mon_timeout: >- {%- set _timeoutvalue = 60 -%} @@ -108,19 +108,28 @@ when: - scs_high_availability -- name: "ASCS/ERS check if installed" +- name: "5.6 SCSERS - ASCS/ERS check if installed" become: true block: - - name: "SCS HA Install: check if installed" + - name: "5.6 SCSERS - SCS HA Install: check if installed" ansible.builtin.stat: path: /etc/sap_deployment_automation//{{ sap_sid | upper }}/sap_deployment_scs.txt register: scs_installed - - name: "ERS Install: check if installed" + - name: "5.6 SCSERS - ERS Install: check if installed" ansible.builtin.stat: path: /etc/sap_deployment_automation//{{ sap_sid | upper }}/sap_deployment_ers.txt register: ers_installed + +- name: "5.6 SCSERS - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + +- name: "5.6 SCSERS - check if the OS version is SLES 15 or newer" + ansible.builtin.set_fact: + is_sles_15_or_newer: "{{ ansible_distribution_version is version('15', '>=') | default(false) }}" + # /*---------------------------------------------------------------------------8 # | END | # +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml index 911ebd9b86..476ad229ba 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml @@ -31,8 
+31,14 @@ ansible.builtin.replace: backup: true path: /usr/sap/sapservices - regexp: '^LD_LIBRARY_PATH=' - replace: "#LD_LIBRARY_PATH=" + regexp: '^([^#\n].+{{ sapservice }}.+)$' + replace: '# \1' + loop: + - "{{ sap_sid | upper }}_{{ instance_type | upper }}{{ scs_instance_number }}_{{ scs_virtual_hostname }}" + - "{{ sap_sid | upper }}_ERS{{ ers_instance_number }}_{{ ers_virtual_hostname }}" + loop_control: + label: "{{ sapservice }}" + loop_var: sapservice when: - ansible_os_family | upper == "REDHAT" @@ -43,8 +49,10 @@ - name: "5.6 SCSERS - ASCS, ERS profile changes" become: true + when: + - ansible_os_family | upper == "SUSE" + - inventory_hostname == primary_instance_name block: - - name: "5.6 SCSERS - ASCS Profile - add service/halib" ansible.builtin.blockinfile: path: '{{ scs_instance_profile_path }}' @@ -60,12 +68,13 @@ service/halib = $(DIR_CT_RUN)/saphascriptco.so service/halib_cluster_connector = /usr/bin/sap_suse_cluster_connector register: ersservicehalib - when: - - ansible_os_family | upper == "SUSE" - - inventory_hostname == primary_instance_name + - name: "5.6 SCSERS - ASCS, ERS profile changes" become: true + when: + - ansible_os_family | upper == "REDHAT" + - inventory_hostname == primary_instance_name block: - name: "5.6 SCSERS - ASCS Profile - add service/halib" ansible.builtin.blockinfile: @@ -82,12 +91,12 @@ service/halib = $(DIR_CT_RUN)/saphascriptco.so service/halib_cluster_connector = /usr/bin/sap_cluster_connector register: ersservicehalib - when: - - ansible_os_family | upper == "REDHAT" - - inventory_hostname == primary_instance_name + - name: "5.6 SCSERS - ASCS, ERS profile changes" become: true + when: + - inventory_hostname == primary_instance_name block: # SAP introduced support for enqueue server 2, including replication, as of SAP NW 7.52. 
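# (Annotation, not part of the patch: with ENSA1 the ASCS must fail over to the
#  node where the ERS keeps the replicated lock table, which is why the tasks
#  below add keep-alive settings such as enque/encni/set_so_keepalive = true
#  and pin resource placement; ENSA2 can restart the enqueue server on any
#  node, so it needs neither.)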
# Starting with ABAP Platform 1809, enqueue server 2 is installed by default @@ -116,8 +125,7 @@ replace: '# Autostart' tags: - ersautostart - when: - - inventory_hostname == primary_instance_name + # Following are the changes in ASCS/ERS profiles based if ENSA1 is applicable - name: "5.6 SCSERS - Add the keep alive parameter, if using ENSA1" diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index ecfa7b6251..2c335ac124 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -52,12 +52,12 @@ - name: "5.6 SCSERS - RHEL - ENSA1 - Bring Primary node online" ansible.builtin.command: pcs node unstandby {{ primary_instance_name }} - - name: "5.6 SCSERS - RHEL - ENSA1 - Set the Cluster out of maintenance mode" - ansible.builtin.command: pcs property set maintenance-mode=false + # - name: "5.6 SCSERS - RHEL - ENSA1 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: pcs property set maintenance-mode=false - - name: "5.6 SCSERS - RHEL - ENSA2 - pause the execution for 2 minutes" - ansible.builtin.wait_for: - timeout: 120 + # - name: "5.6 SCSERS - RHEL - ENSA2 - pause the execution for 2 minutes" + # ansible.builtin.wait_for: + # timeout: 120 # Use the following if using ENSA2 - name: "5.6 SCSERS - RHEL - SAP Resources - Cluster Configuration after Install" @@ -106,19 +106,45 @@ register: co_location failed_when: co_location.rc > 1 - - name: "5.6 SCSERS - RHEL - ENSA2 - Set the Cluster out of maintenance mode" - ansible.builtin.command: pcs property set maintenance-mode=false + # - name: "5.6 SCSERS - RHEL - ENSA2 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: pcs property set maintenance-mode=false # - name: "5.6 SCSERS - RHEL - ENSA2 - pause the execution for 2 minutes" # ansible.builtin.wait_for: # timeout: 120 +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "5.6 SCSERS - RHEL - Systemd-Based SAP Startup Framework" + ansible.builtin.include_tasks: + file: "5.6.7-config-systemd-sap-start.yml" + apply: + become: true + become_user: root + tags: + - "5.6.7-config-systemd-sap-start" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer | default(false) + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# +------------------------------------4--------------------------------------*/ + + # +------------------------------------4---------------------------------------*| # | 5.6.6-validate.yml - Validate the SAP ASCS/SCS and ERS resources | -# | These are common tasks +# | These are common tasks | # +------------------------------------+---------------------------------------*| -- name: "5.6 SCSERS - RHEL - Enable Maintenance mode for the cluster" - ansible.builtin.command: pcs property set maintenance-mode=true + +# - name: "5.6 SCSERS - RHEL - Enable Maintenance mode for the cluster" +# ansible.builtin.command: pcs property set maintenance-mode=true - name: "5.6 SCSERS - RHEL - Reboot and wait 5 minutes" ansible.builtin.debug: @@ -126,8 +152,8 @@ - name: "5.6 SCSERS - RHEL - Reboot the 
primary/secondary instance" ansible.builtin.reboot: -# reboot_timeout: 300 -# post_reboot_delay: 300 + reboot_timeout: 300 + post_reboot_delay: 300 - name: "5.6 SCSERS - RHEL - Set the Cluster out of maintenance mode" ansible.builtin.command: pcs property set maintenance-mode=false diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml index 70d48b8a17..9405b3e2d9 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml @@ -88,12 +88,12 @@ - name: " Bring primary node online " ansible.builtin.command: crm node online {{ primary_instance_name }} - - name: "5.6 SCSERS - SUSE - ENSA1 - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false + # - name: "5.6 SCSERS - SUSE - ENSA1 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: crm configure property maintenance-mode=false - - name: "5.6 SCSERS - SUSE - ENSA1 - pause the execution for 2 minutes" - ansible.builtin.pause: - seconds: 120 + # - name: "5.6 SCSERS - SUSE - ENSA1 - pause the execution for 2 minutes" + # ansible.builtin.pause: + # seconds: 120 # [1] Create the SAP cluster resources - if using ENSA2 - @@ -180,34 +180,69 @@ - name: "5.6 SCSERS - SUSE - ENSA2 - Bring primary node online " ansible.builtin.command: crm node online {{ primary_instance_name }} - - name: "5.6 SCSERS - SUSE - ENSA2 - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false + # - name: "5.6 SCSERS - SUSE - ENSA2 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: crm configure property maintenance-mode=false - - name: "5.6 SCSERS - SUSE - ENSA2 - pause the execution for 2 minutes" - ansible.builtin.wait_for: - timeout: 120 + # - name: "5.6 SCSERS - SUSE - ENSA2 - pause the execution for 2 minutes" + # ansible.builtin.wait_for: + # timeout: 120 +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "5.6 SCSERS - SLES - Systemd-Based SAP Startup Framework" + ansible.builtin.include_tasks: + file: "5.6.7-config-systemd-sap-start.yml" + apply: + become: true + become_user: root + tags: + - "5.6.7-config-systemd-sap-start" + when: + - is_sles_15_or_newer is defined + - is_sles_15_or_newer | default(false) + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# +------------------------------------4--------------------------------------*/ # +------------------------------------4---------------------------------------*| # | 5.6.6-validate.yml - Validate the SAP ASCS/SCS and ERS resources | -- name: "5.6 SCSERS - SUSE - Set the cluster on maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=true - -- name: "5.6 SCSERS - SUSE - Reboot the primary/secondary instance" +- name: "Get the cluster maintenance mode status" + ansible.builtin.shell: crm configure get_property maintenance-mode + register: get_status_maintenance_mode + changed_when: false + ignore_errors: true + +- name: "Set the cluster maintenance mode if 
not already in maintenance mode" + ansible.builtin.shell: crm configure property maintenance-mode=true + when: >- + get_status_maintenance_mode.stdout is not search('true') or + get_status_maintenance_mode.stdout is search('false') + +- name: "5.6 SCSERS - SLES - Reboot and wait 5 minutes" + ansible.builtin.debug: + msg: "Reboot and wait 5 minutes" + +- name: "5.6 SCSERS - SUSE - Reboot the primary/secondary instance" ansible.builtin.reboot: - reboot_timeout: 300 - post_reboot_delay: 300 - + reboot_timeout: 300 + post_reboot_delay: 300 -- name: "5.6 SCSERS - SUSE - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false +- name: "5.6 SCSERS - SUSE - Set the Cluster out of maintenance mode" + ansible.builtin.command: crm configure property maintenance-mode=false -- name: "5.6 SCSERS - SUSE - cluster group validation" +- name: "5.6 SCSERS - SUSE - cluster group validation" ansible.builtin.include_tasks: - file: "5.6.6-validate.yml" + file: "5.6.6-validate.yml" apply: - become: true - become_user: root + become: true + become_user: root tags: - "5.6.6-validate" when: diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml index 25836d2594..8b5d47f597 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml @@ -69,12 +69,12 @@ # - name: " Bring primary node online " # ansible.builtin.command: crm node online {{ primary_instance_name }} - - name: "5.6 SCSERS - SUSE - ENSA1 - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false + # - name: "5.6 SCSERS - SUSE - ENSA1 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: crm configure property maintenance-mode=false - - name: "5.6 SCSERS - SUSE - ENSA1 - pause the execution for 2 minutes" - ansible.builtin.pause: - seconds: 120 + # - name: "5.6 SCSERS - SUSE - ENSA1 - pause the execution for 2 minutes" + # ansible.builtin.pause: + # seconds: 120 # [1] Create the SAP cluster resources - if using ENSA2 - @@ -141,34 +141,69 @@ - name: "5.6 SCSERS - SUSE - ENSA2 - Bring primary node online " ansible.builtin.command: crm node online {{ primary_instance_name }} - - name: "5.6 SCSERS - SUSE - ENSA2 - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false + # - name: "5.6 SCSERS - SUSE - ENSA2 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: crm configure property maintenance-mode=false - - name: "5.6 SCSERS - SUSE - ENSA2 - pause the execution for 2 minutes" - ansible.builtin.wait_for: - timeout: 120 + # - name: "5.6 SCSERS - SUSE - ENSA2 - pause the execution for 2 minutes" + # ansible.builtin.wait_for: + # timeout: 120 +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "5.6 SCSERS - SLES - Systemd-Based SAP Startup Framework" + ansible.builtin.include_tasks: + file: "5.6.7-config-systemd-sap-start.yml" + apply: + become: true + become_user: root + tags: + - "5.6.7-config-systemd-sap-start" + when: + - is_sles_15_or_newer is defined + - is_sles_15_or_newer | default(false) + +# 
/*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# +------------------------------------4--------------------------------------*/ # +------------------------------------4---------------------------------------*| # | 5.6.6-validate.yml - Validate the SAP ASCS/SCS and ERS resources | -- name: "5.6 SCSERS - SUSE - Set the cluster on maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=true - -- name: "5.6 SCSERS - SUSE - Reboot the primary/secondary instance" +- name: "Get the cluster maintenance mode status" + ansible.builtin.shell: crm configure get_property maintenance-mode + register: get_status_maintenance_mode + changed_when: false + ignore_errors: true + +- name: "Set the cluster maintenance mode if not already in maintenance mode" + ansible.builtin.shell: crm configure property maintenance-mode=true + when: >- + get_status_maintenance_mode.stdout is not search('true') or + get_status_maintenance_mode.stdout is search('false') + +- name: "5.6 SCSERS - SLES - Reboot and wait 5 minutes" + ansible.builtin.debug: + msg: "Reboot and wait 5 minutes" + +- name: "5.6 SCSERS - SUSE - Reboot the primary/secondary instance" ansible.builtin.reboot: - reboot_timeout: 300 - post_reboot_delay: 300 - + reboot_timeout: 300 + post_reboot_delay: 300 -- name: "5.6 SCSERS - SUSE - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false +- name: "5.6 SCSERS - SUSE - Set the Cluster out of maintenance mode" + ansible.builtin.command: crm configure property maintenance-mode=false -- name: "5.6 SCSERS - SUSE - cluster group validation" +- name: "5.6 SCSERS - SUSE - cluster group validation" ansible.builtin.include_tasks: - file: "5.6.6-validate.yml" + file: "5.6.6-validate.yml" apply: - become: true - become_user: root + become: true + become_user: root tags: - "5.6.6-validate" when: diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml new file mode 100644 index 0000000000..2bbaad04a2 --- /dev/null +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -0,0 +1,109 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +# For systemd services the SAP ASCS/SCS and ERS resources are created as systemd services +# the path for the service file is /etc/systemd/system/SAP_.service +- name: "5.6 SCSERS - check if the SAP ASCS/SCS and ERS resources are created as systemd services" + ansible.builtin.stat: + path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ sap_instance_number }}.service" + register: systemd_service_file_path + loop: + - "{{ scs_instance_number }}" + - "{{ ers_instance_number }}" + loop_control: + loop_var: sap_instance_number + +- name: "5.6 SCSERS - Set fact for the systemd services existance" + ansible.builtin.set_fact: + systemd_service_names: "{{ + systemd_service_file_path.results + | selectattr('stat.exists', 'equalto', true) + | map(attribute='stat.path') + | regex_replace('/etc/systemd/system/', '') + }}" + +- name: "5.6 SCSERS - Disable the services and stop auto-restart if they exist" + when: + - systemd_service_names is defined + - systemd_service_names | 
length > 0 + block: + # - name: "5.6 SCSERS - Disable the services if they exist" + # ansible.builtin.systemd: + # name: "{{ service_name }}" + # enabled: false + # failed_when: false + # loop: "{{ systemd_service_names }}" + # loop_control: + # loop_var: service_name + + - name: "5.6 SCSERS - Disable and Stop the services if they exist" + become: true + become_user: root + ansible.builtin.systemd: + name: "{{ service_name }}" + enabled: false + state: "stopped" + failed_when: false + loop: "{{ systemd_service_names }}" + loop_control: + loop_var: service_name + + - name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file" + become: true + become_user: root + ansible.builtin.lineinfile: + path: '{{ dropfile }}' + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Service]" + loop: + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" + loop_control: + loop_var: dropfile + + - name: "5.6 SCSERS Pacemaker - (systemd) Update drop-in file" + become: true + become_user: root + ansible.builtin.lineinfile: + path: '{{ dropfile }}' + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Service]$' + line: "Restart=no" + loop: + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" + loop_control: + loop_var: dropfile + register: dropinfile + + - name: "5.6 SCSERS - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + + # - name: "5.6 SCSERS - validate that the drop-in file is active" + # when: + # ansible.builtin.shell: >- + # systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' + # register: dropinfile_validation + # changed_when: false + # failed_when: dropinfile_validation.rc > 0 + + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# +------------------------------------4--------------------------------------*/ diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index a0f60e3607..9d1a2c7012 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -231,11 +231,10 @@ stages: export workload_prefix=${az_var} ; echo 'Workload Prefix' ${workload_prefix}; echo 'Workload Prefix' ${workload_prefix} fi - if [ $EXTRA_PARAMETERS = '$(EXTRA_PARAMETERS)' ]; then - echo "##vso[task.logissue type=warning]No extra parameters were provided." 
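# Annotation (reviewer note, not part of the patch): $(EXTRA_PARAMETERS) is an
# Azure DevOps macro that the agent expands before bash runs. When the pipeline
# variable is undefined the macro is left in place as literal text, so comparing
# the shell variable against the quoted literal is how the script detects that
# no extra parameters were supplied; the [[ ]] form added below also keeps the
# comparison safe when the value is empty or contains spaces.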
+ if [[ $EXTRA_PARAMETERS = "'$(EXTRA_PARAMETERS)'" ]]; then new_parameters=$PIPELINE_EXTRA_PARAMETERS else - echo "##vso[task.logissue type=warning]Extra parameters were provided: $EXTRA_PARAMETERS" + echo "##vso[task.logissue type=warning]Extra parameters were provided - '$EXTRA_PARAMETERS'" new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS" fi @@ -247,7 +246,7 @@ stages: echo -e "$green--- az login ---$reset" - #If the deployer_file exists we run on a deployer configured by the framework instead of a azdo hosted one + # If the deployer_file exists we run on a deployer configured by the framework instead of a azdo hosted one deployer_file=/etc/profile.d/deploy_server.sh az login --service-principal -u $AZURE_CLIENT_ID -p=$AZURE_CLIENT_SECRET --tenant $AZURE_TENANT_ID --output none az account set --subscription $AZURE_SUBSCRIPTION_ID @@ -257,7 +256,6 @@ stages: echo -e "$boldred--- Login failed ---$reset" echo "##vso[task.logissue type=error]az login failed." exit $return_code - fi az keyvault secret show --name ${workload_prefix}-sid-sshkey --vault-name $workload_key_vault --query value -o tsv > artifacts/${SAP_SYSTEM_CONFIGURATION_NAME}_sshkey diff --git a/deploy/scripts/configure_deployer.sh b/deploy/scripts/configure_deployer.sh index d414fb942e..6e577791df 100755 --- a/deploy/scripts/configure_deployer.sh +++ b/deploy/scripts/configure_deployer.sh @@ -36,6 +36,13 @@ # Setup some useful shell options # +# Check if the script is running as root +if [[ $EUID -eq 0 ]]; then + echo "This script should not be run as root or with sudo. Please run as a regular user." + exit 1 +fi + + # Print expanded commands as they are about to be executed set -o xtrace @@ -306,7 +313,7 @@ case "$(get_distro_name)" in echo "we are inside ubuntu" rel=$(lsb_release -a | grep Release | cut -d':' -f2 | xargs) if [ "$rel" == "22.04" ]; then - ansible_version="${ansible_version:-2.15}" + ansible_version="2.15" ansible_major="${ansible_version%%.*}" ansible_minor=$(echo "${ansible_version}." | cut -d . -f 2) fi @@ -454,6 +461,8 @@ wget -nv -O /tmp/"${tf_zip}" "https://releases.hashicorp.com/terraform/${tfversi sudo unzip -o /tmp/"${tf_zip}" -d "${tf_dir}" sudo ln -vfs "../$(basename "${tf_dir}")/terraform" "${tf_bin}/terraform" +sudo rm /tmp/"${tf_zip}" + # Uninstall Azure CLI - For some platforms case "$(get_distro_name)" in (ubuntu|sles) @@ -733,11 +742,50 @@ export PATH="${PATH}":"${ansible_bin}":"${tf_bin}":"${HOME}"/Azure_SAP_Automated echo "# Configure environment settings for deployer interactive sessions" | tee -a /tmp/deploy_server.sh echo "export ARM_SUBSCRIPTION_ID=${subscription_id}" | tee -a /tmp/deploy_server.sh -echo "export SAP_AUTOMATION_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/sap-automation" | tee -a /tmp/deploy_server.sh -echo "export DEPLOYMENT_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/sap-automation" | tee -a /tmp/deploy_server.sh -echo "export CONFIG_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/WORKSPACES" | tee -a /tmp/deploy_server.sh -echo export "PATH=${ansible_bin}:${tf_bin}:${PATH}:${HOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/scripts:${HOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/ansible" | tee -a /tmp/deploy_server.sh +# Replace with your actual agent directory +AGENT_DIR="/home/${USER}/agent" + +# Check if the .agent file exists +if [ -f "$AGENT_DIR/.agent" ]; then + echo "Azure DevOps Agent is configured." 
+ echo export "PATH=${ansible_bin}:${tf_bin}:${PATH}" | tee -a /tmp/deploy_server.sh +else + echo "Azure DevOps Agent is not configured." + + echo "export SAP_AUTOMATION_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/sap-automation" | tee -a /tmp/deploy_server.sh + echo "export DEPLOYMENT_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/sap-automation" | tee -a /tmp/deploy_server.sh + echo "export CONFIG_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/WORKSPACES" | tee -a /tmp/deploy_server.sh + + echo export "PATH=${ansible_bin}:${tf_bin}:${PATH}:${HOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/scripts:${HOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/ansible" | tee -a /tmp/deploy_server.sh + + # Set env for MSI + echo "export ARM_USE_MSI=true" | tee -a /tmp/deploy_server.sh + + /usr/bin/az login --identity 2>error.log || : + # Ensure that the user's account is logged in to Azure with specified creds + + if [ ! -f error.log ]; then + /usr/bin/az account show > az.json + client_id=$(jq --raw-output .id az.json) + tenant_id=$(jq --raw-output .tenantId az.json) + rm az.json + else + client_id='' + tenant_id='' + fi + + if [ -n "${client_id}" ]; then + export ARM_CLIENT_ID=${client_id} + echo "export ARM_CLIENT_ID=${client_id}" | tee -a /tmp/deploy_server.sh + fi + + if [ -n "${tenant_id}" ]; then + export ARM_TENANT_ID=${tenant_id} + echo "export ARM_TENANT_ID=${tenant_id}" | tee -a /tmp/deploy_server.sh + fi +fi + # Set env for ansible echo "export ANSIBLE_HOST_KEY_CHECKING=False" | tee -a /tmp/deploy_server.sh @@ -753,46 +801,23 @@ case "$(get_distro_name)" in (sles) echo "export DOTNET_ROOT=${DOTNET_ROOT}" | tee -a /tmp/deploy_server.sh ;; - (rhel*) +(rhel*) ;; esac chown -R "${USER}" "${asad_home}" -# Set env for MSI -echo "export ARM_USE_MSI=true" | tee -a /tmp/deploy_server.sh - -/usr/bin/az login --identity 2>error.log || : -# Ensure that the user's account is logged in to Azure with specified creds - -if [ ! 
-f error.log ]; then - /usr/bin/az account show > az.json - client_id=$(jq --raw-output .id az.json) - tenant_id=$(jq --raw-output .tenantId az.json) - rm az.json -else - client_id='' - tenant_id='' -fi - -if [ -n "${client_id}" ]; then - export ARM_CLIENT_ID=${client_id} - echo "export ARM_CLIENT_ID=${client_id}" | tee -a /tmp/deploy_server.sh -fi - -if [ -n "${tenant_id}" ]; then - export ARM_TENANT_ID=${tenant_id} - echo "export ARM_TENANT_ID=${tenant_id}" | tee -a /tmp/deploy_server.sh -fi # echo "export DOTNET_ROOT=/snap/dotnet-sdk/current" | tee -a /tmp/deploy_server.sh # Ensure that the user's account is logged in to Azure with specified creds echo 'az login --identity --output none' | tee -a /tmp/deploy_server.sh +# shellcheck disable=SC2016 echo 'echo ${USER} account ready for use with Azure SAP Automated Deployment' | tee -a /tmp/deploy_server.sh sudo cp /tmp/deploy_server.sh /etc/profile.d/deploy_server.sh +sudo rm /tmp/deploy_server.sh /usr/bin/az login --identity --output none echo "${USER} account ready for use with Azure SAP Automated Deployment" diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index a1b6349167..2eba6d0ced 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -1365,7 +1365,15 @@ if [ "${deployment_system}" == sap_system ] ; then az storage blob upload --file sap-parameters.yaml --container-name tfvars/"${state_path}"/"${key}" --name sap-parameters.yaml --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none hosts_file=$(ls *_hosts.yaml) az storage blob upload --file "${hosts_file}" --container-name tfvars/"${state_path}"/"${key}" --name "${hosts_file}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none +fi +if [ "${deployment_system}" == sap_landscape ] ; then + az storage blob upload --file "${system_config_information}" --container-name tfvars/.sap_deployment_automation --name "${environment}${region_code}${network_logical_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none +fi +if [ "${deployment_system}" == sap_library ] ; then + deployer_config_information="${automation_config_directory}"/"${environment}""${region_code}" + az storage blob upload --file "${deployer_config_information}" --container-name tfvars/.sap_deployment_automation --name "${environment}${region_code}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none fi + exit $return_value diff --git a/deploy/scripts/setup_ado.sh b/deploy/scripts/setup_ado.sh index cbf7c6994c..1a983b083b 100755 --- a/deploy/scripts/setup_ado.sh +++ b/deploy/scripts/setup_ado.sh @@ -13,15 +13,10 @@ tar zxvf agent.tar.gz # run the configuration script -./config.sh +./config.sh # automatic start configuration after VM reboot -sudo ./svc.sh install azureadm +sudo ./svc.sh install "${USER}" # start the deamon sudo ./svc.sh start - -# Install dotnet for the web app -sudo snap install dotnet-sdk --classic --channel=3.1 -sudo snap alias dotnet-sdk.dotnet dotnet -export DOTNET_ROOT=/snap/dotnet-sdk/current \ No newline at end of file diff --git a/deploy/scripts/sync_deployer.sh b/deploy/scripts/sync_deployer.sh index 69c2dc37f4..8639be4e84 100755 --- a/deploy/scripts/sync_deployer.sh +++ b/deploy/scripts/sync_deployer.sh @@ -68,10 +68,10 @@ done 
files=$(az storage blob list --container-name tfvars --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --query "[].name" -o tsv --only-show-errors --output tsv) for name in $files; do - if [ -n $name ] ; then - echo "Downloading file: " $name - dirName=$(dirname $name) - mkdir -p $dirName + if [ -n "$name" ] ; then + echo "Downloading file: " "$name" + dirName=$(dirname "$name") + mkdir -p "$dirName" az storage blob download --container-name tfvars --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --file "${name}" --name "${name}" --only-show-errors --output none --no-progress fi diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 3ecce5b028..aeef2a126d 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -80,7 +80,7 @@ module "common_infrastructure" { key_vault = local.key_vault landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = coalesce(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) + management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming NFS_provider = var.NFS_provider @@ -142,8 +142,8 @@ module "hdb_node" { infrastructure = local.infrastructure landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = coalesce(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) - management_dns_subscription_id = coalesce(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, local.saplib_subscription_id) + management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) + management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming NFS_provider = var.NFS_provider options = local.options @@ -263,7 +263,7 @@ module "anydb_node" { infrastructure = local.infrastructure landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = coalesce(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) + management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? 
local.custom_names : module.sap_namegenerator.naming options = local.options diff --git a/deploy/terraform/run/sap_system/output.tf b/deploy/terraform/run/sap_system/output.tf index d885e49689..f1ad2197e8 100644 --- a/deploy/terraform/run/sap_system/output.tf +++ b/deploy/terraform/run/sap_system/output.tf @@ -115,7 +115,7 @@ output "management_dns_subscription_id" { } output "management_dns_resourcegroup_name" { description = "Resource group name for DNS resource group" - value = coalesce(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) + value = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) } diff --git a/deploy/terraform/run/sap_system/providers.tf b/deploy/terraform/run/sap_system/providers.tf index 0803a66ccc..7c3632ecf5 100644 --- a/deploy/terraform/run/sap_system/providers.tf +++ b/deploy/terraform/run/sap_system/providers.tf @@ -37,7 +37,7 @@ provider "azurerm" { provider "azurerm" { features {} alias = "dnsmanagement" - subscription_id = length(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id) > 1 ? data.terraform_remote_state.landscape.outputs.management_dns_subscription_id : length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null + subscription_id = length(try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, "")) > 1 ? data.terraform_remote_state.landscape.outputs.management_dns_subscription_id : length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null client_id = local.cp_spn.client_id client_secret = local.cp_spn.client_secret tenant_id = local.cp_spn.tenant_id diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl index e34382baf7..89e7301f81 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl @@ -61,14 +61,13 @@ subscription_id="${subscription_id}" tenant_id="${tenant_id}" local_user="${local_user}" use_webapp="${use_webapp}" -ansible_core_version="${ansible_core_version}" +ansible_version="${ansible_core_version}" TOKEN="${pat}" DEVURL="${ado_repo}" POOL="${pool}" AGENTNAME=$(hostname) - # # Ansible Version settings # diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf index 89b4e78cb1..ef4d627f53 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf @@ -297,7 +297,7 @@ resource "azurerm_lb_rule" "fs" { resource "azurerm_availability_set" "scs" { provider = azurerm.main count = local.enable_deployment && local.use_scs_avset ? ( - max(length(local.scs_zones), 1)) : ( + length(var.ppg)) : ( 0 ) name = format("%s%s%s", @@ -309,10 +309,7 @@ resource "azurerm_availability_set" "scs" { resource_group_name = var.resource_group[0].name platform_update_domain_count = 20 platform_fault_domain_count = local.faultdomain_count - proximity_placement_group_id = try(local.scs_zonal_deployment ? 
( - var.ppg[count.index % length(local.scs_zones)]) : ( - var.ppg[0] - ), null) + proximity_placement_group_id = var.ppg[count.index] managed = true tags = var.tags } @@ -325,7 +322,7 @@ resource "azurerm_availability_set" "scs" { resource "azurerm_availability_set" "app" { provider = azurerm.main count = local.use_app_avset && length(var.application_tier.avset_arm_ids) == 0 ? ( - max(length(local.app_zones), 1)) : ( + length(var.ppg)) : ( 0 ) name = format("%s%s%s", @@ -337,10 +334,7 @@ resource "azurerm_availability_set" "app" { resource_group_name = var.resource_group[0].name platform_update_domain_count = 20 platform_fault_domain_count = local.faultdomain_count - proximity_placement_group_id = try(local.app_zonal_deployment ? ( - var.ppg[count.index % local.app_zone_count]) : ( - var.ppg[0] - ), null) + proximity_placement_group_id = var.ppg[count.index] managed = true tags = var.tags } diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf index 9e41a55356..2674a42f6d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf @@ -235,7 +235,7 @@ locals { scs_nic_secondary_ips = try(var.application_tier.scs_nic_secondary_ips, []) scs_admin_nic_ips = try(var.application_tier.scs_admin_nic_ips, []) - webdispatcher_loadbalancer_ips = try(var.application_tier.webdispatcher_loadbalancer_ips, []) + webdispatcher_loadbalancer_ips = try(var.application_tier.webdispatcher_loadbalancer_ips, []) web_nic_ips = try(var.application_tier.web_nic_ips, []) web_nic_secondary_ips = try(var.application_tier.web_nic_secondary_ips, []) web_admin_nic_ips = try(var.application_tier.web_admin_nic_ips, []) @@ -283,7 +283,7 @@ locals { // Default VM config should be merged with any the user passes in - app_sizing = local.enable_deployment && local.application_server_count > 0 ? ( + app_sizing = local.enable_deployment ? ( lookup(local.sizes.app, local.vm_sizing_dictionary_key)) : ( null ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 9424de5b79..73573a3b7c 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -124,15 +124,16 @@ resource "azurerm_linux_virtual_machine" "app" { resource_group_name = var.resource_group[0].name proximity_placement_group_id = var.application_tier.app_use_ppg ? ( - local.app_zonal_deployment ? var.ppg[count.index % max(local.app_zone_count, 1)] : var.ppg[0]) : ( + + var.ppg[count.index % max(length(var.ppg), 1)]) : ( null ) //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = local.use_app_avset ? ( length(var.application_tier.avset_arm_ids) > 0 ? 
( - var.application_tier.avset_arm_ids[count.index % max(local.app_zone_count, 1)]) : ( - azurerm_availability_set.app[count.index % max(local.app_zone_count, 1)].id + var.application_tier.avset_arm_ids[count.index % max(length(var.ppg), 1)]) : ( + azurerm_availability_set.app[count.index % max(length(var.ppg), 1)].id )) : ( null ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 8b188e5f19..1aa569af37 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -16,7 +16,7 @@ resource "azurerm_network_interface" "scs" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.app_sizing.compute.accelerated_networking + enable_accelerated_networking = local.scs_sizing.compute.accelerated_networking tags = var.tags dynamic "ip_configuration" {
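
For reference, the configure_deployer.sh hunks above derive the ARM_* environment variables from the deployer VM's managed identity. A minimal standalone sketch of that flow, assuming only that the Azure CLI and jq are installed; the temp-file paths and the ARM_SUBSCRIPTION_ID export are illustrative, not taken from the script (note that in `az account show` output, .id is the subscription id and .tenantId the tenant id):

    #!/bin/bash
    # Sketch: log in with the VM's managed identity, then export the
    # environment variables Terraform's azurerm provider reads.
    # Paths and variable choices are illustrative assumptions.
    if az login --identity --output none 2>/tmp/msi_error.log && [ ! -s /tmp/msi_error.log ]; then
        az account show --output json > /tmp/az.json
        subscription_id=$(jq --raw-output .id /tmp/az.json)   # .id = subscription GUID
        tenant_id=$(jq --raw-output .tenantId /tmp/az.json)
        rm -f /tmp/az.json

        export ARM_USE_MSI=true
        export ARM_TENANT_ID="${tenant_id}"
        export ARM_SUBSCRIPTION_ID="${subscription_id}"
    else
        echo "Managed identity login failed; ARM_* variables not set" >&2
    fi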
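
setup_ado.sh now installs the agent service for whoever runs the script instead of the hard-coded azureadm account. A hedged sketch of the same steps in unattended form, reusing the DEVURL, TOKEN and POOL values that configure_deployer.sh.tmpl defines; the agent directory and the flag selection are assumptions based on the agent's documented config.sh options:

    #!/bin/bash
    # Sketch: configure an Azure DevOps agent non-interactively, then
    # install and start its systemd service as the invoking user.
    cd "${HOME}/agent" || exit 1

    ./config.sh --unattended \
        --url "${DEVURL}" \
        --auth pat \
        --token "${TOKEN}" \
        --pool "${POOL}" \
        --agent "$(hostname)" \
        --acceptTeeEula

    sudo ./svc.sh install "${USER}"   # service unit runs as the current user
    sudo ./svc.sh start               # start the daemon without waiting for a reboot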
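
The sync_deployer.sh hunk quotes $name before testing it. Unquoted, an empty value vanishes during word splitting and the test collapses to the one-argument form [ -n ], which only checks that the literal string "-n" is non-empty and therefore always succeeds. A short demonstration of the difference:

    #!/bin/bash
    name=""

    # Unquoted: expands to `[ -n ]`, which is always true, so this
    # branch runs even though the variable is empty.
    if [ -n $name ]; then
        echo "unquoted test wrongly passed"
    fi

    # Quoted: the empty string survives as an argument, the test
    # correctly fails, and nothing is printed.
    if [ -n "$name" ]; then
        echo "never printed"
    fi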