From 7a96e1459d1f24010b6095edb2a9fa6f02dc1e13 Mon Sep 17 00:00:00 2001 From: Kobi Hakimi Date: Tue, 20 Feb 2024 11:28:07 +0200 Subject: [PATCH] fix rosa deployment related to jira ticket: RHOAIENG-2683 Signed-off-by: Kobi Hakimi --- ods_ci/libs/Helpers.py | 2 +- ods_ci/utils/scripts/awsOps.py | 45 +++------- ods_ci/utils/scripts/ocm/ocm.py | 128 +++++++++++---------------- ods_ci/utils/scripts/rosa/rosa.py | 53 ++++++----- ods_ci/utils/scripts/rosa/rosaOps.py | 114 ++++++++++-------------- ods_ci/utils/scripts/util.py | 4 +- 6 files changed, 150 insertions(+), 196 deletions(-) diff --git a/ods_ci/libs/Helpers.py b/ods_ci/libs/Helpers.py index cd66070cf..ecef9e92f 100644 --- a/ods_ci/libs/Helpers.py +++ b/ods_ci/libs/Helpers.py @@ -63,7 +63,7 @@ def get_cluster_name(self, cluster_identifier): ocm_client = OpenshiftClusterManager() # to manipulate ocm_describe on line 45 ocm_client.cluster_name = cluster_identifier - cluster_name = ocm_client.ocm_describe(filter="--json | jq -r '.name'") + cluster_name = ocm_client.ocm_describe(jq_filter="--json | jq -r '.name'") cluster_name = cluster_name.strip("\n") return cluster_name diff --git a/ods_ci/utils/scripts/awsOps.py b/ods_ci/utils/scripts/awsOps.py index af81862f6..d7c33ee5c 100644 --- a/ods_ci/utils/scripts/awsOps.py +++ b/ods_ci/utils/scripts/awsOps.py @@ -1,42 +1,23 @@ -from logging import log from time import sleep +from logger import log from util import execute_command -def aws_configure(aws_access_key_id, aws_secret_access_key, aws_region): - """ - Runs aws configure and set the configuration required - for OpenShift/ROSA Installation - """ - cmd_aws_configure_key_id = [ - "aws", - "configure", - "set", - "default.aws_access_key_id", - aws_access_key_id, - ] - ret = execute_command(" ".join(cmd_aws_configure_key_id)) +def aws_configure_execute_cmd(aws_key, aws_value, aws_profile): + aws_configure_cmd = ["aws", "configure", "set", aws_key, aws_value, "--profile", aws_profile] + ret = execute_command(" ".join(aws_configure_cmd)) if ret is None: - print("Failed to configure aws_access_key_id") + log.error(f"Failed to configure {aws_key}") return ret sleep(1) - cmd_aws_configure_access_id = [ - "aws", - "configure", - "set", - "default.aws_secret_access_key", - aws_secret_access_key, - ] - ret = execute_command(" ".join(cmd_aws_configure_access_id)) - if ret is None: - print("Failed to configure aws_secret_access_key") - return ret - sleep(1) - cmd_aws_configure_region = ["aws", "configure", "set", "default.region", aws_region] - ret = execute_command(" ".join(cmd_aws_configure_region)) - if ret is None: - print("Failed to configure region") - return ret +def aws_configure(aws_access_key_id, aws_secret_access_key, aws_region, aws_profile="default"): + """ + Runs aws configure and set the configuration required + for OpenShift/ROSA Installation + """ + aws_configure_execute_cmd(aws_key="aws_access_key_id", aws_value=aws_access_key_id, aws_profile=aws_profile) + aws_configure_execute_cmd(aws_key="aws_secret_access_key", aws_value=aws_secret_access_key, aws_profile=aws_profile) + aws_configure_execute_cmd(aws_key="region", aws_value=aws_region, aws_profile=aws_profile) diff --git a/ods_ci/utils/scripts/ocm/ocm.py b/ods_ci/utils/scripts/ocm/ocm.py index 23c4e5466..b71192bd5 100644 --- a/ods_ci/utils/scripts/ocm/ocm.py +++ b/ods_ci/utils/scripts/ocm/ocm.py @@ -76,6 +76,7 @@ def __init__(self, args={}): self.update_ocm_channel_json = args.get("update_ocm_channel_json") self.update_policies_json = args.get("update_policies_json") 
self.service_account_file = "create_gcp_sa_json.json" + self.cluster_id = "" ocm_env = glob.glob(dir_path + "/../../../ocm.json.*") if ocm_env != []: os.environ["OCM_CONFIG"] = ocm_env[0] @@ -109,16 +110,16 @@ def ocm_cli_install(self): log.info("Failed to give execute permission to ocm cli binary") sys.exit(1) - def ocm_describe(self, filter=""): + def ocm_describe(self, jq_filter=""): """Describes cluster and returns cluster info""" cluster_id = self.get_osd_cluster_id() - cmd = "ocm describe cluster {}".format(cluster_id) - if filter != "": - cmd += " " + filter + cmd = f"ocm describe cluster {cluster_id}" + if jq_filter: + cmd += f" {jq_filter}" ret = execute_command(cmd) if ret is None or "Error: Can't retrieve cluster for key" in ret: - log.info("ocm describe for cluster {} failed".format(self.cluster_name)) + log.info(f"ocm describe for cluster {self.cluster_name} failed") return None return ret @@ -126,9 +127,9 @@ def is_osd_cluster_exists(self): """Checks if cluster exists""" ret = self.ocm_describe() if ret is None: - log.info("ocm cluster with name {} not exists!".format(self.cluster_name)) + log.info(f"ocm cluster with name {self.cluster_name} not exists!") return False - log.info("ocm cluster with name {} exists!".format(self.cluster_name)) + log.info(f"ocm cluster with name {self.cluster_name} exists!") return True def osd_cluster_create(self): @@ -147,8 +148,7 @@ def osd_cluster_create(self): if self.channel_group == "candidate": chan_grp = "--channel-group {}".format(self.channel_group) - version_cmd = 'ocm list versions {} | grep -w "'.format(chan_grp) + re.escape(version) + '*"' - log.info("CMD: {}".format(version_cmd)) + version_cmd = f'ocm list versions {chan_grp} | grep -w "{re.escape(version)}*"' versions = execute_command(version_cmd) if versions is not None: version = [ver for ver in versions.split("\n") if ver][-1] @@ -213,7 +213,6 @@ def osd_cluster_create(self): self.cluster_name, ) ) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: log.info("Failed to create osd cluster {}".format(self.cluster_name)) @@ -222,30 +221,32 @@ def osd_cluster_create(self): def get_osd_cluster_id(self): """Gets osd cluster ID""" - cmd = "ocm list clusters -p search=\"name = '{}' or id = '{}'\" --columns id --no-headers".format( - self.cluster_name, self.cluster_name - ) - ret = execute_command(cmd) - if ret is None: - log.info("Unable to retrieve cluster ID for cluster name {}. EXITING".format(self.cluster_name)) - sys.exit(1) - return ret.strip("\n") + if not self.cluster_id: + cmd = "ocm list clusters -p search=\"name = '{}' or id = '{}'\" --columns id --no-headers".format( + self.cluster_name, self.cluster_name + ) + cluster_id = execute_command(cmd) + if cluster_id in [None, ""]: + log.error(f"Unable to retrieve cluster ID for cluster name {self.cluster_name}. EXITING") + sys.exit(1) + self.cluster_id = cluster_id.strip("\n") + return self.cluster_id def get_osd_cluster_state(self): """Gets osd cluster state""" - cluster_state = self.ocm_describe(filter="--json | jq -r '.state'") - if cluster_state is None: - log.info("Unable to retrieve cluster state for cluster name {}. EXITING".format(self.cluster_name)) + cluster_state = self.ocm_describe(jq_filter="--json | jq -r '.state'") + if cluster_state in [None, ""]: + log.error(f"Unable to retrieve cluster state for cluster name {self.cluster_name}. 
EXITING") sys.exit(1) return cluster_state.strip("\n") def get_osd_cluster_version(self): """Gets osd cluster version""" - cluster_version = self.ocm_describe(filter="--json | jq -r '.version.raw_id'") + cluster_version = self.ocm_describe(jq_filter="--json | jq -r '.version.raw_id'") if cluster_version is None: - log.info("Unable to retrieve cluster version for cluster name {}. EXITING".format(self.cluster_name)) + log.error(f"Unable to retrieve cluster version for cluster name {self.cluster_name}. EXITING") sys.exit(1) return cluster_version.strip("\n") @@ -253,9 +254,9 @@ def get_osd_cluster_console_url(self): """Gets osd cluster console url""" filter_str = "--json | jq -r '.console.url'" - cluster_console_url = self.ocm_describe(filter=filter_str) - if cluster_console_url is None: - log.info("Unable to retrieve cluster console url for cluster name {}. EXITING".format(self.cluster_name)) + cluster_console_url = self.ocm_describe(jq_filter=filter_str) + if cluster_console_url in [None, ""]: + log.error(f"Unable to retrieve cluster console url for cluster name {self.cluster_name}. EXITING") sys.exit(1) return cluster_console_url.strip("\n") @@ -314,13 +315,13 @@ def wait_for_osd_cluster_to_be_ready(self, timeout=7200): check_flag = True break elif cluster_state == "error": - log.info("{} is in error state. Hence exiting!!".format(self.cluster_name)) + log.error(f"{self.cluster_name} is in error state. Hence exiting!!") sys.exit(1) time.sleep(60) count += 60 if not check_flag: - log.info("{} not in ready state even after 2 hours. EXITING".format(self.cluster_name)) + log.error(f"{self.cluster_name} not in ready state even after 2 hours. EXITING") sys.exit(1) def _render_template(self, template_file, output_file, replace_vars): @@ -336,7 +337,7 @@ def _render_template(self, template_file, output_file, replace_vars): with open(output_file, "w") as fh: fh.write(outputText) except: - log.info("Failed to render template and create json file {}".format(output_file)) + log.error(f"Failed to render template and create json file {output_file}") sys.exit(1) def is_addon_installed(self, addon_name="managed-odh"): @@ -395,7 +396,6 @@ def add_machine_pool(self): self.pool_name, ) ) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: log.info("Failed to add machine pool {}".format(self.cluster_name)) @@ -468,10 +468,9 @@ def uninstall_addon(self, addon_name="managed-odh", exit_on_failure=True): cmd = "ocm --v={} delete /api/clusters_mgmt/v1/clusters/{}/addons/{}".format( self.ocm_verbose_level, cluster_id, addon_name ) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: - log.info("Failed to uninstall {} addon on cluster {}".format(addon_name, self.cluster_name)) + log.info(f"Failed to uninstall {addon_name} addon on cluster {self.cluster_name}") if exit_on_failure: sys.exit(1) @@ -492,7 +491,6 @@ def uninstall_rhods(self): def is_secret_existent(self, secret_name, namespace): cmd = "oc get secret {} -n {}".format(secret_name, namespace) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) log.info("\nRET: {}".format(ret)) if ret is None or "Error" in ret: @@ -543,7 +541,6 @@ def install_addon( cmd = "ocm --v={} post /api/clusters_mgmt/v1/clusters/{}/addons --body={}".format( self.ocm_verbose_level, cluster_id, output_file ) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if len(fields_to_hide) > 0: ret = self.hide_values_in_op_json(fields_to_hide, ret) @@ -567,7 +564,6 @@ def is_oc_obj_existent(self, kind, name, namespace, 
retries=30, retry_sec_interv found = False for retry in range(retries): cmd = """oc get {} {} -n {}""".format(kind, name, namespace) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None or "Error" in ret: log.info( @@ -614,7 +610,6 @@ def install_rhoam_addon(self, exit_on_failure=True): cmd = """oc patch rhmi rhoam -n redhat-rhoam-operator \ --type=merge --patch '{\"spec\":{\"useClusterStorage\": \"false\"}}'""" - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) log.info("\nRET: {}".format(ret)) if ret is None: @@ -714,7 +709,6 @@ def create_idp(self): f"ocm --v={self.ocm_verbose_level} create idp -c {cluster_id} -t {self.idp_type} -n {self.idp_name}" f" --username {self.htpasswd_cluster_admin} --password {self.htpasswd_cluster_password}" ) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: log.info("Failed to add identity provider of type {}".format(self.idp_type)) @@ -733,7 +727,6 @@ def create_idp(self): # time.sleep(10) - # log.info("CMD: {}".format(cmd)) # ret = execute_command(cmd) # if ret is None: # log.info( @@ -765,7 +758,6 @@ def create_idp(self): os.path.abspath(os.path.dirname(__file__)) + "/../../../configs/templates/ldap/ldap.yaml_replaced" ) cmd = "oc apply -f {}".format(ldap_yaml_file) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: log.info("Failed to deploy openldap application") @@ -784,7 +776,6 @@ def create_idp(self): f"ocm --v={self.ocm_verbose_level} post /api/clusters_mgmt/v1/" f"clusters/{cluster_id}/identity_providers --body={output_file}" ) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: log.info("Failed to add ldap identity provider") @@ -794,8 +785,7 @@ def create_idp(self): def delete_idp(self): """Deletes Identity Provider""" - cmd = "ocm --v={} delete idp -c {} {}".format(self.ocm_verbose_level, self.cluster_name, self.idp_name) - log.info("CMD: {}".format(cmd)) + cmd = f"ocm --v={self.ocm_verbose_level} delete idp -c {self.cluster_name} {self.idp_name}" ret = execute_command(cmd) if ret is None: log.info("Failed to delete identity provider of type {}".format(self.idp_name)) @@ -812,7 +802,6 @@ def add_user_to_group(self, user="", group="cluster-admins"): cluster_id = self.get_osd_cluster_id() cmd = f"ocm --v={self.ocm_verbose_level} create user {user} --cluster {cluster_id} --group={group}" - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: log.info("Failed to add user {} to group {}".format(user, group)) @@ -825,7 +814,6 @@ def delete_user(self, user="", group="cluster-admins"): cmd = "ocm --v={} delete user {} --cluster {} --group={}".format( self.ocm_verbose_level, user, self.cluster_name, group ) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: log.info("Failed to delete user {} of group {}".format(user, group)) @@ -834,7 +822,6 @@ def create_group(self, group_name): """Creates new group""" cmd = "oc adm groups new {}".format(group_name) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: log.info("Failed to add group {}".format(group_name)) @@ -868,12 +855,10 @@ def add_users_to_rhods_group(self): # given user to group cmd = "oc get users" - log.info("CMD: {}".format(cmd)) users_list = execute_command(cmd) log.info("Users present in cluster: {}".format(users_list)) cmd = "oc get groups" - log.info("CMD: {}".format(cmd)) groups_list = execute_command(cmd) log.info("Groups present in cluster: {}".format(groups_list)) @@ -942,10 +927,9 @@ def ocm_login(self): 
cmd += "--url=staging" cmd = "OCM_CONFIG=ocm.json." + self.testing_platform + " " + cmd - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: - log.info("Failed to login to aws openshift platform using token") + log.error("Failed to login to aws openshift platform using token") sys.exit(1) os.environ["OCM_CONFIG"] = "ocm.json." + self.testing_platform @@ -953,39 +937,36 @@ def delete_cluster(self): """Delete OSD Cluster""" cluster_id = self.get_osd_cluster_id() - cmd = "ocm --v={} delete cluster {}".format(self.ocm_verbose_level, cluster_id) - log.info("CMD: {}".format(cmd)) + cmd = f"ocm --v={self.ocm_verbose_level} delete cluster {cluster_id}" ret = execute_command(cmd) if ret is None: - log.info("Failed to delete osd cluster {}".format(self.cluster_name)) + log.error("Failed to delete osd cluster {}".format(self.cluster_name)) sys.exit(1) self.wait_for_osd_cluster_to_get_deleted() def wait_for_osd_cluster_to_get_deleted(self, timeout=3600): """Waits for cluster to get deleted""" - cluster_exists = self.is_osd_cluster_exists() count = 0 check_flag = False while count <= timeout: cluster_exists = self.is_osd_cluster_exists() if not cluster_exists: - log.info("{} is deleted".format(self.cluster_name)) + log.info(f"{self.cluster_name} is deleted") check_flag = True break time.sleep(60) count += 60 if not check_flag: - log.info("{} not deleted even after an hour. EXITING".format(self.cluster_name)) + log.info(f"{self.cluster_name} not deleted even after timeout of {timeout / 60} minutes. EXITING") sys.exit(1) def hibernate_cluster(self): """Hibernate OSD Cluster""" cluster_id = self.get_osd_cluster_id() - cmd = "ocm --v={} hibernate cluster {}".format(self.ocm_verbose_level, cluster_id) - log.info("CMD: {}".format(cmd)) + cmd = f"ocm --v={self.ocm_verbose_level} hibernate cluster {cluster_id}" ret = execute_command(cmd) if ret is None: log.info("Failed to hibernate osd cluster {}".format(self.cluster_name)) @@ -1017,7 +998,6 @@ def resume_cluster(self): cluster_id = self.get_osd_cluster_id() cmd = "ocm --v={} resume cluster {}".format(self.ocm_verbose_level, cluster_id) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: log.info("Failed to resume osd cluster {}".format(self.cluster_name)) @@ -1054,7 +1034,6 @@ def update_notification_email_address(self, addon_name, email_address, exit_on_f cmd = "ocm --v={} patch /api/clusters_mgmt/v1/clusters/{}/addons/{} --body={}".format( self.ocm_verbose_level, cluster_id, addon_name, output_file ) - log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: log.info("Failed to update email address to {} addon on cluster {}".format(addon_name, self.cluster_name)) @@ -1128,7 +1107,6 @@ def wait_for_isv_installation_to_complete(self, isv_name, namespace="openshift-o "$(oc get csv -n {} | grep -i {} | awk '{}') -o json " "| jq '.status.phase'".format(namespace, isv_name, "{print $1}") ) - log.info("CMD: {}".format(cmd)) while count <= timeout: isv_state = execute_command(cmd) if isv_state.replace('"', "").strip() == "Succeeded": @@ -1385,7 +1363,7 @@ def update_ocm_policy(self): # Argument parsers for ocm_login ocm_login_parser = subparsers.add_parser( "ocm_login", - help=("Login to OCM using token"), + help="Login to OCM using token", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) @@ -1413,7 +1391,7 @@ def update_ocm_policy(self): # Argument parsers for create_cluster create_cluster_parser = subparsers.add_parser( "create_cluster", - help=("Create managed OpenShift Dedicated v4 
clusters via OCM."), + help="Create managed OpenShift Dedicated v4 clusters via OCM.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) @@ -1686,7 +1664,7 @@ def update_ocm_policy(self): # Argument parsers for delete_cluster delete_cluster_parser = subparsers.add_parser( "delete_cluster", - help=("Delete managed OpenShift Dedicated v4 clusters via OCM."), + help="Delete managed OpenShift Dedicated v4 clusters via OCM.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) delete_cluster_parser.add_argument( @@ -1702,7 +1680,7 @@ def update_ocm_policy(self): # Argument parsers for hibernate_cluster hibernate_cluster_parser = subparsers.add_parser( "hibernate_cluster", - help=("Hibernates managed OpenShift Dedicated v4 clusters via OCM."), + help="Hibernates managed OpenShift Dedicated v4 clusters via OCM.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) hibernate_cluster_parser.add_argument( @@ -1718,7 +1696,7 @@ def update_ocm_policy(self): # Argument parsers for resume_cluster resume_cluster_parser = subparsers.add_parser( "resume_cluster", - help=("Resumes managed OpenShift Dedicated v4 clusters via OCM."), + help="Resumes managed OpenShift Dedicated v4 clusters via OCM.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) resume_cluster_parser.add_argument( @@ -1734,7 +1712,7 @@ def update_ocm_policy(self): # Argument parsers for delete_idp delete_idp_parser = subparsers.add_parser( "delete_idp", - help=("Delete a specific identity provider for a cluster."), + help="Delete a specific identity provider for a cluster.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) optional_delete_idp_parser = delete_idp_parser._action_groups.pop() @@ -1761,7 +1739,7 @@ def update_ocm_policy(self): # Argument parsers for get_osd_cluster_info info_parser = subparsers.add_parser( "get_osd_cluster_info", - help=("Gets the cluster information"), + help="Gets the cluster information", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) optional_info_parser = info_parser._action_groups.pop() @@ -1781,7 +1759,7 @@ def update_ocm_policy(self): # Argument parsers for update_osd_cluster_info update_info_parser = subparsers.add_parser( "update_osd_cluster_info", - help=("Updates the cluster information"), + help="Updates the cluster information", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) optional_update_info_parser = update_info_parser._action_groups.pop() @@ -1817,7 +1795,7 @@ def update_ocm_policy(self): # Argument parsers for install_rhods_addon install_rhods_parser = subparsers.add_parser( "install_rhods_addon", - help=("Install rhods addon cluster."), + help="Install rhods addon cluster.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) required_install_rhods_parser = install_rhods_parser.add_argument_group("required arguments") @@ -1841,7 +1819,7 @@ def update_ocm_policy(self): # Argument parsers for install_rhods_addon install_gpu_parser = subparsers.add_parser( "install_gpu_addon", - help=("Install gpu addon cluster."), + help="Install gpu addon cluster.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) required_install_gpu_parser = install_gpu_parser.add_argument_group("required arguments") @@ -1858,7 +1836,7 @@ def update_ocm_policy(self): # Argument parsers for create_cluster add_machinepool_parser = subparsers.add_parser( "add_machine_pool", - help=("Adds machine pool to given cluster via OCM."), + help="Adds machine pool to given cluster via OCM.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) @@ -1917,7 
+1895,7 @@ def update_ocm_policy(self): # Argument parsers for uninstall_rhods_addon uninstall_rhods_parser = subparsers.add_parser( "uninstall_rhods_addon", - help=("Uninstall rhods addon cluster."), + help="Uninstall rhods addon cluster.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) required_uninstall_rhods_parser = uninstall_rhods_parser.add_argument_group("required arguments") @@ -1934,7 +1912,7 @@ def update_ocm_policy(self): # Argument parsers for install_rhoam_addon install_rhoam_parser = subparsers.add_parser( "install_rhoam_addon", - help=("Install rhoam addon cluster."), + help="Install rhoam addon cluster.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) required_install_rhoam_parser = install_rhoam_parser.add_argument_group("required arguments") @@ -1951,7 +1929,7 @@ def update_ocm_policy(self): # Argument parsers for uninstall_rhoam_addon uninstall_rhoam_parser = subparsers.add_parser( "uninstall_rhoam_addon", - help=("Uninstall rhoam addon cluster."), + help="Uninstall rhoam addon cluster.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) required_uninstall_rhoam_parser = uninstall_rhoam_parser.add_argument_group("required arguments") @@ -1968,7 +1946,7 @@ def update_ocm_policy(self): # Argument parsers for create_idp create_idp_parser = subparsers.add_parser( "create_idp", - help=("Add an Identity providers to determine how users log into the cluster."), + help="Add an Identity providers to determine how users log into the cluster.", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) optional_create_idp_parser = create_idp_parser._action_groups.pop() diff --git a/ods_ci/utils/scripts/rosa/rosa.py b/ods_ci/utils/scripts/rosa/rosa.py index 70b296f43..580e16f85 100644 --- a/ods_ci/utils/scripts/rosa/rosa.py +++ b/ods_ci/utils/scripts/rosa/rosa.py @@ -6,7 +6,7 @@ sys.path.append(dir_path + "/../") from awsOps import aws_configure from logger import log -from rosaOps import create_account_roles, rosa_create_cluster, wait_for_osd_cluster_to_be_ready +from rosaOps import create_account_roles, rosa_create_cluster, rosa_whoami, wait_for_osd_cluster_to_be_ready class RosaClusterManager: @@ -14,7 +14,7 @@ def __init__(self, args={}): self.aws_access_key_id = args.get("aws_access_key_id") self.aws_secret_access_key = args.get("aws_secret_access_key") self.aws_region = args.get("aws_region") - self.profile = args.get("profile") + self.aws_profile = args.get("aws_profile") self.cluster_name = args.get("cluster_name") self.compute_nodes = args.get("compute_nodes") self.compute_machine_type = args.get("compute_machine_type") @@ -22,15 +22,17 @@ def __init__(self, args={}): self.channel_name = args.get("channel_name") def create_rosa_cluster(self): - print( - self.cluster_name, - self.aws_region, - self.channel_name, - self.compute_nodes, - self.compute_machine_type, - self.rosa_version, + log.info( + "Creating ROSA cluster with the following details:\n" + f"Name: {self.cluster_name}\n" + f"Region: {self.aws_region}\n" + f"Channel: {self.channel_name}\n" + f"Compute Nodes: {self.compute_nodes}\n" + f"Compute machine type: {self.compute_machine_type}\n" + f"Rosa version: {self.rosa_version}\n" ) - aws_configure(self.aws_access_key_id, self.aws_secret_access_key, self.aws_region) + aws_configure(self.aws_access_key_id, self.aws_secret_access_key, self.aws_region, self.aws_profile) + rosa_whoami() create_account_roles() rosa_create_cluster( self.cluster_name, @@ -51,12 +53,13 @@ def main(): # Argument parsers for create_cluster subparsers = 
parser.add_subparsers(title="Available sub commands", help="sub-command help") - rosaClusterCreate_parser = subparsers.add_parser( + rosa_cluster_create_parser = subparsers.add_parser( "create_rosa_cluster", - help=("create ROSA clusters using openshift installer"), + help="create ROSA clusters using openshift installer", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - rosaClusterCreate_parser.add_argument( + + rosa_cluster_create_parser.add_argument( "--aws-access-key-id", required=True, action="store", @@ -64,7 +67,7 @@ def main(): help="AWS access key ID", ) - rosaClusterCreate_parser.add_argument( + rosa_cluster_create_parser.add_argument( "--aws-secret-access-key", required=True, action="store", @@ -72,7 +75,7 @@ def main(): help="AWS secret access key", ) - rosaClusterCreate_parser.add_argument( + rosa_cluster_create_parser.add_argument( "--aws_region", required=True, action="store", @@ -80,7 +83,15 @@ def main(): help="AWS aws_region", ) - rosaClusterCreate_parser.add_argument( + rosa_cluster_create_parser.add_argument( + "--aws_profile", + required=False, + action="store", + dest="aws_profile", + help="AWS aws_profile", + ) + + rosa_cluster_create_parser.add_argument( "--cluster-name", required=True, action="store", @@ -88,7 +99,7 @@ def main(): help="ROSA cluster name", ) - rosaClusterCreate_parser.add_argument( + rosa_cluster_create_parser.add_argument( "--compute_nodes", required=True, action="store", @@ -96,7 +107,7 @@ def main(): help="Number of compute nodes", ) - rosaClusterCreate_parser.add_argument( + rosa_cluster_create_parser.add_argument( "--compute-machine-type", required=True, action="store", @@ -104,14 +115,14 @@ def main(): help="Compute machine type", ) - rosaClusterCreate_parser.add_argument( + rosa_cluster_create_parser.add_argument( "--osd-version", required=True, action="store", dest="rosa_version", help="ROSA version", ) - rosaClusterCreate_parser.add_argument( + rosa_cluster_create_parser.add_argument( "--channel-name", required=True, action="store", @@ -120,7 +131,7 @@ def main(): ) rosa_cluster_manager = RosaClusterManager() - rosaClusterCreate_parser.set_defaults(func=rosa_cluster_manager.create_rosa_cluster) + rosa_cluster_create_parser.set_defaults(func=rosa_cluster_manager.create_rosa_cluster) args = parser.parse_args(namespace=rosa_cluster_manager) if hasattr(args, "func"): args.func() diff --git a/ods_ci/utils/scripts/rosa/rosaOps.py b/ods_ci/utils/scripts/rosa/rosaOps.py index d7dd9cdcc..35d313f7a 100644 --- a/ods_ci/utils/scripts/rosa/rosaOps.py +++ b/ods_ci/utils/scripts/rosa/rosaOps.py @@ -1,10 +1,18 @@ import sys -from logging import log from time import sleep +from logger import log from util import execute_command +def rosa_whoami(): + cmd_rosa_whoami = [ + "rosa", + "whoami", + ] + execute_command(" ".join(cmd_rosa_whoami)) + + def create_account_roles(): cmd_create_account_roles = [ "rosa", @@ -14,10 +22,9 @@ def create_account_roles(): "auto", "--yes", ] - ret = execute_command(" ".join(cmd_create_account_roles)) if ret is None: - print("Failed to Create account roles") + log.error("Failed to Create account roles") return ret @@ -30,47 +37,28 @@ def rosa_create_cluster( rosa_version, sts=True, ): + cmd_rosa_create_cluster = [ + "rosa", + "create", + "cluster", + "--cluster-name", + cluster_name, + "--replicas ", + compute_nodes, + "--region", + region, + "--compute-machine-type", + compute_machine_type, + "--yes", + "--version", + rosa_version, + "--channel-group", + channel_name, + ] + if sts is True: - cmd_rosa_create_cluster = 
[ - "rosa", - "create", - "cluster", - "--cluster-name", - cluster_name, - "--replicas ", - compute_nodes, - "--region", - region, - "--compute-machine-type", - compute_machine_type, - "--yes", - "--sts", - "--version", - rosa_version, - "--channel-group", - channel_name, - ] - execute_command(" ".join(cmd_rosa_create_cluster)) - else: - cmd_rosa_create_cluster = [ - "rosa", - "create", - "cluster", - "--cluster-name", - cluster_name, - "--replicas ", - compute_nodes, - "--region", - region, - "--compute-machine-type", - compute_machine_type, - "--yes", - "--version", - rosa_version, - "--channel-group", - channel_name, - ] - execute_command(" ".join(cmd_rosa_create_cluster)) + cmd_rosa_create_cluster.append("--sts") + execute_command(" ".join(cmd_rosa_create_cluster)) cmd_create_operator_roles = [ "rosa", @@ -85,9 +73,8 @@ def rosa_create_cluster( "--yes", ] ret = execute_command(" ".join(cmd_create_operator_roles)) - print(" ".join(cmd_create_operator_roles)) if ret is None: - print("Failed to Create operator-roles") + log.error("Failed to Create operator-roles") return ret cmd_create_oidc_provider = [ @@ -104,30 +91,25 @@ def rosa_create_cluster( ] ret = execute_command(" ".join(cmd_create_oidc_provider)) if ret is None: - print("Failed to Create oidc roles") + log.error("Failed to Create oidc roles") return ret + rosa_describe(cluster_name=cluster_name) + + +def rosa_describe(cluster_name, jq_filter=""): + """Describes cluster and returns cluster info""" cmd_check_cluster = [ "rosa", "describe", "cluster", - "--cluster={}".format(cluster_name), + f"--cluster={cluster_name}", ] + if jq_filter: + cmd_check_cluster.append(jq_filter) ret = execute_command(" ".join(cmd_check_cluster)) if ret is None: - print("Failed creation failed") - return ret - print("ret = {}".format(ret)) - - -def rosa_describe(cluster_name, filter=""): - """Describes cluster and returns cluster info""" - cmd = "rosa describe cluster --cluster {}".format(cluster_name) - if filter != "": - cmd += " " + filter - ret = execute_command(cmd) - if ret is None: - print("rosa describe for cluster {} failed".format(cluster_name)) + log.error(f"rosa describe for cluster {cluster_name} failed") return None return ret @@ -135,9 +117,9 @@ def rosa_describe(cluster_name, filter=""): def get_rosa_cluster_state(cluster_name): """Gets osd cluster state""" - cluster_state = rosa_describe(cluster_name, filter="--output json | jq -r '.state'") + cluster_state = rosa_describe(cluster_name, jq_filter="--output json | jq -r '.state'") if cluster_state is None: - print("Unable to retrieve cluster state for cluster name {}. EXITING".format(cluster_name)) + log.error(f"Unable to retrieve cluster state for cluster name {cluster_name}. EXITING") sys.exit(1) cluster_state = cluster_state.strip("\n") return cluster_state @@ -146,22 +128,22 @@ def get_rosa_cluster_state(cluster_name): def wait_for_osd_cluster_to_be_ready(cluster_name, timeout=7200): """Waits for cluster to be in ready state""" - print("Waiting for cluster to be ready") + log.info("Waiting for cluster to be ready") cluster_state = get_rosa_cluster_state(cluster_name) count = 0 check_flag = False while count <= timeout: cluster_state = get_rosa_cluster_state(cluster_name) if cluster_state == "ready": - print("{} is in ready state".format(cluster_name)) + log.info(f"{cluster_name} is in ready state") check_flag = True break elif cluster_state == "error": - print("{} is in error state. Hence exiting!!".format(cluster_name)) + log.error(f"{cluster_name} is in error state. 
Hence exiting!!") sys.exit(1) sleep(60) count += 60 if not check_flag: - print("{} not in ready state even after 2 hours. EXITING".format(cluster_name)) + log.error(f"{cluster_name} not in ready state even after 2 hours. EXITING") sys.exit(1) diff --git a/ods_ci/utils/scripts/util.py b/ods_ci/utils/scripts/util.py index 9f6d40191..b433cff88 100644 --- a/ods_ci/utils/scripts/util.py +++ b/ods_ci/utils/scripts/util.py @@ -8,6 +8,7 @@ import jinja2 import yaml +from logger import log def clone_config_repo(**kwargs): @@ -58,6 +59,7 @@ def execute_command(cmd): """ output = "" try: + log.info(f"CMD: {cmd}") with subprocess.Popen( cmd, shell=True, @@ -71,10 +73,10 @@ def execute_command(cmd): line = p.stdout.readline() if line != "": output += line + "\n" - print(line) elif p.poll() is not None: break sys.stdout.flush() + log.info(f"OUTPUT: {output}") return output except: return None