diff --git a/ods_ci/libs/DataSciencePipelinesAPI.py b/ods_ci/libs/DataSciencePipelinesAPI.py index f73d3bb40..3ff5f1a98 100644 --- a/ods_ci/libs/DataSciencePipelinesAPI.py +++ b/ods_ci/libs/DataSciencePipelinesAPI.py @@ -24,9 +24,7 @@ def wait_until_openshift_pipelines_operator_is_deployed(self): count = 0 while deployment_count != 1 and count < 30: deployments = [] - response, _ = self.run_oc( - "oc get deployment -n openshift-operators openshift-pipelines-operator -o json" - ) + response, _ = self.run_oc("oc get deployment -n openshift-operators openshift-pipelines-operator -o json") try: response = json.loads(response) if ( @@ -45,9 +43,7 @@ def wait_until_openshift_pipelines_operator_is_deployed(self): while pipeline_run_crd_count < 1 and count < 60: # https://github.com/opendatahub-io/odh-dashboard/issues/1673 # It is possible to start the Pipeline Server without pipelineruns.tekton.dev CRD - pipeline_run_crd_count = self.count_pods( - "oc get crd pipelineruns.tekton.dev", 1 - ) + pipeline_run_crd_count = self.count_pods("oc get crd pipelineruns.tekton.dev", 1) time.sleep(1) count += 1 assert pipeline_run_crd_count == 1 @@ -87,16 +83,12 @@ def login_and_wait_dsp_route( self.route = "" count = 0 while self.route == "" and count < 60: - self.route, _ = self.run_oc( - f"oc get route -n {project} {route_name} --template={{{{.spec.host}}}}" - ) + self.route, _ = self.run_oc(f"oc get route -n {project} {route_name} --template={{{{.spec.host}}}}") time.sleep(1) count += 1 assert self.route != "", "Route must not be empty" - print( - f"Waiting for Data Science Pipeline route to be ready to avoid firing false alerts: {self.route}" - ) + print(f"Waiting for Data Science Pipeline route to be ready to avoid firing false alerts: {self.route}") time.sleep(45) status = -1 count = 0 @@ -116,16 +108,12 @@ def login_and_wait_dsp_route( @keyword def remove_pipeline_project(self, project): - print( - f"We are removing the project({project}) because we could run the test multiple times" - ) + print(f"We are removing the project({project}) because we could run the test multiple times") self.run_oc(f"oc delete project {project} --wait=true --force=true") print("Wait because it could be in Terminating status") count = 0 while count < 30: - project_status, error = self.run_oc( - f"oc get project {project} --template={{{{.status.phase}}}}" - ) + project_status, error = self.run_oc(f"oc get project {project} --template={{{{.status.phase}}}}") print(f"Project status: {project_status}") print(f"Error message: {error}") if project_status == "": @@ -241,9 +229,7 @@ def delete_pipeline(self, pipeline_id): @keyword def add_role_to_user(self, name, user, project): - output, error = self.run_oc( - f"oc policy add-role-to-user {name} {user} -n {project} --role-namespace={project}" - ) + output, error = self.run_oc(f"oc policy add-role-to-user {name} {user} -n {project} --role-namespace={project}") print(output, "->", error) @keyword @@ -271,9 +257,7 @@ def count_pods(self, oc_command, pod_criteria, timeout=30): count += 1 return pod_count - def count_running_pods( - self, oc_command, name_startswith, status_phase, pod_criteria, timeout=30 - ): + def count_running_pods(self, oc_command, name_startswith, status_phase, pod_criteria, timeout=30): pod_count = 0 count = 0 while pod_count != pod_criteria and count < timeout: @@ -309,12 +293,7 @@ def get_default_storage(self): result = json.loads(result) for storage_class in result["items"]: if "annotations" in storage_class["metadata"]: - if ( - 
storage_class["metadata"]["annotations"][ - "storageclass.kubernetes.io/is-default-class" - ] - == "true" - ): + if storage_class["metadata"]["annotations"]["storageclass.kubernetes.io/is-default-class"] == "true": break return storage_class["metadata"]["name"] diff --git a/ods_ci/libs/DataSciencePipelinesKfpTekton.py b/ods_ci/libs/DataSciencePipelinesKfpTekton.py index 70d469c03..4aa3a64ed 100644 --- a/ods_ci/libs/DataSciencePipelinesKfpTekton.py +++ b/ods_ci/libs/DataSciencePipelinesKfpTekton.py @@ -10,7 +10,9 @@ class DataSciencePipelinesKfpTekton: - base_image = "registry.redhat.io/ubi8/python-39@sha256:3523b184212e1f2243e76d8094ab52b01ea3015471471290d011625e1763af61" + base_image = ( + "registry.redhat.io/ubi8/python-39@sha256:3523b184212e1f2243e76d8094ab52b01ea3015471471290d011625e1763af61" + ) # init should not have a call to external system, otherwise dry-run will fail def __init__(self): @@ -68,9 +70,7 @@ def get_secret(self, api, project, name): return json.loads(secret_json) def get_bucket_name(self, api, project): - bucket_name, _ = api.run_oc( - f"oc get dspa -n {project} pipelines-definition -o json" - ) + bucket_name, _ = api.run_oc(f"oc get dspa -n {project} pipelines-definition -o json") objectStorage = json.loads(bucket_name)["spec"]["objectStorage"] if "minio" in objectStorage: return objectStorage["minio"]["bucket"] @@ -79,9 +79,7 @@ def get_bucket_name(self, api, project): def import_souce_code(self, path): module_name = os.path.basename(path).replace("-", "_") - spec = importlib.util.spec_from_loader( - module_name, importlib.machinery.SourceFileLoader(module_name, path) - ) + spec = importlib.util.spec_from_loader(module_name, importlib.machinery.SourceFileLoader(module_name, path)) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) sys.modules[module_name] = module @@ -92,9 +90,7 @@ def kfp_tekton_create_run_from_pipeline_func( self, user, pwd, project, route_name, source_code, fn, current_path=None ): client, api = self.get_client(user, pwd, project, route_name) - mlpipeline_minio_artifact_secret = self.get_secret( - api, project, "mlpipeline-minio-artifact" - ) + mlpipeline_minio_artifact_secret = self.get_secret(api, project, "mlpipeline-minio-artifact") bucket_name = self.get_bucket_name(api, project) # the current path is from where you are running the script # sh ods_ci/run_robot_test.sh @@ -111,9 +107,7 @@ def kfp_tekton_create_run_from_pipeline_func( result = client.create_run_from_pipeline_func( pipeline_func=pipeline, arguments={ - "mlpipeline_minio_artifact_secret": mlpipeline_minio_artifact_secret[ - "data" - ], + "mlpipeline_minio_artifact_secret": mlpipeline_minio_artifact_secret["data"], "bucket_name": bucket_name, "openshift_server": self.api.get_openshift_server(), "openshift_token": self.api.get_openshift_token(), @@ -126,8 +120,6 @@ def kfp_tekton_create_run_from_pipeline_func( # we are calling DataSciencePipelinesAPI because of https://github.com/kubeflow/kfp-tekton/issues/1223 # Waiting for a backport https://github.com/kubeflow/kfp-tekton/pull/1234 @keyword - def kfp_tekton_wait_for_run_completion( - self, user, pwd, project, route_name, run_result, timeout=160 - ): + def kfp_tekton_wait_for_run_completion(self, user, pwd, project, route_name, run_result, timeout=160): _, api = self.get_client(user, pwd, project, route_name) return api.check_run_status(run_result.run_id, timeout=timeout) diff --git a/ods_ci/libs/Helpers.py b/ods_ci/libs/Helpers.py index 3dd017929..cd66070cf 100644 --- a/ods_ci/libs/Helpers.py +++ 
b/ods_ci/libs/Helpers.py @@ -50,9 +50,7 @@ def install_rhoam_addon(self, cluster_name): ocm_client.cluster_name = cluster_name result = ocm_client.install_rhoam_addon(exit_on_failure=False) if not result: - self.BuiltIn.fail( - "Something got wrong while installing RHOAM. Check the logs" - ) + self.BuiltIn.fail("Something got wrong while installing RHOAM. Check the logs") @keyword def uninstall_rhoam_using_addon_flow(self, cluster_name): @@ -83,20 +81,13 @@ def uninstall_rhods_using_addon(self, cluster_name): ocm_client.uninstall_rhods() @keyword - def update_notification_email_address( - self, cluster_name, email_address, addon_name="managed-odh" - ): + def update_notification_email_address(self, cluster_name, email_address, addon_name="managed-odh"): """Update notification email for add-ons using OCM""" ocm_client = OpenshiftClusterManager() ocm_client.cluster_name = cluster_name - status = ocm_client.update_notification_email_address( - addon_name, email_address, exit_on_failure=False - ) + status = ocm_client.update_notification_email_address(addon_name, email_address, exit_on_failure=False) if not status: - self.BuiltIn.fail( - "Unable to update notification email," - " Check if operator is installed via Add-on" - ) + self.BuiltIn.fail("Unable to update notification email, Check if operator is installed via Add-on") @keyword def convert_to_hours_and_minutes(self, seconds): @@ -108,17 +99,14 @@ def convert_to_hours_and_minutes(self, seconds): @keyword def install_isv_by_name(self, operator_name, channel, source="certified-operators"): ocm_client = OpenshiftClusterManager() - ocm_client.install_openshift_isv( - operator_name, channel, source, exit_on_failure=False - ) + ocm_client.install_openshift_isv(operator_name, channel, source, exit_on_failure=False) if operator_name == "ovms": status = ocm_client.wait_for_isv_installation_to_complete("openvino") else: status = ocm_client.wait_for_isv_installation_to_complete(operator_name) if not status: self.BuiltIn.fail( - "Unable to install the {} isv, Check if ISV subscription is " - "created{}".format(operator_name, status) + "Unable to install the {} isv, Check if ISV subscription is created{}".format(operator_name, status) ) @keyword @@ -150,13 +138,9 @@ def install_managed_starburst_addon(self, email_address, license, cluster_name): ocm_client.cluster_name = cluster_name ocm_client.notification_email = email_address license_escaped = license.replace('"', '\\"') - result = ocm_client.install_managed_starburst_addon( - license=license_escaped, exit_on_failure=False - ) + result = ocm_client.install_managed_starburst_addon(license=license_escaped, exit_on_failure=False) if not result: - self.BuiltIn.fail( - "Something got wrong while installing Managed Starburst. Check the logs" - ) + self.BuiltIn.fail("Something got wrong while installing Managed Starburst. 
Check the logs") @keyword def uninstall_managed_starburst_using_addon_flow(self, cluster_name): @@ -186,15 +170,11 @@ def _inference_object_comparison(expected, received, threshold): if not expected.keys() == received.keys(): failures.append([expected.keys(), received.keys()]) for k in expected.keys(): - _inference_object_comparison( - expected[k], received[k], threshold - ) + _inference_object_comparison(expected[k], received[k], threshold) elif isinstance(expected, list): # if current element is a list, compare each value 1 by 1 for id, _ in enumerate(expected): - _inference_object_comparison( - expected[id], received[id], threshold - ) + _inference_object_comparison(expected[id], received[id], threshold) elif isinstance(expected, numbers.Number): # if current element is a number, compare each value with a rounding threshold if not expected - received <= threshold: @@ -251,8 +231,7 @@ def send_random_inference_request( for _ in range(no_requests): data_img = [ - random.randrange(value_range[0], value_range[1]) - for _ in range(shape["C"] * shape["H"] * shape["W"]) + random.randrange(value_range[0], value_range[1]) for _ in range(shape["C"] * shape["H"] * shape["W"]) ] headers = { diff --git a/ods_ci/tests/Resources/Files/pipeline-samples/flip_coin.py b/ods_ci/tests/Resources/Files/pipeline-samples/flip_coin.py index 9729ece7e..e5bff93f7 100644 --- a/ods_ci/tests/Resources/Files/pipeline-samples/flip_coin.py +++ b/ods_ci/tests/Resources/Files/pipeline-samples/flip_coin.py @@ -46,12 +46,8 @@ def print_msg(msg: str): description="Shows how to use dsl.Condition().", ) def flipcoin_pipeline(): - flip_coin_op = components.create_component_from_func( - flip_coin, base_image=DataSciencePipelinesKfpTekton.base_image - ) - print_op = components.create_component_from_func( - print_msg, base_image=DataSciencePipelinesKfpTekton.base_image - ) + flip_coin_op = components.create_component_from_func(flip_coin, base_image=DataSciencePipelinesKfpTekton.base_image) + print_op = components.create_component_from_func(print_msg, base_image=DataSciencePipelinesKfpTekton.base_image) random_num_op = components.create_component_from_func( random_num, base_image=DataSciencePipelinesKfpTekton.base_image ) diff --git a/ods_ci/tests/Resources/Files/pipeline-samples/ray_integration.py b/ods_ci/tests/Resources/Files/pipeline-samples/ray_integration.py index a026740ee..eb587cb4d 100644 --- a/ods_ci/tests/Resources/Files/pipeline-samples/ray_integration.py +++ b/ods_ci/tests/Resources/Files/pipeline-samples/ray_integration.py @@ -9,9 +9,7 @@ def ray_fn(openshift_server: str, openshift_token: str) -> int: from codeflare_sdk.cluster.cluster import Cluster, ClusterConfiguration print("before login") - auth = TokenAuthentication( - token=openshift_token, server=openshift_server, skip_tls=True - ) + auth = TokenAuthentication(token=openshift_token, server=openshift_server, skip_tls=True) auth_return = auth.login() print(f'auth_return: "{auth_return}"') print("after login") diff --git a/ods_ci/tests/Resources/Files/pipeline-samples/take_nap.py b/ods_ci/tests/Resources/Files/pipeline-samples/take_nap.py index 3907b2ace..68e290d06 100644 --- a/ods_ci/tests/Resources/Files/pipeline-samples/take_nap.py +++ b/ods_ci/tests/Resources/Files/pipeline-samples/take_nap.py @@ -38,6 +38,4 @@ def take_nap_pipeline(naptime_secs: int = 900): if __name__ == "__main__": from kfp_tekton.compiler import TektonCompiler - TektonCompiler().compile( - take_nap_pipeline, package_path=__file__.replace(".py", ".yaml") - ) + 
TektonCompiler().compile(take_nap_pipeline, package_path=__file__.replace(".py", ".yaml")) diff --git a/ods_ci/tests/Resources/Files/pipeline-samples/upload_download.py b/ods_ci/tests/Resources/Files/pipeline-samples/upload_download.py index fca7fe171..f77693b3e 100644 --- a/ods_ci/tests/Resources/Files/pipeline-samples/upload_download.py +++ b/ods_ci/tests/Resources/Files/pipeline-samples/upload_download.py @@ -1,4 +1,5 @@ """Test pipeline to exercise various data flow mechanisms.""" + import kfp from ods_ci.libs.DataSciencePipelinesKfpTekton import DataSciencePipelinesKfpTekton @@ -18,9 +19,7 @@ def create_large_file(file_path, size_in_bytes): f.write(os.urandom(size_in_bytes)) def zip_file(input_file_path, output_zip_path): - with zipfile.ZipFile( - output_zip_path, "w", compression=zipfile.ZIP_DEFLATED - ) as zipf: + with zipfile.ZipFile(output_zip_path, "w", compression=zipfile.ZIP_DEFLATED) as zipf: zipf.write(input_file_path, os.path.basename(input_file_path)) print("starting creating the file...") @@ -77,9 +76,7 @@ def inner_decode(my_str): secret_key = inner_decode(mlpipeline_minio_artifact_secret["secretkey"]) secure = inner_decode(mlpipeline_minio_artifact_secret["secure"]) secure = secure.lower() == "true" - client = Minio( - f"{host}:{port}", access_key=access_key, secret_key=secret_key, secure=secure - ) + client = Minio(f"{host}:{port}", access_key=access_key, secret_key=secret_key, secure=secure) data = client.get_object(bucket_name, object_name) with open("my-testfile", "wb") as file_data: diff --git a/ods_ci/utils/scripts/Sender/EmailSender.py b/ods_ci/utils/scripts/Sender/EmailSender.py index c26b14828..c0645792d 100644 --- a/ods_ci/utils/scripts/Sender/EmailSender.py +++ b/ods_ci/utils/scripts/Sender/EmailSender.py @@ -23,17 +23,13 @@ def __init__(self): self._use_unsecure = False self._message = MIMEMultipart() - def prepare_payload( - self, text: str = "", attachments: list[Any] | None = None - ) -> None: + def prepare_payload(self, text: str = "", attachments: list[Any] | None = None) -> None: self._message.attach(MIMEText(text)) if attachments is not None: for filepath in attachments: with open(filepath, "rb") as file: part = MIMEApplication(file.read(), Name=basename(filepath)) - part["Content-Disposition"] = ( - 'attachment; filename="%s"' % basename(filepath) - ) + part["Content-Disposition"] = 'attachment; filename="%s"' % basename(filepath) self._message.attach(part) def prepare_header(self): @@ -56,9 +52,7 @@ def send(self): smtp.starttls(context=context) if self._server_usr and self._server_pw: smtp.login(self._server_usr, self._server_pw) - smtp.sendmail( - self._sender_address, self._receiver_addresses, self._message.as_string() - ) + smtp.sendmail(self._sender_address, self._receiver_addresses, self._message.as_string()) smtp.close() def set_sender_address(self, sender_address: str) -> None: @@ -79,9 +73,7 @@ def set_subject(self, subject: str) -> None: def get_subject(self) -> str: return self._subject - def set_server( - self, server: str, use_ssl: bool = False, use_unsecure: bool = False - ) -> None: + def set_server(self, server: str, use_ssl: bool = False, use_unsecure: bool = False) -> None: if ":" in server: server = server.split(":") self._server = server[0] diff --git a/ods_ci/utils/scripts/Sender/Sender.py b/ods_ci/utils/scripts/Sender/Sender.py index 64bceb28f..da0182f74 100644 --- a/ods_ci/utils/scripts/Sender/Sender.py +++ b/ods_ci/utils/scripts/Sender/Sender.py @@ -4,9 +4,7 @@ class Sender(ABC): @abstractmethod - def prepare_payload( - self, 
text: str = "", attachments: list[Any] | None = None - ) -> None: + def prepare_payload(self, text: str = "", attachments: list[Any] | None = None) -> None: pass @abstractmethod diff --git a/ods_ci/utils/scripts/Sender/send_report.py b/ods_ci/utils/scripts/Sender/send_report.py index 2a5a8a922..1707aa2f4 100644 --- a/ods_ci/utils/scripts/Sender/send_report.py +++ b/ods_ci/utils/scripts/Sender/send_report.py @@ -57,9 +57,7 @@ def send_email_report( description="Script to publish ods-ci results", ) - subparsers = parser.add_subparsers( - title="Available sub commands", help="sub-command help" - ) + subparsers = parser.add_subparsers(title="Available sub commands", help="sub-command help") # Argument parsers for sending report by email email_sender_parser = subparsers.add_parser( @@ -73,9 +71,7 @@ def send_email_report( "-s", "--sender-address", help="Send email from", action="store", required=True ) - args_email_sender_parser.add_argument( - "-r", "--receiver-addresses", help="Send email to", nargs="+", required=True - ) + args_email_sender_parser.add_argument("-r", "--receiver-addresses", help="Send email to", nargs="+", required=True) args_email_sender_parser.add_argument( "-b", diff --git a/ods_ci/utils/scripts/SplitSuite.py b/ods_ci/utils/scripts/SplitSuite.py index b6347d804..94b582759 100644 --- a/ods_ci/utils/scripts/SplitSuite.py +++ b/ods_ci/utils/scripts/SplitSuite.py @@ -76,9 +76,7 @@ def visit_suite(self, suite): suite_to_execute = chunked_suites[self.which_part - 1] try: - if len(chunked_suites[self.which_part]) < num_suite and int( - self.which_part - ) == int(self.parts): + if len(chunked_suites[self.which_part]) < num_suite and int(self.which_part) == int(self.parts): suite_to_execute.extend(chunked_suites[self.which_part]) except IndexError: pass diff --git a/ods_ci/utils/scripts/logger.py b/ods_ci/utils/scripts/logger.py index 569856c20..4a253cbec 100644 --- a/ods_ci/utils/scripts/logger.py +++ b/ods_ci/utils/scripts/logger.py @@ -14,9 +14,7 @@ logging.getLogger().addHandler(console) # Add file rotating handler, with level DEBUG -rotatingHandler = logging.handlers.RotatingFileHandler( - filename="ods-ci.log", maxBytes=1000, backupCount=5 -) +rotatingHandler = logging.handlers.RotatingFileHandler(filename="ods-ci.log", maxBytes=1000, backupCount=5) rotatingHandler.setLevel(logging.DEBUG) formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") rotatingHandler.setFormatter(formatter) diff --git a/ods_ci/utils/scripts/ocm/ocm.py b/ods_ci/utils/scripts/ocm/ocm.py index f61d2f040..23c4e5466 100644 --- a/ods_ci/utils/scripts/ocm/ocm.py +++ b/ods_ci/utils/scripts/ocm/ocm.py @@ -18,14 +18,7 @@ dir_path = os.path.dirname(os.path.abspath(__file__)) sys.path.append(dir_path + "/../") from logger import log -from util import ( - clone_config_repo, - compare_dicts, - execute_command, - read_data_from_json, - read_yaml, - write_data_in_json, -) +from util import clone_config_repo, compare_dicts, execute_command, read_data_from_json, read_yaml, write_data_in_json """ Class for Openshift Cluster Manager @@ -142,9 +135,7 @@ def osd_cluster_create(self): """Creates OSD cluster""" if (self.channel_group == "candidate") and (self.testing_platform == "prod"): - log.error( - "Channel group 'candidate' is available only for stage environment." 
- ) + log.error("Channel group 'candidate' is available only for stage environment.") sys.exit(1) version = "" @@ -156,11 +147,7 @@ def osd_cluster_create(self): if self.channel_group == "candidate": chan_grp = "--channel-group {}".format(self.channel_group) - version_cmd = ( - 'ocm list versions {} | grep -w "'.format(chan_grp) - + re.escape(version) - + '*"' - ) + version_cmd = 'ocm list versions {} | grep -w "'.format(chan_grp) + re.escape(version) + '*"' log.info("CMD: {}".format(version_cmd)) versions = execute_command(version_cmd) if versions is not None: @@ -186,9 +173,7 @@ def osd_cluster_create(self): else: channel_grp = "--channel-group {} ".format(self.channel_group) else: - log.error( - "Invalid channel group. Values can be 'stable' or 'candidate'." - ) + log.error("Invalid channel group. Values can be 'stable' or 'candidate'.") if self.cloud_provider == "aws": cmd = ( @@ -242,10 +227,7 @@ def get_osd_cluster_id(self): ) ret = execute_command(cmd) if ret is None: - log.info( - "Unable to retrieve cluster ID for " - "cluster name {}. EXITING".format(self.cluster_name) - ) + log.info("Unable to retrieve cluster ID for cluster name {}. EXITING".format(self.cluster_name)) sys.exit(1) return ret.strip("\n") @@ -254,10 +236,7 @@ def get_osd_cluster_state(self): cluster_state = self.ocm_describe(filter="--json | jq -r '.state'") if cluster_state is None: - log.info( - "Unable to retrieve cluster state for " - "cluster name {}. EXITING".format(self.cluster_name) - ) + log.info("Unable to retrieve cluster state for cluster name {}. EXITING".format(self.cluster_name)) sys.exit(1) return cluster_state.strip("\n") @@ -266,10 +245,7 @@ def get_osd_cluster_version(self): cluster_version = self.ocm_describe(filter="--json | jq -r '.version.raw_id'") if cluster_version is None: - log.info( - "Unable to retrieve cluster version for " - "cluster name {}. EXITING".format(self.cluster_name) - ) + log.info("Unable to retrieve cluster version for cluster name {}. EXITING".format(self.cluster_name)) sys.exit(1) return cluster_version.strip("\n") @@ -279,10 +255,7 @@ def get_osd_cluster_console_url(self): filter_str = "--json | jq -r '.console.url'" cluster_console_url = self.ocm_describe(filter=filter_str) if cluster_console_url is None: - log.info( - "Unable to retrieve cluster console url " - "for cluster name {}. EXITING".format(self.cluster_name) - ) + log.info("Unable to retrieve cluster console url for cluster name {}. EXITING".format(self.cluster_name)) sys.exit(1) return cluster_console_url.strip("\n") @@ -319,14 +292,10 @@ def update_osd_cluster_info(self, config_file="cluster_config.yaml"): config_data = yaml.safe_load(file) if self.ldap_test_password != "": - config_data[self.cluster_name]["TEST_USER"][ - "PASSWORD" - ] = self.ldap_test_password + config_data[self.cluster_name]["TEST_USER"]["PASSWORD"] = self.ldap_test_password if self.htpasswd_cluster_password != "": - config_data[self.cluster_name]["OCP_ADMIN_USER"][ - "PASSWORD" - ] = self.htpasswd_cluster_password + config_data[self.cluster_name]["OCP_ADMIN_USER"]["PASSWORD"] = self.htpasswd_cluster_password with open(config_file, "w") as yaml_file: yaml_file.write(yaml.dump(config_data, default_flow_style=False)) @@ -345,18 +314,13 @@ def wait_for_osd_cluster_to_be_ready(self, timeout=7200): check_flag = True break elif cluster_state == "error": - log.info( - "{} is in error state. Hence exiting!!".format(self.cluster_name) - ) + log.info("{} is in error state. 
Hence exiting!!".format(self.cluster_name)) sys.exit(1) time.sleep(60) count += 60 if not check_flag: - log.info( - "{} not in ready state even after 2 hours." - " EXITING".format(self.cluster_name) - ) + log.info("{} not in ready state even after 2 hours. EXITING".format(self.cluster_name)) sys.exit(1) def _render_template(self, template_file, output_file, replace_vars): @@ -372,10 +336,7 @@ def _render_template(self, template_file, output_file, replace_vars): with open(output_file, "w") as fh: fh.write(outputText) except: - log.info( - "Failed to render template and create json " - "file {}".format(output_file) - ) + log.info("Failed to render template and create json file {}".format(output_file)) sys.exit(1) def is_addon_installed(self, addon_name="managed-odh"): @@ -383,31 +344,18 @@ def is_addon_installed(self, addon_name="managed-odh"): addon_state = self.get_addon_state(addon_name) if addon_state == "not installed": - log.info( - "Addon {} not installed in cluster " - "{}".format(addon_name, self.cluster_name) - ) + log.info("Addon {} not installed in cluster {}".format(addon_name, self.cluster_name)) return False - log.info( - "Addon {} is installed in cluster" - " {}".format(addon_name, self.cluster_name) - ) + log.info("Addon {} is installed in cluster {}".format(addon_name, self.cluster_name)) return True def get_addon_state(self, addon_name="managed-odh"): """Gets given addon's state""" - cmd = ( - "ocm list addons --cluster {} --columns id,state" - " | grep " - "{} ".format(self.cluster_name, addon_name) - ) + cmd = "ocm list addons --cluster {} --columns id,state | grep {} ".format(self.cluster_name, addon_name) ret = execute_command(cmd) if ret is None: - log.info( - "Failed to get {} addon state for cluster " - "{}".format(addon_name, self.cluster_name) - ) + log.info("Failed to get {} addon state for cluster {}".format(addon_name, self.cluster_name)) return None match = re.search(addon_name + "\s*(.*)", ret) if match is None: @@ -419,9 +367,7 @@ def check_if_machine_pool_exists(self): """Checks if given machine pool name already exists in cluster""" - cmd = "/bin/ocm list machinepools --cluster {} | grep -w {}".format( - self.cluster_name, self.pool_name - ) + cmd = "/bin/ocm list machinepools --cluster {} | grep -w {}".format(self.cluster_name, self.pool_name) ret = execute_command(cmd) if not ret: return False @@ -456,9 +402,7 @@ def add_machine_pool(self): sys.exit(1) time.sleep(60) - def wait_for_addon_installation_to_complete( - self, addon_name="managed-odh", timeout=3600 - ): + def wait_for_addon_installation_to_complete(self, addon_name="managed-odh", timeout=3600): """Waits for addon installation to get complete""" addon_state = self.get_addon_state(addon_name) @@ -474,15 +418,10 @@ def wait_for_addon_installation_to_complete( time.sleep(60) count += 60 if not check_flag: - log.info( - "addon {} not in installed state even after " - "60 minutes. EXITING".format(addon_name) - ) + log.info("addon {} not in installed state even after 60 minutes. EXITING".format(addon_name)) sys.exit(1) - def wait_for_addon_uninstallation_to_complete( - self, addon_name="managed-odh", timeout=3600 - ): + def wait_for_addon_uninstallation_to_complete(self, addon_name="managed-odh", timeout=3600): """Waits for addon uninstallation to get complete""" addon_state = self.get_addon_state(addon_name) @@ -498,10 +437,7 @@ def wait_for_addon_uninstallation_to_complete( time.sleep(60) count += 60 if not check_flag: - log.info( - "addon {} not in uninstalled state even after " - "60 minutes. 
EXITING".format(addon_name) - ) + log.info("addon {} not in uninstalled state even after 60 minutes. EXITING".format(addon_name)) sys.exit(1) def list_idps(self): @@ -529,18 +465,13 @@ def uninstall_addon(self, addon_name="managed-odh", exit_on_failure=True): addon_state = self.get_addon_state(addon_name) if addon_state != "not installed": cluster_id = self.get_osd_cluster_id() - cmd = ( - "ocm --v={} delete /api/clusters_mgmt/v1/clusters/{}/addons/{}".format( - self.ocm_verbose_level, cluster_id, addon_name - ) + cmd = "ocm --v={} delete /api/clusters_mgmt/v1/clusters/{}/addons/{}".format( + self.ocm_verbose_level, cluster_id, addon_name ) log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: - log.info( - "Failed to uninstall {} addon on cluster " - "{}".format(addon_name, self.cluster_name) - ) + log.info("Failed to uninstall {} addon on cluster {}".format(addon_name, self.cluster_name)) if exit_on_failure: sys.exit(1) @@ -609,10 +540,8 @@ def install_addon( output_file = output_filename self._render_template(template_file, output_file, replace_vars) cluster_id = self.get_osd_cluster_id() - cmd = ( - "ocm --v={} post /api/clusters_mgmt/v1/clusters/{}/addons --body={}".format( - self.ocm_verbose_level, cluster_id, output_file - ) + cmd = "ocm --v={} post /api/clusters_mgmt/v1/clusters/{}/addons --body={}".format( + self.ocm_verbose_level, cluster_id, output_file ) log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) @@ -621,10 +550,7 @@ def install_addon( log.info("\nRET: {}".format(ret)) failure_flag = False if ret is None: - log.info( - "Failed to install {} addon on cluster " - "{}".format(addon_name, self.cluster_name) - ) + log.info("Failed to install {} addon on cluster {}".format(addon_name, self.cluster_name)) failure_flag = True if exit_on_failure: sys.exit(1) @@ -632,15 +558,11 @@ def install_addon( return failure_flag return failure_flag - def is_oc_obj_existent( - self, kind, name, namespace, retries=30, retry_sec_interval=3 - ): + def is_oc_obj_existent(self, kind, name, namespace, retries=30, retry_sec_interval=3): log.info( "\nGetting {} with name {} from {} namespace." 
"In case of failure, the operation will be repeated every {} seconds, " - "maximum {} times".format( - kind, name, namespace, retry_sec_interval, retries - ) + "maximum {} times".format(kind, name, namespace, retry_sec_interval, retries) ) found = False for retry in range(retries): @@ -660,9 +582,7 @@ def is_oc_obj_existent( found = True break if not found: - log.error( - "{} object called {} not found (ns: {}).".format(kind, name, namespace) - ) + log.error("{} object called {} not found (ns: {}).".format(kind, name, namespace)) return found def install_rhoam_addon(self, exit_on_failure=True): @@ -714,10 +634,7 @@ def install_rhoam_addon(self, exit_on_failure=True): log.info("redhat-rhoam-dms secret found!") else: failure_flags.append(True) - log.info( - "redhat-rhoam-deadmanssnitch secret was " - "not created during installation" - ) + log.info("redhat-rhoam-deadmanssnitch secret was not created during installation") if exit_on_failure: sys.exit(1) @@ -747,17 +664,11 @@ def install_rhoam_addon(self, exit_on_failure=True): # else: # self.wait_for_addon_installation_to_complete(addon_name="managed-api-service") else: - log.info( - "managed-api-service is already installed on {}".format( - self.cluster_name - ) - ) + log.info("managed-api-service is already installed on {}".format(self.cluster_name)) def uninstall_rhoam_addon(self, exit_on_failure=True): """Uninstalls RHOAM addon""" - self.uninstall_addon( - addon_name="managed-api-service", exit_on_failure=exit_on_failure - ) + self.uninstall_addon(addon_name="managed-api-service", exit_on_failure=exit_on_failure) self.wait_for_addon_uninstallation_to_complete(addon_name="managed-api-service") def install_managed_starburst_addon(self, license, exit_on_failure=True): @@ -787,17 +698,11 @@ def install_managed_starburst_addon(self, license, exit_on_failure=True): # else: # self.wait_for_addon_installation_to_complete(addon_name="managed-starburst") else: - log.info( - "managed-api-service is already installed on {}".format( - self.cluster_name - ) - ) + log.info("managed-api-service is already installed on {}".format(self.cluster_name)) def uninstall_managed_starburst_addon(self, exit_on_failure=True): """Uninstalls RHOAM addon""" - self.uninstall_addon( - addon_name="managed-starburst", exit_on_failure=exit_on_failure - ) + self.uninstall_addon(addon_name="managed-starburst", exit_on_failure=exit_on_failure) self.wait_for_addon_uninstallation_to_complete(addon_name="managed-starburst") def create_idp(self): @@ -812,10 +717,7 @@ def create_idp(self): log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: - log.info( - "Failed to add identity provider of " - "type {}".format(self.idp_type) - ) + log.info("Failed to add identity provider of type {}".format(self.idp_type)) self.add_user_to_group() time.sleep(10) @@ -841,19 +743,14 @@ def create_idp(self): # self.add_user_to_group() elif self.idp_type == "ldap": - ldap_yaml_file = ( - os.path.abspath(os.path.dirname(__file__)) - + "/../../../configs/templates/ldap/ldap.yaml" - ) + ldap_yaml_file = os.path.abspath(os.path.dirname(__file__)) + "/../../../configs/templates/ldap/ldap.yaml" fin = open(ldap_yaml_file, "rt") fout = open(ldap_yaml_file + "_replaced", "wt") for line in fin: if "" in line: fout.write(line.replace("", self.ldap_users_string)) elif "" in line: - fout.write( - line.replace("", self.ldap_passwords_string) - ) + fout.write(line.replace("", self.ldap_passwords_string)) elif "" in line: fout.write(line.replace("", self.ldap_bind_password)) else: @@ -865,8 
+762,7 @@ def create_idp(self): message_bytes = base64.b64decode(base64_bytes) ldap_bind_password_dec = message_bytes.decode("ascii") ldap_yaml_file = ( - os.path.abspath(os.path.dirname(__file__)) - + "/../../../configs/templates/ldap/ldap.yaml_replaced" + os.path.abspath(os.path.dirname(__file__)) + "/../../../configs/templates/ldap/ldap.yaml_replaced" ) cmd = "oc apply -f {}".format(ldap_yaml_file) log.info("CMD: {}".format(cmd)) @@ -898,15 +794,11 @@ def create_idp(self): def delete_idp(self): """Deletes Identity Provider""" - cmd = "ocm --v={} delete idp -c {} {}".format( - self.ocm_verbose_level, self.cluster_name, self.idp_name - ) + cmd = "ocm --v={} delete idp -c {} {}".format(self.ocm_verbose_level, self.cluster_name, self.idp_name) log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: - log.info( - "Failed to delete identity provider of type {}".format(self.idp_name) - ) + log.info("Failed to delete identity provider of type {}".format(self.idp_name)) def add_user_to_group(self, user="", group="cluster-admins"): """Adds user to given group""" @@ -970,9 +862,7 @@ def add_users_to_rhods_group(self): self.create_group("rhods-noaccess") # Adds user ldap-noaccess1..ldap-noaccessN for i in range(1, int(self.num_users_to_create_per_group) + 1): - self.add_user_to_group( - user="ldap-noaccess" + str(i), group="rhods-noaccess" - ) + self.add_user_to_group(user="ldap-noaccess" + str(i), group="rhods-noaccess") # Logging users/groups details after adding # given user to group @@ -1019,12 +909,8 @@ def install_rhods_addon(self): # Install dependency operators for rhoai deployment dependency_operators = ["servicemesh", "serverless"] for dependency_operator in dependency_operators: - self.install_openshift_isv( - dependency_operator, "stable", "redhat-operators" - ) - self.wait_for_isv_installation_to_complete( - dependency_operator, namespace="openshift-operators" - ) + self.install_openshift_isv(dependency_operator, "stable", "redhat-operators") + self.wait_for_isv_installation_to_complete(dependency_operator, namespace="openshift-operators") # Deploy rhoai self.install_rhods() @@ -1091,19 +977,14 @@ def wait_for_osd_cluster_to_get_deleted(self, timeout=3600): time.sleep(60) count += 60 if not check_flag: - log.info( - "{} not deleted even after an hour." - " EXITING".format(self.cluster_name) - ) + log.info("{} not deleted even after an hour. EXITING".format(self.cluster_name)) sys.exit(1) def hibernate_cluster(self): """Hibernate OSD Cluster""" cluster_id = self.get_osd_cluster_id() - cmd = "ocm --v={} hibernate cluster {}".format( - self.ocm_verbose_level, cluster_id - ) + cmd = "ocm --v={} hibernate cluster {}".format(self.ocm_verbose_level, cluster_id) log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: @@ -1128,10 +1009,7 @@ def wait_for_osd_cluster_to_get_hibernated(self, timeout=1800): time.sleep(60) count += 60 if not check_flag: - log.info( - "{} not in hibernating state even after 30 mins." - " EXITING".format(self.cluster_name) - ) + log.info("{} not in hibernating state even after 30 mins. EXITING".format(self.cluster_name)) sys.exit(1) def resume_cluster(self): @@ -1163,34 +1041,23 @@ def wait_for_osd_cluster_to_get_resumed(self, timeout=3600): time.sleep(60) count += 60 if not check_flag: - log.info( - "{} not in ready state even after 30 mins." - " EXITING".format(self.cluster_name) - ) + log.info("{} not in ready state even after 30 mins. 
EXITING".format(self.cluster_name)) sys.exit(1) - def update_notification_email_address( - self, addon_name, email_address, exit_on_failure=True - ): + def update_notification_email_address(self, addon_name, email_address, exit_on_failure=True): """Update notification email to Addons""" replace_vars = {"EMAIL_ADDER": email_address} template_file = "notification_email.jinja" output_file = "notification_email.json" self._render_template(template_file, output_file, replace_vars) cluster_id = self.get_osd_cluster_id() - cmd = ( - "ocm --v={} patch /api/clusters_mgmt/v1/clusters/{}/addons/{} " - "--body={}".format( - self.ocm_verbose_level, cluster_id, addon_name, output_file - ) + cmd = "ocm --v={} patch /api/clusters_mgmt/v1/clusters/{}/addons/{} --body={}".format( + self.ocm_verbose_level, cluster_id, addon_name, output_file ) log.info("CMD: {}".format(cmd)) ret = execute_command(cmd) if ret is None: - log.info( - "Failed to update email address to {} addon on cluster " - "{}".format(addon_name, self.cluster_name) - ) + log.info("Failed to update email address to {} addon on cluster {}".format(addon_name, self.cluster_name)) if exit_on_failure: sys.exit(1) else: @@ -1198,9 +1065,7 @@ def update_notification_email_address( else: return ret - def install_openshift_isv( - self, operator_name, channel, source, exit_on_failure=True - ): + def install_openshift_isv(self, operator_name, channel, source, exit_on_failure=True): replace_vars = { "ISV_NAME": operator_name, "CHANNEL": channel, @@ -1255,9 +1120,7 @@ def yaml_loader(filepath): else: return ret - def wait_for_isv_installation_to_complete( - self, isv_name, namespace="openshift-operators", timeout=300 - ): + def wait_for_isv_installation_to_complete(self, isv_name, namespace="openshift-operators", timeout=300): count = 0 check_flag = False cmd = ( @@ -1276,10 +1139,7 @@ def wait_for_isv_installation_to_complete( time.sleep(60) count += 60 if not check_flag: - log.info( - "ISV {} not in installed state even after " - "5 minutes. EXITING".format(isv_name) - ) + log.info("ISV {} not in installed state even after 5 minutes. 
EXITING".format(isv_name)) return False else: return True @@ -1294,9 +1154,8 @@ def get_latest_osd_candidate_version(self, osd_major_version, osd_minor_version) For example in 4.8.10 : 4 is minor version Example 4.8 = osd_major_version.osd_minor_version """ - cmd = ( - "ocm list versions --channel-group candidate |" - " grep ^{}.{}|tail -1".format(osd_major_version, osd_minor_version) + cmd = "ocm list versions --channel-group candidate | grep ^{}.{}|tail -1".format( + osd_major_version, osd_minor_version ) ret = execute_command(cmd) if ret is None: @@ -1313,19 +1172,11 @@ def get_all_osd_versions(self): # Dict that will be converted into json file latest_osd_versions_data = {} osd_versions_dict = {} - for candidate_version in range( - int(self.osd_minor_version_start), int(self.osd_minor_version_end) - ): - version = ( - self.get_latest_osd_candidate_version( - self.osd_major_version, candidate_version - ) - ).split("-")[0] + for candidate_version in range(int(self.osd_minor_version_start), int(self.osd_minor_version_end)): + version = (self.get_latest_osd_candidate_version(self.osd_major_version, candidate_version)).split("-")[0] if version: osd_versions_dict[".".join(version.split(".")[:2])] = version - latest_osd_versions_data[str(self.osd_major_version)] = ( - osd_versions_dict - ) + latest_osd_versions_data[str(self.osd_major_version)] = osd_versions_dict log.info(latest_osd_versions_data) return latest_osd_versions_data @@ -1342,18 +1193,12 @@ def compare_with_old_version_file(self): if new_data == old_data: old_data.update(new_data) - log.info( - "All the osd version in file is up to date." - " file_data:{}".format(old_data) - ) + log.info("All the osd version in file is up to date. file_data:{}".format(old_data)) new_data["RUN"] = None write_data_in_json(filename=self.osd_latest_version_data, data=old_data) return None else: - if ( - self.osd_major_version not in old_data.keys() - and self.osd_major_version in new_data.keys() - ): + if self.osd_major_version not in old_data.keys() and self.osd_major_version in new_data.keys(): old_data[self.osd_major_version] = {"0": "0"} log.info(old_data.keys()) lst_to_trigger_job = compare_dicts( @@ -1378,11 +1223,8 @@ def compare_with_old_version_file(self): def change_cluster_channel_group(self): """update the channel using ocm cmd""" cluster_id = self.get_osd_cluster_id() - run_change_channel_cmd = ( - "ocm --v={} patch /api/clusters_mgmt/v1/clusters/{}" - " --body {}".format( - self.ocm_verbose_level, cluster_id, self.update_ocm_channel_json - ) + run_change_channel_cmd = "ocm --v={} patch /api/clusters_mgmt/v1/clusters/{} --body {}".format( + self.ocm_verbose_level, cluster_id, self.update_ocm_channel_json ) log.info(run_change_channel_cmd) ret = execute_command(run_change_channel_cmd) @@ -1405,18 +1247,14 @@ def update_ocm_policy(self): cluster_id ) latest_upgrade_version = execute_command(get_latest_upgrade_version) - log.info( - "Version Available to Upgrade are ...{}".format(latest_upgrade_version) - ) + log.info("Version Available to Upgrade are ...{}".format(latest_upgrade_version)) latest_upgrade_version = ast.literal_eval(latest_upgrade_version)[-1] data["version"] = latest_upgrade_version write_data_in_json(self.update_policies_json, data) schedule_cluster_upgrade = ( "ocm --v={} post /api/clusters_mgmt/v1/clusters/{}/upgrade_policies" - " --body {}".format( - self.ocm_verbose_level, cluster_id, self.update_policies_json - ) + " --body {}".format(self.ocm_verbose_level, cluster_id, self.update_policies_json) ) ret = 
execute_command(schedule_cluster_upgrade) if ret is None: @@ -1430,10 +1268,7 @@ def update_ocm_policy(self): """Parse CLI arguments""" - ocm_cli_binary_url = ( - "https://github.com/openshift-online/ocm-cli/" - "releases/download/v0.1.55/ocm-linux-amd64" - ) + ocm_cli_binary_url = "https://github.com/openshift-online/ocm-cli/releases/download/v0.1.55/ocm-linux-amd64" parser = argparse.ArgumentParser( usage=argparse.SUPPRESS, formatter_class=argparse.ArgumentDefaultsHelpFormatter, @@ -1457,9 +1292,7 @@ def update_ocm_policy(self): default="0", ) - subparsers = parser.add_subparsers( - title="Available sub commands", help="Available sub commands" - ) + subparsers = parser.add_subparsers(title="Available sub commands", help="Available sub commands") # Argument of update_ocm_policy update_ocm_policy = subparsers.add_parser( "update_ocm_policy", @@ -1514,8 +1347,7 @@ def update_ocm_policy(self): ) get_latest_osd_candidate_json.add_argument( "--json-path", - help="json file path to store osd latest version details." - "The file should be created already.", + help="json file path to store osd latest version details.The file should be created already.", action="store", dest="osd_latest_version_data", required=True, @@ -1548,9 +1380,7 @@ def update_ocm_policy(self): dest="new_run", required=False, ) - get_latest_osd_candidate_json.set_defaults( - func=ocm_obj.compare_with_old_version_file - ) + get_latest_osd_candidate_json.set_defaults(func=ocm_obj.compare_with_old_version_file) # Argument parsers for ocm_login ocm_login_parser = subparsers.add_parser( @@ -1560,9 +1390,7 @@ def update_ocm_policy(self): ) optional_ocm_login_parser = ocm_login_parser._action_groups.pop() - required_ocm_login_parser = ocm_login_parser.add_argument_group( - "required arguments" - ) + required_ocm_login_parser = ocm_login_parser.add_argument_group("required arguments") ocm_login_parser._action_groups.append(optional_ocm_login_parser) required_ocm_login_parser.add_argument( "--token", @@ -1590,16 +1418,10 @@ def update_ocm_policy(self): ) optional_create_cluster_parser = create_cluster_parser._action_groups.pop() - required_create_cluster_parser = create_cluster_parser.add_argument_group( - "required arguments" - ) + required_create_cluster_parser = create_cluster_parser.add_argument_group("required arguments") - aws_create_cluster_parser = create_cluster_parser.add_argument_group( - " Options for creating OSD cluster in AWS" - ) - gcp_create_cluster_parser = create_cluster_parser.add_argument_group( - " Options for creating OSD cluster in GCP" - ) + aws_create_cluster_parser = create_cluster_parser.add_argument_group(" Options for creating OSD cluster in AWS") + gcp_create_cluster_parser = create_cluster_parser.add_argument_group(" Options for creating OSD cluster in GCP") create_cluster_parser._action_groups.append(optional_create_cluster_parser) @@ -1916,9 +1738,7 @@ def update_ocm_policy(self): formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) optional_delete_idp_parser = delete_idp_parser._action_groups.pop() - required_delete_idp_parser = delete_idp_parser.add_argument_group( - "required arguments" - ) + required_delete_idp_parser = delete_idp_parser.add_argument_group("required arguments") delete_idp_parser._action_groups.append(optional_delete_idp_parser) required_delete_idp_parser.add_argument( "--idp-name", @@ -1965,9 +1785,7 @@ def update_ocm_policy(self): formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) optional_update_info_parser = update_info_parser._action_groups.pop() - 
required_update_info_parser = update_info_parser.add_argument_group( - "required arguments" - ) + required_update_info_parser = update_info_parser.add_argument_group("required arguments") update_info_parser._action_groups.append(optional_update_info_parser) optional_update_info_parser.add_argument( @@ -2002,9 +1820,7 @@ def update_ocm_policy(self): help=("Install rhods addon cluster."), formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - required_install_rhods_parser = install_rhods_parser.add_argument_group( - "required arguments" - ) + required_install_rhods_parser = install_rhods_parser.add_argument_group("required arguments") required_install_rhods_parser.add_argument( "--cluster-name", @@ -2028,9 +1844,7 @@ def update_ocm_policy(self): help=("Install gpu addon cluster."), formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - required_install_gpu_parser = install_gpu_parser.add_argument_group( - "required arguments" - ) + required_install_gpu_parser = install_gpu_parser.add_argument_group("required arguments") required_install_gpu_parser.add_argument( "--cluster-name", @@ -2049,9 +1863,7 @@ def update_ocm_policy(self): ) optional_machinepool_cluster_parser = add_machinepool_parser._action_groups.pop() - required_machinepool_cluster_parser = add_machinepool_parser.add_argument_group( - "required arguments" - ) + required_machinepool_cluster_parser = add_machinepool_parser.add_argument_group("required arguments") add_machinepool_parser._action_groups.append(optional_machinepool_cluster_parser) required_machinepool_cluster_parser.add_argument( @@ -2108,9 +1920,7 @@ def update_ocm_policy(self): help=("Uninstall rhods addon cluster."), formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - required_uninstall_rhods_parser = uninstall_rhods_parser.add_argument_group( - "required arguments" - ) + required_uninstall_rhods_parser = uninstall_rhods_parser.add_argument_group("required arguments") required_uninstall_rhods_parser.add_argument( "--cluster-name", @@ -2127,9 +1937,7 @@ def update_ocm_policy(self): help=("Install rhoam addon cluster."), formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - required_install_rhoam_parser = install_rhoam_parser.add_argument_group( - "required arguments" - ) + required_install_rhoam_parser = install_rhoam_parser.add_argument_group("required arguments") required_install_rhoam_parser.add_argument( "--cluster-name", @@ -2146,9 +1954,7 @@ def update_ocm_policy(self): help=("Uninstall rhoam addon cluster."), formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - required_uninstall_rhoam_parser = uninstall_rhoam_parser.add_argument_group( - "required arguments" - ) + required_uninstall_rhoam_parser = uninstall_rhoam_parser.add_argument_group("required arguments") required_uninstall_rhoam_parser.add_argument( "--cluster-name", @@ -2166,15 +1972,9 @@ def update_ocm_policy(self): formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) optional_create_idp_parser = create_idp_parser._action_groups.pop() - required_create_idp_parser = create_idp_parser.add_argument_group( - "required arguments" - ) - ldap_create_idp_parser = create_idp_parser.add_argument_group( - " Options for ldap IDP" - ) - htpasswd_create_idp_parser = create_idp_parser.add_argument_group( - " Options for htpasswd IDP" - ) + required_create_idp_parser = create_idp_parser.add_argument_group("required arguments") + ldap_create_idp_parser = create_idp_parser.add_argument_group(" Options for ldap IDP") + htpasswd_create_idp_parser = create_idp_parser.add_argument_group(" 
Options for htpasswd IDP") create_idp_parser._action_groups.append(optional_create_idp_parser) required_create_idp_parser.add_argument( @@ -2196,11 +1996,7 @@ def update_ocm_policy(self): "--ldap-url ", help="ldap: Ldap url", metavar=" ", - default=( - "ldap://openldap.openldap.svc." - "cluster.local:1389" - "/dc=example,dc=org?uid" - ), + default=("ldap://openldap.openldap.svc.cluster.local:1389/dc=example,dc=org?uid"), ) ldap_create_idp_parser.add_argument( "--ldap-bind-dn ", @@ -2230,11 +2026,7 @@ def update_ocm_policy(self): action="store", dest="ldap_url", metavar="", - default=( - "ldap://openldap.openldap.svc." - "cluster.local:1389" - "/dc=example,dc=org?uid" - ), + default=("ldap://openldap.openldap.svc.cluster.local:1389/dc=example,dc=org?uid"), ) optional_create_idp_parser.add_argument( "--ldap-bind-dn", diff --git a/ods_ci/utils/scripts/openshift/openshift.py b/ods_ci/utils/scripts/openshift/openshift.py index 92d304344..67c32351e 100644 --- a/ods_ci/utils/scripts/openshift/openshift.py +++ b/ods_ci/utils/scripts/openshift/openshift.py @@ -144,11 +144,7 @@ def openshift_install(self, config_file="cluster_config.yaml"): stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) - log.info( - "Executing openshift-install create cluster command in {}".format( - install_config_dir - ) - ) + log.info("Executing openshift-install create cluster command in {}".format(install_config_dir)) log.info("OpenShift Cluster creation is in progress...") returncode = process.wait() if returncode != 0: @@ -163,14 +159,10 @@ def openshift_install(self, config_file="cluster_config.yaml"): re.S, ) if match is None: - log.error( - "Unexpected console logs in openshift-install create cluster output" - ) + log.error("Unexpected console logs in openshift-install create cluster output") sys.exit(1) - log.info( - "OpenShift Cluster {} created successfully !".format(self.cluster_name) - ) + log.info("OpenShift Cluster {} created successfully !".format(self.cluster_name)) cluster_info = {} cluster_info["CLUSTER_NAME"] = self.cluster_name @@ -213,9 +205,7 @@ def openshift_destroy(self): description="Script to do openshift operations on AWS", ) - subparsers = parser.add_subparsers( - title="Available sub commands", help="sub-command help" - ) + subparsers = parser.add_subparsers(title="Available sub commands", help="sub-command help") # Argument parsers for create_cluster openshift_install_parser = subparsers.add_parser( @@ -225,9 +215,7 @@ def openshift_destroy(self): ) optional_openshift_install_parser = openshift_install_parser._action_groups.pop() - required_openshift_install_parser = openshift_install_parser.add_argument_group( - "required arguments" - ) + required_openshift_install_parser = openshift_install_parser.add_argument_group("required arguments") openshift_install_parser._action_groups.append(optional_openshift_install_parser) required_openshift_install_parser.add_argument( @@ -248,8 +236,7 @@ def openshift_destroy(self): required_openshift_install_parser.add_argument( "--install-config-file", - help="Install config file. Note: " - "Place this file from where you are running this for now", + help="Install config file. 
Note: Place this file from where you are running this for now", action="store", dest="install_config_file", required=True, diff --git a/ods_ci/utils/scripts/polarion/xunit_add_properties.py b/ods_ci/utils/scripts/polarion/xunit_add_properties.py index 796ab17b0..975caefad 100644 --- a/ods_ci/utils/scripts/polarion/xunit_add_properties.py +++ b/ods_ci/utils/scripts/polarion/xunit_add_properties.py @@ -1,4 +1,5 @@ """Inserts properties from a config file into a xunit format XML file""" + import argparse import codecs import os @@ -126,9 +127,7 @@ def add_testcase_properties(xml_obj, tcconfig=None): multile_test_ids[testcase.get("name")] = polarion_id for key in multile_test_ids.keys(): - for index, testcase in enumerate( - xml_obj.findall(expression + "[@name='" + key + "']") - ): + for index, testcase in enumerate(xml_obj.findall(expression + "[@name='" + key + "']")): if index < len(multile_test_ids[testcase.get("name")]): tcproperties = et.Element("properties") test_id = "" @@ -219,9 +218,7 @@ def main(): args = parse_args() # Restructure the robot test result xml file - xunit_xml_file_restructured = ( - os.path.dirname(os.path.realpath(__file__)) + "/restructured_xml_file.xml" - ) + xunit_xml_file_restructured = os.path.dirname(os.path.realpath(__file__)) + "/restructured_xml_file.xml" restructure_xml_for_polarion(args.xunit_xml_file, xunit_xml_file_restructured) root = parse_xml(xunit_xml_file_restructured) diff --git a/ods_ci/utils/scripts/read_pr.py b/ods_ci/utils/scripts/read_pr.py index 3c97ae75a..d8474e94b 100644 --- a/ods_ci/utils/scripts/read_pr.py +++ b/ods_ci/utils/scripts/read_pr.py @@ -34,12 +34,7 @@ def generate_arg_commands(self, all_tags): for ft in all_tags: if ft == "DestructiveTest": destructive_tests.append(self.parse_tag(ft)) - elif ( - ft.startswith("Execution-Time-Over") - or ft == "Tier3" - or ft == "Tier1" - or ft == "Sanity" - ): + elif ft.startswith("Execution-Time-Over") or ft == "Tier3" or ft == "Tier1" or ft == "Sanity": slow_tests.append(self.parse_tag(ft)) elif ft.startswith("ODS-"): ods_tests.append(self.parse_tag(ft)) @@ -104,9 +99,7 @@ def find_tags(self, lines): if "#" in tag: # remove comment tag = tag[: tag.index("#")] - tag_data = tag.replace(" ", self.magic_tag_separator).split( - self.magic_tag_separator - ) + tag_data = tag.replace(" ", self.magic_tag_separator).split(self.magic_tag_separator) for tag_it in tag_data: tag_it = tag_it.strip() if len(tag_it) > 0: @@ -122,9 +115,7 @@ def get_sections_info(self, robot_file): for line in lines: if line.startswith("***"): # after each *** - self.apply_section_rule( - section_name, section_data, sections_info, file_name - ) + self.apply_section_rule(section_name, section_data, sections_info, file_name) if self.keyword_key in line: section_name = self.keyword_key diff --git a/ods_ci/utils/scripts/reportportal/rp_uploader.py b/ods_ci/utils/scripts/reportportal/rp_uploader.py index a1cd3e5cd..482a6870c 100644 --- a/ods_ci/utils/scripts/reportportal/rp_uploader.py +++ b/ods_ci/utils/scripts/reportportal/rp_uploader.py @@ -63,9 +63,7 @@ def upload_result(self): description="Script to upload results to report portal", ) - subparsers = parser.add_subparsers( - title="Available sub commands", help="sub-command help" - ) + subparsers = parser.add_subparsers(title="Available sub commands", help="sub-command help") # Argument parsers for uploading test results to report portal upload_result_parser = subparsers.add_parser( @@ -74,9 +72,7 @@ def upload_result(self): 
formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - required_upload_result_parser = upload_result_parser.add_argument_group( - "required arguments" - ) + required_upload_result_parser = upload_result_parser.add_argument_group("required arguments") required_upload_result_parser.add_argument( "--config-file", diff --git a/ods_ci/utils/scripts/rosa/rosa.py b/ods_ci/utils/scripts/rosa/rosa.py index e64d9bcfb..70b296f43 100644 --- a/ods_ci/utils/scripts/rosa/rosa.py +++ b/ods_ci/utils/scripts/rosa/rosa.py @@ -6,11 +6,7 @@ sys.path.append(dir_path + "/../") from awsOps import aws_configure from logger import log -from rosaOps import ( - create_account_roles, - rosa_create_cluster, - wait_for_osd_cluster_to_be_ready, -) +from rosaOps import create_account_roles, rosa_create_cluster, wait_for_osd_cluster_to_be_ready class RosaClusterManager: @@ -34,9 +30,7 @@ def create_rosa_cluster(self): self.compute_machine_type, self.rosa_version, ) - aws_configure( - self.aws_access_key_id, self.aws_secret_access_key, self.aws_region - ) + aws_configure(self.aws_access_key_id, self.aws_secret_access_key, self.aws_region) create_account_roles() rosa_create_cluster( self.cluster_name, @@ -56,9 +50,7 @@ def main(): ) # Argument parsers for create_cluster - subparsers = parser.add_subparsers( - title="Available sub commands", help="sub-command help" - ) + subparsers = parser.add_subparsers(title="Available sub commands", help="sub-command help") rosaClusterCreate_parser = subparsers.add_parser( "create_rosa_cluster", help=("create ROSA clusters using openshift installer"), diff --git a/ods_ci/utils/scripts/rosa/rosaOps.py b/ods_ci/utils/scripts/rosa/rosaOps.py index 16e3210a2..d7dd9cdcc 100644 --- a/ods_ci/utils/scripts/rosa/rosaOps.py +++ b/ods_ci/utils/scripts/rosa/rosaOps.py @@ -137,10 +137,7 @@ def get_rosa_cluster_state(cluster_name): cluster_state = rosa_describe(cluster_name, filter="--output json | jq -r '.state'") if cluster_state is None: - print( - "Unable to retrieve cluster state for " - "cluster name {}. EXITING".format(cluster_name) - ) + print("Unable to retrieve cluster state for cluster name {}. 
EXITING".format(cluster_name)) sys.exit(1) cluster_state = cluster_state.strip("\n") return cluster_state diff --git a/ods_ci/utils/scripts/terraform/openstack/provision.py b/ods_ci/utils/scripts/terraform/openstack/provision.py index 330e7c1b5..2c81369f5 100644 --- a/ods_ci/utils/scripts/terraform/openstack/provision.py +++ b/ods_ci/utils/scripts/terraform/openstack/provision.py @@ -119,9 +119,7 @@ def set_config(self): description="Script to manage instances in Openstack", ) - subparsers = parser.add_subparsers( - title="Available sub commands", help="sub-command help" - ) + subparsers = parser.add_subparsers(title="Available sub commands", help="sub-command help") # Argument parsers for create_instance create_instance_parser = subparsers.add_parser( @@ -131,9 +129,7 @@ def set_config(self): ) optional_create_instance_parser = create_instance_parser._action_groups.pop() - required_create_instance_parser = create_instance_parser.add_argument_group( - "required arguments" - ) + required_create_instance_parser = create_instance_parser.add_argument_group("required arguments") create_instance_parser._action_groups.append(optional_create_instance_parser) required_create_instance_parser.add_argument( "--cloud_name", @@ -161,8 +157,7 @@ def set_config(self): ) required_create_instance_parser.add_argument( "--key_pair", - help="The public key of an OpenSSH key" - " pair to be used for access to created instances", + help="The public key of an OpenSSH key pair to be used for access to created instances", action="store", dest="key_pair", required=True, @@ -200,9 +195,7 @@ def set_config(self): ) optional_delete_instance_parser = delete_instance_parser._action_groups.pop() - required_delete_instance_parser = delete_instance_parser.add_argument_group( - "required arguments" - ) + required_delete_instance_parser = delete_instance_parser.add_argument_group("required arguments") delete_instance_parser._action_groups.append(optional_delete_instance_parser) required_delete_instance_parser.add_argument( "--cloud_name", @@ -216,8 +209,7 @@ def set_config(self): ) required_delete_instance_parser.add_argument( "--key_pair", - help="The public key of an OpenSSH key" - " pair to be used for access to created instances", + help="The public key of an OpenSSH key pair to be used for access to created instances", action="store", dest="key_pair", required=True, @@ -255,9 +247,7 @@ def set_config(self): ) optional_set_config_parser = set_config_parser._action_groups.pop() - required_set_config_parser = set_config_parser.add_argument_group( - "required arguments" - ) + required_set_config_parser = set_config_parser.add_argument_group("required arguments") set_config_parser._action_groups.append(optional_set_config_parser) required_set_config_parser.add_argument( "--cloud_name", diff --git a/ods_ci/utils/scripts/testconfig/generateTestConfigFile.py b/ods_ci/utils/scripts/testconfig/generateTestConfigFile.py index 3b5aaf90c..430a22c4a 100644 --- a/ods_ci/utils/scripts/testconfig/generateTestConfigFile.py +++ b/ods_ci/utils/scripts/testconfig/generateTestConfigFile.py @@ -93,8 +93,7 @@ def parse_args(): parser.add_argument( "-s", "--skip-git-clone", - help="If this option is used then " - "cloning config git repo for ods-ci tests is skipped.", + help="If this option is used then cloning config git repo for ods-ci tests is skipped.", action="store_true", dest="skip_clone", ) @@ -136,9 +135,7 @@ def get_prometheus_url(project): Get prometheus url for the cluster. 
""" host_jsonpath = "{.spec.host}" - cmd = "oc get route prometheus -n {} -o jsonpath='{}'".format( - project, host_jsonpath - ) + cmd = "oc get route prometheus -n {} -o jsonpath='{}'".format(project, host_jsonpath) prometheus_url = execute_command(cmd) return "https://" + prometheus_url.strip("\n") @@ -194,30 +191,14 @@ def generate_test_config_file( data["S3"]["BUCKET_5"]["REGION"] = config_data["S3"]["BUCKET_5"]["REGION"] data["S3"]["BUCKET_5"]["ENDPOINT"] = config_data["S3"]["BUCKET_5"]["ENDPOINT"] data["ANACONDA_CE"]["ACTIVATION_KEY"] = config_data["ANACONDA_CE"]["ACTIVATION_KEY"] - data["OCP_CONSOLE_URL"] = config_data["TEST_CLUSTERS"][test_cluster][ - "OCP_CONSOLE_URL" - ] - data["ODH_DASHBOARD_URL"] = config_data["TEST_CLUSTERS"][test_cluster][ - "ODH_DASHBOARD_URL" - ] - data["TEST_USER"]["AUTH_TYPE"] = config_data["TEST_CLUSTERS"][test_cluster][ - "TEST_USER" - ]["AUTH_TYPE"] - data["TEST_USER"]["USERNAME"] = config_data["TEST_CLUSTERS"][test_cluster][ - "TEST_USER" - ]["USERNAME"] - data["TEST_USER"]["PASSWORD"] = config_data["TEST_CLUSTERS"][test_cluster][ - "TEST_USER" - ]["PASSWORD"] - data["OCP_ADMIN_USER"]["AUTH_TYPE"] = config_data["TEST_CLUSTERS"][test_cluster][ - "OCP_ADMIN_USER" - ]["AUTH_TYPE"] - data["OCP_ADMIN_USER"]["USERNAME"] = config_data["TEST_CLUSTERS"][test_cluster][ - "OCP_ADMIN_USER" - ]["USERNAME"] - data["OCP_ADMIN_USER"]["PASSWORD"] = config_data["TEST_CLUSTERS"][test_cluster][ - "OCP_ADMIN_USER" - ]["PASSWORD"] + data["OCP_CONSOLE_URL"] = config_data["TEST_CLUSTERS"][test_cluster]["OCP_CONSOLE_URL"] + data["ODH_DASHBOARD_URL"] = config_data["TEST_CLUSTERS"][test_cluster]["ODH_DASHBOARD_URL"] + data["TEST_USER"]["AUTH_TYPE"] = config_data["TEST_CLUSTERS"][test_cluster]["TEST_USER"]["AUTH_TYPE"] + data["TEST_USER"]["USERNAME"] = config_data["TEST_CLUSTERS"][test_cluster]["TEST_USER"]["USERNAME"] + data["TEST_USER"]["PASSWORD"] = config_data["TEST_CLUSTERS"][test_cluster]["TEST_USER"]["PASSWORD"] + data["OCP_ADMIN_USER"]["AUTH_TYPE"] = config_data["TEST_CLUSTERS"][test_cluster]["OCP_ADMIN_USER"]["AUTH_TYPE"] + data["OCP_ADMIN_USER"]["USERNAME"] = config_data["TEST_CLUSTERS"][test_cluster]["OCP_ADMIN_USER"]["USERNAME"] + data["OCP_ADMIN_USER"]["PASSWORD"] = config_data["TEST_CLUSTERS"][test_cluster]["OCP_ADMIN_USER"]["PASSWORD"] data["SSO"]["USERNAME"] = config_data["SSO"]["USERNAME"] data["SSO"]["PASSWORD"] = config_data["SSO"]["PASSWORD"] data["RHODS_BUILD"]["PULL_SECRET"] = config_data["RHODS_BUILD"]["PULL_SECRET"] diff --git a/ods_ci/utils/scripts/util.py b/ods_ci/utils/scripts/util.py index 6adbeaa9b..9f6d40191 100644 --- a/ods_ci/utils/scripts/util.py +++ b/ods_ci/utils/scripts/util.py @@ -32,12 +32,8 @@ def clone_config_repo(**kwargs): git_repo_with_credens = kwargs["git_repo"] if kwargs["git_username"] != "" and kwargs["git_password"] != "": git_credens = "{}:{}".format(kwargs["git_username"], kwargs["git_password"]) - git_repo_with_credens = re.sub( - r"(https://)(.*)", r"\1" + git_credens + "@" + r"\2", kwargs["git_repo"] - ) - cmd = "git clone {} -b {} {}".format( - git_repo_with_credens, kwargs["git_branch"], kwargs["repo_dir"] - ) + git_repo_with_credens = re.sub(r"(https://)(.*)", r"\1" + git_credens + "@" + r"\2", kwargs["git_repo"]) + cmd = "git clone {} -b {} {}".format(git_repo_with_credens, kwargs["git_branch"], kwargs["repo_dir"]) ret = subprocess.call(cmd, shell=True) if ret: print("Failed to clone repo {}.".format(kwargs["git_repo"])) @@ -90,9 +86,7 @@ def oc_login(ocp_console_url, username, password, timeout=600): """ cluster_api_url 
     cluster_api_url = re.sub(r"/$", "", cluster_api_url) + ":6443"
-    cmd = "oc login -u {} -p {} {} --insecure-skip-tls-verify=true".format(
-        username, password, cluster_api_url
-    )
+    cmd = "oc login -u {} -p {} {} --insecure-skip-tls-verify=true".format(username, password, cluster_api_url)
     count = 0
     chk_flag = 0
     while count <= timeout:
diff --git a/pyproject.toml b/pyproject.toml
index a37954122..f24fc7785 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,14 +42,18 @@ ruff = "0.1.11"
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
 
+[tool.black]
+line-length = 120
+target-version = ['py311']
+
 [tool.isort]
 # https://pycqa.github.io/isort/docs/configuration/black_compatibility.html
 profile = "black"
-line_length = 88 # align with black's default
+line_length = 120
 
 [tool.ruff]
 target-version = "py311"
-line-length = 88 # align with black's default
+line-length = 120
 
 # https://docs.astral.sh/ruff/rules
 [tool.ruff.lint]
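
The pyproject.toml hunk above raises the line limit to 120 columns for black, isort, and ruff in one change; the three tools only stay out of each other's way while those values agree (note the spelling differs: black and ruff use "line-length", isort uses "line_length"). A minimal sketch of a consistency check under that assumption, using only the tables shown in the diff above (tomllib ships with the Python 3.11 this config targets):

# Minimal sketch: check that black, isort, and ruff agree on the line limit.
# Key names are taken from the [tool.*] tables in the pyproject.toml hunk above.
import tomllib

with open("pyproject.toml", "rb") as f:
    config = tomllib.load(f)

lengths = {
    "black": config["tool"]["black"]["line-length"],  # black: "line-length"
    "isort": config["tool"]["isort"]["line_length"],  # isort: "line_length"
    "ruff": config["tool"]["ruff"]["line-length"],    # ruff matches black's spelling
}
assert len(set(lengths.values())) == 1, f"formatters disagree on line length: {lengths}"
print(f"all formatters agree on {lengths['black']} columns")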