diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 45030b887fe..203c7dffbed 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -88,7 +88,7 @@ /src/ip-group/ @necusjz @kairu-ms @jsntcy -/src/connectedk8s/ @bavneetsingh16 @deeksha345 @anagg929 +/src/connectedk8s/ @bavneetsingh16 @deeksha345 @anagg929 @atchutbarli /src/storagesync/ @jsntcy diff --git a/src/connectedk8s/HISTORY.rst b/src/connectedk8s/HISTORY.rst index d44cb3899fb..d4e90167ae3 100644 --- a/src/connectedk8s/HISTORY.rst +++ b/src/connectedk8s/HISTORY.rst @@ -3,6 +3,11 @@ Release History =============== +1.10.8 +++++++ +* Add checks for the bundling feature flag in the 'connectedk8s connect' and 'connectedk8s update' commands. +* Display a more detailed error message when the 'connectedk8s upgrade' command fails with the bundling feature enabled. + 1.10.7 ++++++ * Added support for discovering additional k8s distributions and Infrastructure. diff --git a/src/connectedk8s/azext_connectedk8s/_constants.py b/src/connectedk8s/azext_connectedk8s/_constants.py index 4f173c668a2..3e04477e45b 100644 --- a/src/connectedk8s/azext_connectedk8s/_constants.py +++ b/src/connectedk8s/azext_connectedk8s/_constants.py @@ -63,13 +63,13 @@ AHB_Enum_Values = ["True", "False", "NotApplicable"] Feature_Values = ["cluster-connect", "azure-rbac", "custom-locations"] CRD_FOR_FORCE_DELETE = [ - "arccertificates.clusterconfig.azure.com", - "azureclusteridentityrequests.clusterconfig.azure.com", - "azureextensionidentities.clusterconfig.azure.com", - "connectedclusters.arc.azure.com", - "customlocationsettings.clusterconfig.azure.com", - "extensionconfigs.clusterconfig.azure.com", - "gitconfigs.clusterconfig.azure.com", + "arccertificates.clusterconfig.azure", + "azureclusteridentityrequests.clusterconfig.azure", + "azureextensionidentities.clusterconfig.azure", + "connectedclusters.arc.azure", + "customlocationsettings.clusterconfig.azure", + "extensionconfigs.clusterconfig.azure", + "gitconfigs.clusterconfig.azure", ] Helm_Install_Release_Userfault_Messages = [ "forbidden", @@ -134,6 +134,8 @@ Get_HelmRegistery_Path_Fault_Type = "helm-registry-path-fetch-error" Pull_HelmChart_Fault_Type = "helm-chart-pull-error" Export_HelmChart_Fault_Type = "helm-chart-export-error" +List_Extension_Config_Fault_Type = "kubernetes-list-extension-config-error" +List_Kubernetes_Namespaced_Pod_Fault_Type = "kubernetes-list-namespaced-pod-error" Get_Kubernetes_Distro_Fault_Type = "kubernetes-get-distribution-error" Get_Kubernetes_Namespace_Fault_Type = "kubernetes-get-namespace-error" Get_Kubernetes_Helm_Release_Namespace_Fault_Type = ( @@ -418,7 +420,7 @@ # Connect Precheck Diagnoser constants Cluster_Diagnostic_Checks_Job_Registry_Path = ( - "mcr.microsoft.com/azurearck8s/helmchart/stable/clusterdiagnosticchecks:0.2.2" + "azurearck8s/helmchart/stable/clusterdiagnosticchecks:0.2.2" ) Cluster_Diagnostic_Checks_Helm_Install_Failed_Fault_Type = ( "Error while installing cluster diagnostic checks helm release" @@ -481,8 +483,8 @@ DEFAULT_MAX_ONBOARDING_TIMEOUT_HELMVALUE_SECONDS = "1200" # URL constants -CLIENT_PROXY_MCR_TARGET = "mcr.microsoft.com/azureconnectivity/proxy" -HELM_MCR_URL = "mcr.microsoft.com/azurearck8s/helm" +CLIENT_PROXY_MCR_TARGET = "azureconnectivity/proxy" +HELM_MCR_URL = "azurearck8s/helm" HELM_VERSION = "v3.12.2" Download_And_Install_Kubectl_Fault_Type = "Failed to download and install kubectl" Azure_Access_Token_Variable = "AZURE_ACCESS_TOKEN" @@ -517,3 +519,20 @@ # "Application code shouldn't block the creation of resources for a resource 
provider that is in the registering state." # See https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider allowed_rp_registration_states = ["Registering", "Registered"] + +Connected_Cluster_Type = "connectedClusters" +Arc_Agent_Update_Validator_Job_Label = "agent-update-validator" + +Arc_Agentry_Bundle_Feature = "extensionSets" +Arc_Agentry_Bundle_Feature_Setting = "versionManagedExtensions" + +Bundle_Feature_Value_List = ["enabled", "disabled", "preview"] + +Extension_Config_CRD_Group = "clusterconfig.azure.com" +Extension_Config_CRD_Version = "v1beta1" +Extension_Config_CRD_Plural = "extensionconfigs" + +Get_Bundle_Feature_Flag_Fault_Type = "get-bundle-feature-flag-error" +Get_Extension_Config_Bundle_Property_Fault_Type = ( + "get-extension-config-bundle-property-error" +) diff --git a/src/connectedk8s/azext_connectedk8s/_precheckutils.py b/src/connectedk8s/azext_connectedk8s/_precheckutils.py index 257fde55463..b10c9a22241 100644 --- a/src/connectedk8s/azext_connectedk8s/_precheckutils.py +++ b/src/connectedk8s/azext_connectedk8s/_precheckutils.py @@ -20,6 +20,7 @@ import azext_connectedk8s._utils as azext_utils if TYPE_CHECKING: + from knack.commands import CLICommand from kubernetes.client import BatchV1Api, CoreV1Api logger = get_logger(__name__) @@ -30,6 +31,7 @@ def fetch_diagnostic_checks_results( + cmd: CLICommand, corev1_api_instance: CoreV1Api, batchv1_api_instance: BatchV1Api, helm_client_location: str, @@ -52,6 +54,7 @@ def fetch_diagnostic_checks_results( # Executing the cluster_diagnostic_checks job and fetching the logs obtained cluster_diagnostic_checks_container_log = ( executing_cluster_diagnostic_checks_job( + cmd, corev1_api_instance, batchv1_api_instance, helm_client_location, @@ -135,6 +138,7 @@ def fetch_diagnostic_checks_results( def executing_cluster_diagnostic_checks_job( + cmd: CLICommand, corev1_api_instance: CoreV1Api, batchv1_api_instance: BatchV1Api, helm_client_location: str, @@ -208,8 +212,10 @@ def executing_cluster_diagnostic_checks_job( ) return None + mcr_url = azext_utils.get_mcr_path(cmd) + chart_path = azext_utils.get_chart_path( - consts.Cluster_Diagnostic_Checks_Job_Registry_Path, + f"{mcr_url}/{consts.Cluster_Diagnostic_Checks_Job_Registry_Path}", kube_config, kube_context, helm_client_location, diff --git a/src/connectedk8s/azext_connectedk8s/_utils.py b/src/connectedk8s/azext_connectedk8s/_utils.py index 55396ebcb11..4c6c03cf831 100644 --- a/src/connectedk8s/azext_connectedk8s/_utils.py +++ b/src/connectedk8s/azext_connectedk8s/_utils.py @@ -58,6 +58,28 @@ # pylint: disable=bare-except +def get_mcr_path(cmd: CLICommand) -> str: + active_directory_array = cmd.cli_ctx.cloud.endpoints.active_directory.split(".") + + # default for public, mc, ff clouds + mcr_postfix = active_directory_array[2] + # special cases for USSec, exclude part of suffix + if len(active_directory_array) == 4 and active_directory_array[2] == "microsoft": + mcr_postfix = active_directory_array[3] + # special case for USNat + elif len(active_directory_array) == 5: + mcr_postfix = ( + active_directory_array[2] + + "." + + active_directory_array[3] + + "." 
+ + active_directory_array[4] + ) + + mcr_url = f"mcr.microsoft.{mcr_postfix}" + return mcr_url + + def validate_connect_rp_location(cmd: CLICommand, location: str) -> None: subscription_id = ( os.getenv("AZURE_SUBSCRIPTION_ID") diff --git a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_binaryutils.py b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_binaryutils.py index 56f7b218b7e..c655b4269de 100644 --- a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_binaryutils.py +++ b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_binaryutils.py @@ -13,16 +13,18 @@ from azure.cli.core import azclierror, telemetry from azure.cli.core.style import Style, print_styled_text from knack import log +from knack.commands import CLICommand import azext_connectedk8s._constants as consts import azext_connectedk8s._fileutils as file_utils +import azext_connectedk8s._utils as utils logger = log.get_logger(__name__) # Downloads client side proxy to connect to Arc Connectivity Platform def install_client_side_proxy( - arc_proxy_folder: Optional[str], debug: bool = False + cmd: CLICommand, arc_proxy_folder: Optional[str], debug: bool = False ) -> str: client_operating_system = _get_client_operating_system() client_architecture = _get_client_architeture() @@ -48,7 +50,11 @@ def install_client_side_proxy( ) _download_proxy_from_MCR( - install_dir, proxy_name, client_operating_system, client_architecture + cmd, + install_dir, + proxy_name, + client_operating_system, + client_architecture, ) _check_proxy_installation(install_dir, proxy_name, debug) @@ -64,15 +70,21 @@ def install_client_side_proxy( def _download_proxy_from_MCR( - dest_dir: str, proxy_name: str, operating_system: str, architecture: str + cmd: CLICommand, + dest_dir: str, + proxy_name: str, + operating_system: str, + architecture: str, ) -> None: - mar_target = f"{consts.CLIENT_PROXY_MCR_TARGET}/{operating_system.lower()}/{architecture}/arc-proxy" + mcr_url = utils.get_mcr_path(cmd) + + mar_target = f"{mcr_url}/{consts.CLIENT_PROXY_MCR_TARGET}/{operating_system.lower()}/{architecture}/arc-proxy" logger.debug( "Downloading Arc Connectivity Proxy from %s in Microsoft Artifact Regristy.", mar_target, ) - client = oras.client.OrasClient() + client = oras.client.OrasClient(hostname=mcr_url) t0 = time.time() try: diff --git a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_proxylogic.py b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_proxylogic.py index 71345064af6..f72074d1b6e 100644 --- a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_proxylogic.py +++ b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_proxylogic.py @@ -18,7 +18,7 @@ if TYPE_CHECKING: from subprocess import Popen - from knack.commands import CLICommmand + from knack.commands import CLICommand from requests.models import Response from azext_connectedk8s.vendored_sdks.preview_2024_07_01.models import ( @@ -30,7 +30,7 @@ def handle_post_at_to_csp( - cmd: CLICommmand, + cmd: CLICommand, api_server_port: int, tenant_id: str, clientproxy_process: Popen[bytes], diff --git a/src/connectedk8s/azext_connectedk8s/custom.py b/src/connectedk8s/azext_connectedk8s/custom.py index 8f481f1111b..08061612e71 100644 --- a/src/connectedk8s/azext_connectedk8s/custom.py +++ b/src/connectedk8s/azext_connectedk8s/custom.py @@ -48,6 +48,7 @@ from knack.prompting import NoTTYException, prompt_y_n from kubernetes import client as kube_client from kubernetes import config +from kubernetes.client.rest import ApiException from kubernetes.config.kube_config 
import KubeConfigMerger from packaging import version @@ -81,7 +82,7 @@ from azure.cli.core.commands import AzCliCommand from azure.core.polling import LROPoller from Crypto.PublicKey.RSA import RsaKey - from knack.commands import CLICommmand + from knack.commands import CLICommand from kubernetes.client import V1NodeList from kubernetes.config.kube_config import ConfigNode from requests.models import Response @@ -99,7 +100,7 @@ def create_connectedk8s( - cmd: CLICommmand, + cmd: CLICommand, client: ConnectedClusterOperations, resource_group_name: str, cluster_name: str, @@ -235,6 +236,10 @@ def create_connectedk8s( configuration_settings, configuration_protected_settings, ) + + # Validate and update bundle feature flag value if provided + validate_connect_cluster_bundle_feature_flag_value(configuration_settings, yes) + arc_agentry_configurations = generate_arc_agent_configuration( configuration_settings, configuration_protected_settings ) @@ -301,7 +306,7 @@ def create_connectedk8s( # Install kubectl and helm try: kubectl_client_location = install_kubectl_client() - helm_client_location = install_helm_client() + helm_client_location = install_helm_client(cmd) except Exception as e: raise CLIInternalError( f"An exception has occured while trying to perform kubectl or helm install: {e}" @@ -344,6 +349,7 @@ def create_connectedk8s( # Performing cluster-diagnostic-checks diagnostic_checks, storage_space_available = ( precheckutils.fetch_diagnostic_checks_results( + cmd, api_instance, batchv1_api_instance, helm_client_location, @@ -742,7 +748,9 @@ def create_connectedk8s( "Cleaning up the stale arc agents present on the cluster before starting new onboarding." ) # Explicit CRD Deletion - crd_cleanup_force_delete(kubectl_client_location, kube_config, kube_context) + crd_cleanup_force_delete( + cmd, kubectl_client_location, kube_config, kube_context + ) # Cleaning up the cluster utils.delete_arc_agents( release_namespace, @@ -773,7 +781,9 @@ def create_connectedk8s( raise ArgumentUsageError(err_msg, recommendation=reco_msg) # cleanup of stuck CRD if release namespace is not present/deleted - crd_cleanup_force_delete(kubectl_client_location, kube_config, kube_context) + crd_cleanup_force_delete( + cmd, kubectl_client_location, kube_config, kube_context + ) print( f"Step: {utils.get_utctimestring()}: Check if ResourceGroup exists. Try to create if it doesn't" @@ -1043,7 +1053,7 @@ def validate_existing_provisioned_cluster_for_reput( raise InvalidArgumentValueError(err_msg) -def send_cloud_telemetry(cmd: CLICommmand) -> str: +def send_cloud_telemetry(cmd: CLICommand) -> str: telemetry.add_extension_event( "connectedk8s", {"Context.Default.AzureCLI.AzureCloud": cmd.cli_ctx.cloud.name} ) @@ -1153,7 +1163,7 @@ def check_kube_connection() -> str: assert False -def install_helm_client() -> str: +def install_helm_client(cmd: CLICommand) -> str: print( f"Step: {utils.get_utctimestring()}: Install Helm client if it does not exist" ) @@ -1219,13 +1229,16 @@ def install_helm_client() -> str: logger.warning( "Downloading helm client for first time. This can take few minutes..." 
) - client = oras.client.OrasClient() + + mcr_url = utils.get_mcr_path(cmd) + + client = oras.client.OrasClient(hostname=mcr_url) retry_count = 3 retry_delay = 5 for i in range(retry_count): try: client.pull( - target=f"{consts.HELM_MCR_URL}:{artifactTag}", + target=f"{mcr_url}/{consts.HELM_MCR_URL}:{artifactTag}", outdir=download_location, ) break @@ -1289,8 +1302,22 @@ def connected_cluster_exists( return True -def get_default_config_dp_endpoint(cmd: CLICommmand, location: str) -> str: - cloud_based_domain = cmd.cli_ctx.cloud.endpoints.active_directory.split(".")[2] +def get_default_config_dp_endpoint(cmd: CLICommand, location: str) -> str: + active_directory_array = cmd.cli_ctx.cloud.endpoints.active_directory.split(".") + # default for public, mc, ff clouds + cloud_based_domain = active_directory_array[2] + # special cases for USSec/USNat clouds + if len(active_directory_array) == 4: + cloud_based_domain = active_directory_array[2] + "." + active_directory_array[3] + elif len(active_directory_array) == 5: + cloud_based_domain = ( + active_directory_array[2] + + "." + + active_directory_array[3] + + "." + + active_directory_array[4] + ) + config_dp_endpoint = ( f"https://{location}.dp.kubernetesconfiguration.azure.{cloud_based_domain}" ) @@ -1298,7 +1325,7 @@ def get_default_config_dp_endpoint(cmd: CLICommmand, location: str) -> str: def get_config_dp_endpoint( - cmd: CLICommmand, + cmd: CLICommand, location: str, values_file: str | None, arm_metadata: dict[str, Any] | None = None, @@ -1512,6 +1539,269 @@ def set_security_profile(enable_workload_identity: bool) -> SecurityProfile: return security_profile +def get_installed_bundle_extensions() -> list[str]: + api_instance = kube_client.CustomObjectsApi() + + try: + extension_configs = api_instance.list_cluster_custom_object( + group=consts.Extension_Config_CRD_Group, + version=consts.Extension_Config_CRD_Version, + plural=consts.Extension_Config_CRD_Plural, + ) + except Exception as e: + if isinstance(e, ApiException) and e.status == 404: + # If the ExtensionConfig resource is not found, return an empty list + return [] + + utils.kubernetes_exception_handler( + e, + consts.List_Extension_Config_Fault_Type, + "Failed to list ExtensionConfigs", + error_message="Failed to list ExtensionConfigs: ", + ) + + try: + installed_extensions = [] + for config in extension_configs.get("items", []): + config_spec = config.get("spec", {}) + isPartOfBundle = config_spec.get("isPartOfBundle") + isDependentOnBundle = config_spec.get("isDependentOnBundle") + extensionType = config_spec.get("extensionType") + if (isPartOfBundle or isDependentOnBundle) and extensionType: + installed_extensions.append(extensionType) + + except Exception as e: # pylint: disable=broad-except + telemetry.set_exception( + exception=e, + fault_type=consts.Get_Extension_Config_Bundle_Property_Fault_Type, + summary="Failed to get bundle properties from the Arc extension config", + ) + raise CLIInternalError( + f"Failed to get bundle properties from the Arc extension config: {e}" + ) + + return installed_extensions + + +def get_bundle_feature_flag_from_arc_agentry_config( + current_arc_agentry_config: list[ArcAgentryConfigurations], +) -> str | None: + try: + for agentry_config in current_arc_agentry_config or []: + if ( + agentry_config.feature == consts.Arc_Agentry_Bundle_Feature + and agentry_config.settings + and ( + consts.Arc_Agentry_Bundle_Feature_Setting in agentry_config.settings + ) + ): + return agentry_config.settings[ + consts.Arc_Agentry_Bundle_Feature_Setting + 
].lower() + return None + + except Exception as e: # pylint: disable=broad-except + telemetry.set_exception( + exception=e, + fault_type=consts.Get_Bundle_Feature_Flag_Fault_Type, + summary="Failed to get bundle feature flag from Arc Agentry Configurations", + ) + raise CLIInternalError( + f"Failed to get bundle feature flag from Arc Agentry Configurations: {e}" + ) + + +def get_bundle_feature_flag_from_configuration_settings( + configuration_settings: dict[str, Any], +) -> str | None: + try: + if not configuration_settings: + return None + + settings = configuration_settings.get(consts.Arc_Agentry_Bundle_Feature, {}) + value = settings.get(consts.Arc_Agentry_Bundle_Feature_Setting) + return value if value is None else value.lower() + + except Exception as e: # pylint: disable=broad-except + telemetry.set_exception( + exception=e, + fault_type=consts.Get_Bundle_Feature_Flag_Fault_Type, + summary="Failed to get bundle feature flag from configuration settings", + ) + raise CLIInternalError( + f"Failed to get bundle feature flag from configuration settings: {e}" + ) + + +def validate_bundle_feature_flag_value( + configuration_settings: dict[str, Any], +) -> str | None: + print( + f"Step: {utils.get_utctimestring()}: Validating the bundle feature flag value" + ) + value = get_bundle_feature_flag_from_configuration_settings(configuration_settings) + + if value is not None: + # Remove leading and trailing whitespace and quotes + value = value.strip().strip("'\"") + + if value and value not in consts.Bundle_Feature_Value_List: + err_msg = ( + f"Not supported value for the feature flag '{consts.Arc_Agentry_Bundle_Feature_Setting}': " + f"'{value}'. Please specify a value from the list: {consts.Bundle_Feature_Value_List}." + ) + telemetry.set_exception( + exception=err_msg, + fault_type=consts.Invalid_Argument_Fault_Type, + summary="Invalid value for the bundle feature flag", + ) + raise InvalidArgumentValueError(err_msg) + + configuration_settings[consts.Arc_Agentry_Bundle_Feature][ + consts.Arc_Agentry_Bundle_Feature_Setting + ] = value + print( + f"Step: {utils.get_utctimestring()}: Setting the bundle feature flag value to '{value}'" + ) + + return value + + +def validate_connect_cluster_bundle_feature_flag_value( + configuration_settings: dict[str, Any], + yes: bool = False, +): + bundle_feature_flag_value = validate_bundle_feature_flag_value( + configuration_settings + ) + + # If the bundle feature flag value is None, skip the validation + if bundle_feature_flag_value is None: + return + + if bundle_feature_flag_value == "preview": + confirmation_message = ( + f"You are about to enter the 'preview' mode for {consts.Arc_Agentry_Bundle_Feature_Setting}. " + "In this mode, all SLA support will be discontinued, and the cluster will remain in 'preview' mode " + "until it is disconnected from Arc. Are you sure you want to proceed? " + ) + + utils.user_confirmation(confirmation_message, yes) + + logger.warning( + "Entered %s 'preview' mode. All SLA support is discontinued, and the cluster will remain in 'preview' mode " + "until it is disconnected from Arc.", + consts.Arc_Agentry_Bundle_Feature_Setting, + ) + + elif bundle_feature_flag_value == "disabled": + err_msg = ( + f"{consts.Arc_Agentry_Bundle_Feature_Setting} 'disabled' mode can only be set using 'az connectedk8s update'. " + f"To keep the bundle feature flag off during cluster connection, remove " + f"{consts.Arc_Agentry_Bundle_Feature_Setting} from the --config." 
+ ) + telemetry.set_exception( + exception=err_msg, + fault_type=consts.Invalid_Argument_Fault_Type, + summary="Invalid value for the bundle feature flag", + ) + raise ArgumentUsageError(err_msg) + + +def validate_update_cluster_bundle_feature_flag_value( + cmd: CLICommand, + current_arc_agentry_config: list[ArcAgentryConfigurations], + configuration_settings: dict[str, Any], + resource_group_name: str, + cluster_name: str, +): + bundle_feature_flag_value = validate_bundle_feature_flag_value( + configuration_settings + ) + + # If the bundle feature flag value is None, skip the validation + if bundle_feature_flag_value is None: + return + + current_bundle_feature_flag_value = get_bundle_feature_flag_from_arc_agentry_config( + current_arc_agentry_config + ) + + if bundle_feature_flag_value == "preview": + err_msg = ( + f"{consts.Arc_Agentry_Bundle_Feature_Setting} 'preview' mode can only be enabled when a cluster " + "is first connected to Arc with 'az connectedk8s connect'. Updating the preview mode config with " + "'az connectedk8s update' is not allowed." + ) + + telemetry.set_exception( + exception=err_msg, + fault_type=consts.Invalid_Argument_Fault_Type, + summary="Invalid value for the bundle feature flag", + ) + + raise ArgumentUsageError(err_msg) + + if current_bundle_feature_flag_value == "preview": + err_msg = ( + f"The cluster is in {consts.Arc_Agentry_Bundle_Feature_Setting} 'preview' mode, " + "updating the value is not allowed." + ) + + telemetry.set_exception( + exception=err_msg, + fault_type=consts.Invalid_Argument_Fault_Type, + summary="Invalid value for the bundle feature flag", + ) + + raise ArgumentUsageError(err_msg) + + invalid_transition = ( + current_bundle_feature_flag_value == "enabled" + and bundle_feature_flag_value == "" + ) or ( + current_bundle_feature_flag_value == "" + and bundle_feature_flag_value == "disabled" + ) + + if invalid_transition: + err_msg = ( + f"Could not set {consts.Arc_Agentry_Bundle_Feature}.{consts.Arc_Agentry_Bundle_Feature_Setting} from " + f"'{current_bundle_feature_flag_value}' to '{bundle_feature_flag_value}'." + ) + + telemetry.set_exception( + exception=err_msg, + fault_type=consts.Invalid_Argument_Fault_Type, + summary="Invalid value for the bundle feature flag", + ) + raise ArgumentUsageError(err_msg) + + # If the bundle feature flag is set to 'disabled', check if any bundle extensions are installed + if ( + current_bundle_feature_flag_value == "enabled" + and bundle_feature_flag_value == "disabled" + ): + installed_bundle_extensions = get_installed_bundle_extensions() + + if installed_bundle_extensions: + err_msg = ( + f"Could not set {consts.Arc_Agentry_Bundle_Feature}.{consts.Arc_Agentry_Bundle_Feature_Setting} to " + f"'disabled' - detected the following extension types on the cluster: {installed_bundle_extensions}.\n" + f"Please remove them with 'az k8s-extension delete --cluster-name <cluster-name> " + f"--cluster-type <cluster-type> --resource-group <resource-group> --name <extension-name>' " + f"and try turning off the feature again."
+ ) + + telemetry.set_exception( + exception=err_msg, + fault_type=consts.Invalid_Argument_Fault_Type, + summary="Invalid value for the bundle feature flag", + ) + + raise ArgumentUsageError(err_msg) + + def generate_arc_agent_configuration( configuration_settings: dict[str, Any], redacted_protected_settings: dict[str, Any], @@ -1733,7 +2023,7 @@ def list_connectedk8s( def delete_connectedk8s( - cmd: CLICommmand, + cmd: CLICommand, client: ConnectedClusterOperations, resource_group_name: str, cluster_name: str, @@ -1785,7 +2075,7 @@ def delete_connectedk8s( check_kube_connection() # Install helm client - helm_client_location = install_helm_client() + helm_client_location = install_helm_client(cmd) # Check Release Existance release_namespace = utils.get_release_namespace( @@ -1802,10 +2092,14 @@ def delete_connectedk8s( print(f"Step: {utils.get_utctimestring()}: Performing Force Delete") kubectl_client_location = install_kubectl_client() - delete_cc_resource(client, resource_group_name, cluster_name, no_wait).result() + delete_cc_resource( + client, resource_group_name, cluster_name, no_wait, force=force_delete + ).result() # Explicit CRD Deletion - crd_cleanup_force_delete(kubectl_client_location, kube_config, kube_context) + crd_cleanup_force_delete( + cmd, kubectl_client_location, kube_config, kube_context + ) if release_namespace: utils.delete_arc_agents( @@ -1820,7 +2114,9 @@ def delete_connectedk8s( return if not release_namespace: - delete_cc_resource(client, resource_group_name, cluster_name, no_wait).result() + delete_cc_resource( + client, resource_group_name, cluster_name, no_wait, force=force_delete + ).result() return # Loading config map @@ -1871,7 +2167,9 @@ def delete_connectedk8s( recommendation=reco_str, ) - delete_cc_resource(client, resource_group_name, cluster_name, no_wait).result() + delete_cc_resource( + client, resource_group_name, cluster_name, no_wait, force=force_delete + ).result() else: telemetry.set_exception( exception="Unable to delete connected cluster", @@ -1952,15 +2250,26 @@ def delete_cc_resource( resource_group_name: str, cluster_name: str, no_wait: bool, + force: bool = False, ) -> LROPoller[None]: print(f"Step: {utils.get_utctimestring()}: Deleting ARM resource") try: - poller: LROPoller[None] = sdk_no_wait( - no_wait, - client.begin_delete, - resource_group_name=resource_group_name, - cluster_name=cluster_name, - ) + poller: LROPoller[None] + if force: + poller = sdk_no_wait( + no_wait, + client.begin_delete, + resource_group_name=resource_group_name, + cluster_name=cluster_name, + params={"force": True}, + ) + else: + poller = sdk_no_wait( + no_wait, + client.begin_delete, + resource_group_name=resource_group_name, + cluster_name=cluster_name, + ) return poller except Exception as e: utils.arm_exception_handler( @@ -1995,7 +2304,7 @@ def update_connected_cluster_internal( def update_connected_cluster( - cmd: CLICommmand, + cmd: CLICommand, client: ConnectedClusterOperations, resource_group_name: str, cluster_name: str, @@ -2072,9 +2381,6 @@ def update_connected_cluster( configuration_settings, configuration_protected_settings, ) - arc_agentry_configurations = generate_arc_agent_configuration( - configuration_settings, redacted_protected_values - ) # Fetch Connected Cluster for agent version connected_cluster = client.get(resource_group_name, cluster_name) @@ -2157,8 +2463,21 @@ def update_connected_cluster( # if the user had not logged in. 
kubernetes_version = check_kube_connection() + # Validate and update bundle feature flag value if provided + validate_update_cluster_bundle_feature_flag_value( + cmd, + connected_cluster.arc_agentry_configurations, + configuration_settings, + resource_group_name, + cluster_name, + ) + + arc_agentry_configurations = generate_arc_agent_configuration( + configuration_settings, redacted_protected_values + ) + # Install helm client - helm_client_location = install_helm_client() + helm_client_location = install_helm_client(cmd) release_namespace = validate_release_namespace( client, @@ -2351,7 +2670,7 @@ def update_connected_cluster( def upgrade_agents( - cmd: CLICommmand, + cmd: CLICommand, client: ConnectedClusterOperations, resource_group_name: str, cluster_name: str, @@ -2396,7 +2715,7 @@ def upgrade_agents( api_instance = kube_client.CoreV1Api() # Install helm client - helm_client_location = install_helm_client() + helm_client_location = install_helm_client(cmd) # Check Release Existence release_namespace = utils.get_release_namespace( @@ -2654,6 +2973,36 @@ def upgrade_agents( for message in consts.Helm_Install_Release_Userfault_Messages ): telemetry.set_user_fault() + + namespace = consts.Arc_Namespace + label_selector = f"job-name={consts.Arc_Agent_Update_Validator_Job_Label}" + + try: + api_instance = kube_client.CoreV1Api() + + # Get the list of pods matching the label + pods = api_instance.list_namespaced_pod( + namespace=namespace, label_selector=label_selector + ) + + # Extract the terminated message from the container + if pods.items: + pod = pods.items[0] + container_statuses = pod.status.container_statuses + if container_statuses: + state = container_statuses[0].state + if state and state.terminated: + helm_upgrade_error_message = state.terminated.message + + except Exception as e: + utils.kubernetes_exception_handler( + e, + consts.List_Kubernetes_Namespaced_Pod_Fault_Type, + "Unable to list pods", + error_message=f"Unable to list pods in namespace '{namespace}' with label selector '{label_selector}': ", + raise_error=False, + ) + telemetry.set_exception( exception=error_helm_upgrade.decode("ascii"), fault_type=consts.Install_HelmRelease_Fault_Type, @@ -2797,7 +3146,7 @@ def get_all_helm_values( def enable_features( - cmd: CLICommmand, + cmd: CLICommand, client: ConnectedClusterOperations, resource_group_name: str, cluster_name: str, @@ -2892,7 +3241,7 @@ def enable_features( kubernetes_version = check_kube_connection() # Install helm client - helm_client_location = install_helm_client() + helm_client_location = install_helm_client(cmd) release_namespace = validate_release_namespace( client, @@ -3030,7 +3379,7 @@ def enable_features( def disable_features( - cmd: CLICommmand, + cmd: CLICommand, client: ConnectedClusterOperations, resource_group_name: str, cluster_name: str, @@ -3085,7 +3434,7 @@ def disable_features( kubernetes_version = check_kube_connection() # Install helm client - helm_client_location = install_helm_client() + helm_client_location = install_helm_client(cmd) release_namespace = validate_release_namespace( client, @@ -3169,7 +3518,7 @@ def disable_features( def get_chart_and_disable_features( - cmd: CLICommmand, + cmd: CLICommand, connected_cluster: ConnectedCluster, kube_config: str | None, kube_context: str | None, @@ -3260,7 +3609,7 @@ def get_chart_and_disable_features( def disable_cluster_connect( - cmd: CLICommmand, + cmd: CLICommand, client: ConnectedClusterOperations, resource_group_name: str, cluster_name: str, @@ -3467,7 +3816,7 @@ def handle_merge( 
def client_side_proxy_wrapper( - cmd: CLICommmand, + cmd: CLICommand, client: ConnectedClusterOperations, resource_group_name: str, cluster_name: str, @@ -3535,7 +3884,7 @@ def client_side_proxy_wrapper( if "--debug" in cmd.cli_ctx.data["safe_params"]: debug_mode = True - install_location = proxybinaryutils.install_client_side_proxy(None, debug_mode) + install_location = proxybinaryutils.install_client_side_proxy(cmd, None, debug_mode) args.append(install_location) install_dir = os.path.dirname(install_location) @@ -3638,7 +3987,7 @@ def client_side_proxy_wrapper( def client_side_proxy_main( - cmd: CLICommmand, + cmd: CLICommand, tenant_id: str, client: ConnectedClusterOperations, resource_group_name: str, @@ -3709,7 +4058,7 @@ def client_side_proxy_main( def client_side_proxy( - cmd: CLICommmand, + cmd: CLICommand, tenant_id: str, client: ConnectedClusterOperations, resource_group_name: str, @@ -3842,7 +4191,7 @@ def client_side_proxy( def check_cl_registration_and_get_oid( - cmd: CLICommmand, cl_oid: str | None, subscription_id: str | None + cmd: CLICommand, cl_oid: str | None, subscription_id: str | None ) -> tuple[bool, str]: print( f"Step: {utils.get_utctimestring()}: Checking Custom Location(Microsoft.ExtendedLocation) RP Registration state for this Subscription, and attempt to get the Custom Location Object ID (OID),if registered" @@ -3881,7 +4230,7 @@ def check_cl_registration_and_get_oid( return enable_custom_locations, custom_locations_oid -def get_custom_locations_oid(cmd: CLICommmand, cl_oid: str | None) -> str: +def get_custom_locations_oid(cmd: CLICommand, cl_oid: str | None) -> str: try: graph_client = graph_client_factory(cmd.cli_ctx) app_id = "bc313c14-388c-4e7d-a58e-70017303ee3b" @@ -3942,7 +4291,7 @@ def get_custom_locations_oid(cmd: CLICommmand, cl_oid: str | None) -> str: def troubleshoot( - cmd: CLICommmand, + cmd: CLICommand, client: ConnectedClusterOperations, resource_group_name: str, cluster_name: str, @@ -3985,7 +4334,7 @@ def troubleshoot( load_kube_config(kube_config, kube_context, skip_ssl_verification) # Install helm client - helm_client_location = install_helm_client() + helm_client_location = install_helm_client(cmd) # Install kubectl client kubectl_client_location = install_kubectl_client() @@ -4392,16 +4741,36 @@ def install_kubectl_client() -> str: def crd_cleanup_force_delete( - kubectl_client_location: str, kube_config: str | None, kube_context: str | None + cmd: CLICommand, + kubectl_client_location: str, + kube_config: str | None, + kube_context: str | None, ) -> None: print(f"Step: {utils.get_utctimestring()}: Deleting Arc CRDs") + + active_directory_array = cmd.cli_ctx.cloud.endpoints.active_directory.split(".") + # default for public, mc, ff clouds + cloud_based_domain = active_directory_array[2] + # special cases for USSec/USNat clouds + if len(active_directory_array) == 4: + cloud_based_domain = active_directory_array[2] + "." + active_directory_array[3] + elif len(active_directory_array) == 5: + cloud_based_domain = ( + active_directory_array[2] + + "." + + active_directory_array[3] + + "." 
+ + active_directory_array[4] + ) + timeout_for_crd_deletion = "20s" for crds in consts.CRD_FOR_FORCE_DELETE: + full_crds = f"{crds}.{cloud_based_domain}" cmd_helm_delete = [ kubectl_client_location, "delete", "crds", - crds, + full_crds, "--ignore-not-found", "--wait", "--timeout", @@ -4424,7 +4793,8 @@ def crd_cleanup_force_delete( # Patch if CRD is in Terminating state for crds in consts.CRD_FOR_FORCE_DELETE: - cmd = [kubectl_client_location, "get", "crd", crds, "-ojson"] + full_crds = f"{crds}.{cloud_based_domain}" + cmd = [kubectl_client_location, "get", "crd", full_crds, "-ojson"] if kube_config: cmd.extend(["--kubeconfig", kube_config]) if kube_context: @@ -4441,7 +4811,7 @@ def crd_cleanup_force_delete( kubectl_client_location, "patch", "crd", - crds, + full_crds, "--type=merge", "--patch-file", yaml_file_path, diff --git a/src/connectedk8s/azext_connectedk8s/tests/latest/agent_update_validator_test_config/ArcAgentryValues.json b/src/connectedk8s/azext_connectedk8s/tests/latest/agent_update_validator_test_config/ArcAgentryValues.json new file mode 100644 index 00000000000..8e15c5bdde0 --- /dev/null +++ b/src/connectedk8s/azext_connectedk8s/tests/latest/agent_update_validator_test_config/ArcAgentryValues.json @@ -0,0 +1,7 @@ +{ + "systemDefaultValues": { + "agent-update-validator": { + "config_dp_endpoint_override": "http://fake-dp-server-service.graytowntest.svc.cluster.local:80" + } + } +} \ No newline at end of file diff --git a/src/connectedk8s/azext_connectedk8s/tests/latest/agent_update_validator_test_config/fake_ext_config.yml b/src/connectedk8s/azext_connectedk8s/tests/latest/agent_update_validator_test_config/fake_ext_config.yml new file mode 100644 index 00000000000..6d814b80974 --- /dev/null +++ b/src/connectedk8s/azext_connectedk8s/tests/latest/agent_update_validator_test_config/fake_ext_config.yml @@ -0,0 +1,19 @@ +apiVersion: clusterconfig.azure.com/v1beta1 +kind: ExtensionConfig +metadata: + name: fake-ext-config + namespace: azure-arc +spec: + dependentBundles: + - appdevaddons + extensionType: microsoft.graytown.testextension + isDependentOnBundle: true + parameter: + releaseTrain: Preview + repoUrl: https://graytownacr.azurecr.io/helm/graytowntest + scope: cluster + version: 1.302.0 +status: + syncStatus: + isSyncedWithAzure: true + lastSyncTime: '2025-06-23T21:14:39.000Z' diff --git a/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py b/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py index 9d899e786da..a4df9efdffe 100644 --- a/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py +++ b/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py @@ -12,13 +12,20 @@ import stat import subprocess import time -from subprocess import PIPE +from datetime import datetime +from subprocess import PIPE, Popen import oras.client # type: ignore[import-untyped] import psutil import requests +import yaml from azure.cli.core import get_default_cli -from azure.cli.core.azclierror import RequiredArgumentMissingError, ValidationError +from azure.cli.core.azclierror import ( + ArgumentUsageError, + InvalidArgumentValueError, + RequiredArgumentMissingError, + ValidationError, +) from azure.cli.testsdk import ( # pylint: disable=import-error LiveScenarioTest, ResourceGroupPreparer, @@ -110,10 +117,11 @@ def install_helm_client(): logger.warning( "Downloading helm client for first time. This can take few minutes..." 
) - client = oras.client.OrasClient() + client = oras.client.OrasClient(hostname="mcr.microsoft.com") try: client.pull( - target=f"{consts.HELM_MCR_URL}:{artifactTag}", outdir=download_location + target=f"mcr.microsoft.com/{consts.HELM_MCR_URL}:{artifactTag}", + outdir=download_location, ) except Exception as e: logger.warning("Failed to download helm client." + str(e)) @@ -172,6 +180,35 @@ def install_kubectl_client(): return +def get_bundle_feature_flag_from_config_map( + kubectl_client_location: str, kube_config: str | None, kube_context: str | None +) -> str | None: + cmd = [ + kubectl_client_location, + "get", + "configmap", + "azure-clusterconfig", + "-n", + "azure-arc", + "-o", + "jsonpath={.data.EXTENSION_BUNDLE_ENABLED_FEATURE_FLAG}", + ] + if kube_config: + cmd.extend(["--kubeconfig", kube_config]) + if kube_context: + cmd.extend(["--context", kube_context]) + + cmd_output = Popen(cmd, stdout=PIPE, stderr=PIPE) + bundle_feature_flag, stderr = cmd_output.communicate() + if cmd_output.returncode == 0: + return bundle_feature_flag.decode() + else: + logger.warning( + "Failed to get bundle feature flag from config map: " + str(stderr.decode()) + ) + return None + + class Connectedk8sScenarioTest(LiveScenarioTest): @live_only() @ResourceGroupPreparer( @@ -266,6 +303,364 @@ def test_connect_withoidcandselfhostedissuer(self, resource_group): # delete the kube config os.remove(_get_test_data_file(managed_cluster_name + "-config.yaml")) + @live_only() + @ResourceGroupPreparer( + name_prefix="conk8stest", location=CONFIG["location"], random_name_length=16 + ) + def test_connect_withbundlefeatureflag(self, resource_group): + managed_cluster_name = self.create_random_name(prefix="test-connect", length=24) + kubeconfig = _get_test_data_file(managed_cluster_name + "-config.yaml") + self.kwargs.update( + { + "rg": resource_group, + "name": self.create_random_name(prefix="cc-", length=12), + "kubeconfig": kubeconfig, + "managed_cluster_name": managed_cluster_name, + "location": CONFIG["location"], + } + ) + + self.cmd("aks create -g {rg} -n {managed_cluster_name} --generate-ssh-keys") + self.cmd( + "aks get-credentials -g {rg} -n {managed_cluster_name} -f {kubeconfig} --admin" + ) + + with self.assertRaisesRegex( + InvalidArgumentValueError, "Not supported value for the feature flag" + ): + self.cmd( + "connectedk8s connect -g {rg} -n {name} --location {location} --disable-auto-upgrade --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions='off'" + ) + + with self.assertRaisesRegex( + ArgumentUsageError, + "versionManagedExtensions 'disabled' mode can only be set using 'az connectedk8s update'.", + ): + self.cmd( + "connectedk8s connect -g {rg} -n {name} --location {location} --disable-auto-upgrade --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions='disabled'" + ) + + with self.assertLogs(level="WARNING") as cm: + self.cmd( + "connectedk8s connect -g {rg} -n {name} -l {location} --disable-auto-upgrade --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions='preview' --yes", + checks=[ + self.check("resourceGroup", "{rg}"), + self.check("name", "{name}"), + self.check( + "arcAgentryConfigurations[0].settings.versionManagedExtensions", + "preview", + ), + ], + ) + + self.assertIn( + "All SLA support is discontinued, and the cluster will remain in 'preview' mode until it is disconnected from 
Arc.", + "".join(cm.output), + ) + + kubectl_client_location = install_kubectl_client() + configmap_bundle_feature_flag = get_bundle_feature_flag_from_config_map( + kubectl_client_location, kubeconfig, f"{managed_cluster_name}-admin" + ) + self.assertEqual(configmap_bundle_feature_flag, "preview") + + with self.assertRaisesRegex( + ArgumentUsageError, + "The cluster is in versionManagedExtensions 'preview' mode, updating the value is not allowed.", + ): + self.cmd( + "connectedk8s update -g {rg} -n {name} --auto-upgrade false --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions='enabled'" + ) + + self.cmd( + "connectedk8s delete -g {rg} -n {name} --kube-config {kubeconfig} --kube-context " + "{managed_cluster_name}-admin --yes" + ) + + self.cmd( + "connectedk8s connect -g {rg} -n {name} -l {location} --disable-auto-upgrade --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions='enabled'", + checks=[ + self.check("resourceGroup", "{rg}"), + self.check("name", "{name}"), + self.check( + "arcAgentryConfigurations[0].settings.versionManagedExtensions", + "enabled", + ), + ], + ) + + configmap_bundle_feature_flag = get_bundle_feature_flag_from_config_map( + kubectl_client_location, kubeconfig, f"{managed_cluster_name}-admin" + ) + self.assertEqual(configmap_bundle_feature_flag, "enabled") + + @live_only() + @ResourceGroupPreparer( + name_prefix="conk8stest", location=CONFIG["location"], random_name_length=16 + ) + def test_update_withbundlefeatureflag(self, resource_group): + managed_cluster_name = self.create_random_name(prefix="test-update", length=24) + kubeconfig = _get_test_data_file(managed_cluster_name + "-config.yaml") + self.kwargs.update( + { + "rg": resource_group, + "name": self.create_random_name(prefix="cc-", length=12), + "kubeconfig": kubeconfig, + "managed_cluster_name": managed_cluster_name, + "location": CONFIG["location"], + } + ) + + self.cmd("aks create -g {rg} -n {managed_cluster_name} --generate-ssh-keys") + self.cmd( + "aks get-credentials -g {rg} -n {managed_cluster_name} -f {kubeconfig} --admin" + ) + + self.cmd( + "connectedk8s connect -g {rg} -n {name} -l {location} --disable-auto-upgrade --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions='enabled'", + checks=[ + self.check("resourceGroup", "{rg}"), + self.check("name", "{name}"), + self.check( + "arcAgentryConfigurations[0].settings.versionManagedExtensions", + "enabled", + ), + ], + ) + + kubectl_client_location = install_kubectl_client() + configmap_bundle_feature_flag = get_bundle_feature_flag_from_config_map( + kubectl_client_location, kubeconfig, f"{managed_cluster_name}-admin" + ) + self.assertEqual(configmap_bundle_feature_flag, "enabled") + + with self.assertRaisesRegex( + InvalidArgumentValueError, "Not supported value for the feature flag" + ): + self.cmd( + "connectedk8s update -g {rg} -n {name} --auto-upgrade false --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions='on'" + ) + + with self.assertRaisesRegex( + ArgumentUsageError, + "Updating the preview mode config with 'az connectedk8s update' is not allowed.", + ): + self.cmd( + "connectedk8s update -g {rg} -n {name} --auto-upgrade false --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions='preview'" + ) + 
+ with self.assertRaisesRegex( + ArgumentUsageError, + "Could not set extensionSets.versionManagedExtensions from 'enabled' to ''", + ): + self.cmd( + "connectedk8s update -g {rg} -n {name} --auto-upgrade false --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions=''" + ) + + # Without any bundle extensions installed on the cluster, the bundle feature flag can be set to 'disabled' + # All leading and trailing single and double quotes and whitespaces should be automatically stripped + self.cmd( + 'connectedk8s update -g {rg} -n {name} --auto-upgrade false --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions=" disabled"', + checks=[ + self.check( + "arcAgentryConfigurations[0].settings.versionManagedExtensions", + "disabled", + ), + ], + ) + + configmap_bundle_feature_flag = get_bundle_feature_flag_from_config_map( + kubectl_client_location, kubeconfig, f"{managed_cluster_name}-admin" + ) + self.assertEqual(configmap_bundle_feature_flag, "disabled") + + self.cmd( + "connectedk8s update -g {rg} -n {name} --auto-upgrade false --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions='enabled'", + checks=[ + self.check( + "arcAgentryConfigurations[0].settings.versionManagedExtensions", + "enabled", + ), + ], + ) + + self.cmd( + "k8s-extension create --cluster-name {name} --resource-group {rg} --cluster-type connectedClusters \ + --extension-type microsoft.iotoperations.platform --name azure-iot-operations-platform \ + --release-train preview --auto-upgrade-minor-version False --config installTrustManager=true \ + --config installCertManager=true --version 0.7.24 --release-namespace cert-manager --scope cluster" + ) + + self.cmd( + "k8s-extension create --cluster-name {name} --resource-group {rg} --cluster-type connectedClusters \ + --extension-type microsoft.azure.secretstore --name azure-secret-store --auto-upgrade-minor-version False \ + --config rotationPollIntervalInSeconds=120 --config validatingAdmissionPolicies.applyPolicies=false \ + --scope cluster" + ) + + # With bundle extensions installed on the cluster, the bundle feature flag cannot be set to 'disabled' + with self.assertRaisesRegex( + ArgumentUsageError, "detected the following extension types on the cluster" + ): + self.cmd( + "connectedk8s update -g {rg} -n {name} --auto-upgrade false --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions='disabled'" + ) + + configmap_bundle_feature_flag = get_bundle_feature_flag_from_config_map( + kubectl_client_location, kubeconfig, f"{managed_cluster_name}-admin" + ) + self.assertEqual(configmap_bundle_feature_flag, "enabled") + + @live_only() + @ResourceGroupPreparer( + name_prefix="conk8stest", location=CONFIG["location"], random_name_length=16 + ) + def test_upgrade_with_agentupdatevalidator(self, resource_group): + managed_cluster_name = self.create_random_name(prefix="test-upgrade", length=24) + kubeconfig = _get_test_data_file(managed_cluster_name + "-config.yaml") + self.kwargs.update( + { + "rg": resource_group, + "name": self.create_random_name(prefix="cc-", length=12), + "kubeconfig": kubeconfig, + "managed_cluster_name": managed_cluster_name, + "location": CONFIG["location"], + } + ) + self.cmd("aks create -g {rg} -n {managed_cluster_name} --generate-ssh-keys") + self.cmd( + "aks get-credentials -g {rg} -n 
{managed_cluster_name} -f {kubeconfig} --admin" + ) + + self.cmd( + "connectedk8s connect -g {rg} -n {name} -l {location} --disable-auto-upgrade --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin --config extensionSets.versionManagedExtensions='enabled'", + checks=[ + self.check("resourceGroup", "{rg}"), + self.check("name", "{name}"), + self.check( + "arcAgentryConfigurations[0].settings.versionManagedExtensions", + "enabled", + ), + ], + ) + + ns = "azure-arc" + config_dir = os.path.join( + os.path.dirname(__file__), "agent_update_validator_test_config" + ) + arc_agent_values_path = os.path.join(config_dir, "ArcAgentryValues.json") + fake_ext_config_path = os.path.join(config_dir, "fake_ext_config.yml") + + with open(fake_ext_config_path) as file: + data = yaml.safe_load(file) + + # Update the lastSyncTime to the current time + current_time = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z") + data["status"]["syncStatus"]["lastSyncTime"] = current_time + + with open(fake_ext_config_path, "w") as file: + yaml.dump(data, file, default_flow_style=False) + + kubectl_client_location = install_kubectl_client() + + # Create the fake extension config to simulate an extension that depends on the bundle + subprocess.run( + [ + kubectl_client_location, + "apply", + "-f", + fake_ext_config_path, + "--namespace", + ns, + "--kubeconfig", + kubeconfig, + "--context", + f"{managed_cluster_name}-admin", + ] + ) + + os.environ["HELMVALUESPATH"] = arc_agent_values_path + + with self.assertRaisesRegex( + CLIError, "Error: Failed to validate agent update.*?no such host" + ): + self.cmd( + "connectedk8s upgrade -g {rg} -n {name} --agent-version 1.26.0 --kube-config {kubeconfig} \ + --kube-context {managed_cluster_name}-admin", + ) + + os.environ["HELMVALUESPATH"] = "" + + # Remove the finalizers from the fake extension config to allow deletion + subprocess.run( + [ + kubectl_client_location, + "patch", + "extensionconfig", + "fake-ext-config", + "--namespace", + ns, + "--type=json", + "-p", + '[{"op": "remove", "path": "/metadata/finalizers"}]', + "--kubeconfig", + kubeconfig, + "--context", + f"{managed_cluster_name}-admin", + ] + ) + + subprocess.Popen( + [ + kubectl_client_location, + "delete", + "extensionconfig", + "fake-ext-config", + "--namespace", + ns, + "--kubeconfig", + kubeconfig, + "--context", + f"{managed_cluster_name}-admin", + ] + ) + + time.sleep(60) + + cmd_output = subprocess.Popen( + [ + kubectl_client_location, + "get", + "extensionconfig", + "fake-ext-config", + "--namespace", + ns, + "--kubeconfig", + kubeconfig, + "--context", + f"{managed_cluster_name}-admin", + ], + stdout=PIPE, + stderr=PIPE, + ) + _, error_get_ext_config = cmd_output.communicate() + + # Should fail to get the extension config as it has been deleted + assert cmd_output.returncode != 0 + @live_only() @ResourceGroupPreparer( name_prefix="conk8stest", location=CONFIG["location"], random_name_length=16 diff --git a/src/connectedk8s/setup.py b/src/connectedk8s/setup.py index 7625c142670..4f9959412ad 100644 --- a/src/connectedk8s/setup.py +++ b/src/connectedk8s/setup.py @@ -13,7 +13,7 @@ # TODO: Confirm this is the right version number you want and it matches your # HISTORY.rst entry. 
-VERSION = "1.10.7" +VERSION = "1.10.8" # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers diff --git a/testing/.gitignore b/testing/.gitignore new file mode 100644 index 00000000000..29f33294b8b --- /dev/null +++ b/testing/.gitignore @@ -0,0 +1,9 @@ +settings.json +tmp/ +bin/* +!bin/connectedk8s-1.0.0-py3-none-any.whl +!bin/k8s_extension-0.3.0-py3-none-any.whl +!bin/k8s_extension_private-0.1.0-py3-none-any.whl +!bin/k8s_configuration-1.0.0-py3-none-any.whl +!bin/connectedk8s-values.yaml +*.xml \ No newline at end of file diff --git a/testing/Bootstrap.ps1 b/testing/Bootstrap.ps1 new file mode 100644 index 00000000000..ad21cfddad2 --- /dev/null +++ b/testing/Bootstrap.ps1 @@ -0,0 +1,30 @@ +param ( + [switch] $SkipInstall, + [switch] $CI +) + +# Disable confirm prompt for script +az config set core.disable_confirm_prompt=true + +# Configuring the environment +$ENVCONFIG = Get-Content -Path $PSScriptRoot/settings.json | ConvertFrom-Json + +az account set --subscription $ENVCONFIG.subscriptionId + +if (-not (Test-Path -Path $PSScriptRoot/tmp)) { + New-Item -ItemType Directory -Path $PSScriptRoot/tmp +} + +az group show --name $envConfig.resourceGroup +if (!$?) { + Write-Host "Resource group does not exist, creating it now in region 'eastus2euap'" + az group create --name $envConfig.resourceGroup --location eastus2euap + + if (!$?) { + Write-Host "Failed to create Resource Group - exiting!" + Exit 1 + } +} + + +Copy-Item $HOME/.kube/config -Destination $PSScriptRoot/tmp/KUBECONFIG \ No newline at end of file diff --git a/testing/README.md b/testing/README.md new file mode 100644 index 00000000000..33f12b5b1a3 --- /dev/null +++ b/testing/README.md @@ -0,0 +1,116 @@ +# K8s Partner Extension Test Suite + +This repository serves as the integration testing suite for the `k8s-extension` Azure CLI module. + +## Testing Requirements + +All partners who wish to merge their __Custom Private Preview Release__ (owner: _Partner_) into the __Official Private Preview Release__ are required to author additional integration tests for their extension to ensure that their extension will continue to function correctly as more extensions are added into the __Official Private Preview Release__. + +For more information on creating these tests, see [Authoring Tests](docs/test_authoring.md) + +## Pre-Requisites + +In order to properly test all regression tests within the test suite, you must onboard an AKS cluster which you will use to generate your Azure Arc resource to test the extensions. Ensure that you have a resource group where you can onboard this cluster. + +### Required Installations + +The following installations are required in your environment for the integration tests to run correctly: + +1. [Helm 3](https://helm.sh/docs/intro/install/) +2. [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +3. [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) + +## Setup + +### Step 1: Install Pester + +This project contains [Pester](https://pester.dev/) test framework commands that are required for the integration tests to run. In an admin powershell terminal, run + +```powershell +Install-Module Pester -Force -SkipPublisherCheck +Import-Module Pester -PassThru +``` + +If you run into issues installing the framework, refer to the [Installation Guide](https://pester.dev/docs/introduction/installation) provided by the Pester docs. 
+ +### Step 2: Get Test suite files + +You can either clone this repo (preferred option, since you will be adding your tests to this suite) or copy the files in this repo locally. The rest of these instructions assume your working directory is `k8spartner-extension-testing`. + +### Step 3: Update the `k8s-extension`/`k8s-extension-private` .whl package + +This integration test suite references the .whl packages found in the `\bin` directory. After generating your `k8s-extension`/`k8s-extension-private` .whl package, copy your updated package into the `\bin` directory. + +### Step 4: Create a `settings.json` + +To onboard the AKS and Arc clusters correctly, you will need to create a `settings.json` configuration. Create a new `settings.json` file by copying the contents of the `settings.template.json` into this file. Update the subscription id, resource group, and AKS and Arc cluster name fields with your specific values. + +### Step 5: Update the extension version value in `settings.json` + +To ensure that the tests point to your `k8s-extension-private` `.whl` package, change the value of the `k8s-extension-private` key to match your package versioning in the format (Major.Minor.Patch.Extension). For example, the `k8s_extension_private-0.1.0.openservicemesh_5-py3-none-any.whl` package would have the extension versions set to: +```json +{ + "k8s-extension": "0.1.0", + "k8s-extension-private": "0.1.0.openservicemesh_5", + "connectedk8s": "0.3.5" +} + +``` + +_Note: Updates to the `connectedk8s` version and `k8s-extension` version can also be made by adding a different version of the `connectedk8s` and `k8s-extension` whl packages and changing the `connectedk8s` and `k8s-extension` values to match the (Major.Minor.Patch) version format shown above_ + +### Step 6: Run the Bootstrap Command +To bootstrap the environment with AKS and Arc clusters, run +```powershell +.\Bootstrap.ps1 +``` +This script will provision the AKS and Arc clusters needed to run the integration test suite. + +## Testing + +### Testing All Extension Suites +To test all extension test suites, you must call `.\Test.ps1` with the `-ExtensionType` parameter set to either `Public` or `Private`. Based on this flag, the test suite will install the extension type specified below: + +| `-ExtensionType` | Installs `az extension` | +| ---------------- | --------------------- | +| `Public` | `k8s-extension` | +| `Private` | `k8s-extension-private` | + +For example, when calling +```bash +.\Test.ps1 -ExtensionType Public +``` +the script will install your `k8s-extension` whl package and run the full test suite of `*.Tests.ps1` files included in the `\test\extensions` directory. + +### Testing Public Extensions Only +If you only want to run the public-preview or GA extension test cases, you can use the `-OnlyPublicTests` flag to specify this: +```bash +.\Test.ps1 -ExtensionType Public -OnlyPublicTests +``` + +### Testing Specific Extension Suite + +If you only want to run the test script on your specific test file, you can do so by specifying the path to your extension test suite in the execution call: + +```powershell +.\Test.ps1 -Path <path-to-test-suite> +``` +For example, to call the `AzureMonitor.Tests.ps1` test suite, we run +```powershell +.\Test.ps1 -ExtensionType Public -Path .\test\extensions\public\AzureMonitor.Tests.ps1 +``` + +### Skipping Extension Re-Install + +By default, the `Test.ps1` script will uninstall any old versions of `k8s-extension`/`k8s-extension-private` and re-install the version specified in `settings.json`.
If you do not want this re-installation to occur, pass the `-SkipInstall` flag to skip this process.
+
+```powershell
+.\Test.ps1 -ExtensionType Public -SkipInstall
+```
+
+## Cleanup
+To clean up the AKS and Arc clusters you provisioned during testing, run
+```powershell
+.\Cleanup.ps1
+```
+This removes the AKS and Arc clusters, as well as the `\tmp` directory created by the bootstrapping script.
\ No newline at end of file
diff --git a/testing/Test.ps1 b/testing/Test.ps1
new file mode 100644
index 00000000000..7c6f522d082
--- /dev/null
+++ b/testing/Test.ps1
@@ -0,0 +1,99 @@
+param (
+    [string] $Path,
+    [switch] $SkipInstall,
+    [switch] $CI,
+    [switch] $ParallelCI,
+    [switch] $OnlyPublicTests,
+
+    [Parameter(Mandatory=$True)]
+    [ValidateSet('connectedk8s')]
+    [string]$Type
+)
+
+# Disable confirm prompt for script
+# Only show errors, don't show warnings
+az config set core.disable_confirm_prompt=true
+az config set core.only_show_errors=true
+
+$ENVCONFIG = Get-Content -Path $PSScriptRoot/settings.json | ConvertFrom-Json
+
+# Install the powershell-yaml module
+# Needed to parse the kubeconfig file
+Install-Module -Name powershell-yaml -Force -Scope CurrentUser
+
+az account set --subscription $ENVCONFIG.subscriptionId
+
+$Env:KUBECONFIG="$PSScriptRoot/tmp/KUBECONFIG"
+$TestFileDirectory="$PSScriptRoot/results"
+
+if (-not (Test-Path -Path $TestFileDirectory)) {
+    New-Item -ItemType Directory -Path $TestFileDirectory
+}
+
+if ($Type -eq 'connectedk8s') {
+    $connectedk8sVersion = $ENVCONFIG.extensionVersion.'connectedk8s'
+    if (!$SkipInstall) {
+        Write-Host "Removing the old connectedk8s extension..."
+        az extension remove -n connectedk8s
+        Write-Host "Installing connectedk8s version $connectedk8sVersion..."
+        az extension add --source ./bin/connectedk8s-$connectedk8sVersion-py2.py3-none-any.whl
+    }
+    $testFilePaths = "$PSScriptRoot/test/configurations"
+}
+
+if ($ParallelCI) {
+    # This runs the tests in parallel during the CI pipeline to speed up testing
+
+    Write-Host "Invoking Pester to run tests from '$testFilePaths'..."
+    $testFiles = @()
+    foreach ($path in $testFilePaths)
+    {
+        $temp = Get-ChildItem $path
+        $testFiles += $temp
+    }
+    $resultFileNumber = 0
+    foreach ($testFile in $testFiles)
+    {
+        $resultFileNumber++
+        $testName = Split-Path $testFile -Leaf
+        Start-Job -ArgumentList $testName, $testFile, $resultFileNumber, $TestFileDirectory -Name $testName -ScriptBlock {
+            param($name, $testFile, $resultFileNumber, $testFileDirectory)
+
+            Write-Host "$testFile to result file #$resultFileNumber"
+            $testResult = Invoke-Pester $testFile -Passthru -Output Detailed
+            $testResult | Export-JUnitReport -Path "$testFileDirectory/$name.xml"
+        }
+    }
+
+    do {
+        Write-Host ">> Still running tests @ $(Get-Date -Format "HH:mm:ss")" -ForegroundColor Blue
+        Get-Job | Where-Object { $_.State -eq "Running" } | Format-Table -AutoSize
+        Start-Sleep -Seconds 30
+    } while((Get-Job | Where-Object { $_.State -eq "Running" } | Measure-Object).Count -ge 1)
+
+    Get-Job | Wait-Job
+    $failedJobs = Get-Job | Where-Object { -not ($_.State -eq "Completed")}
+    Get-Job | Receive-Job -AutoRemoveJob -Wait -ErrorAction 'Continue'
+
+    if ($failedJobs.Count -gt 0) {
+        Write-Host "Failed Jobs" -ForegroundColor Red
+        $failedJobs
+        throw "One or more tests failed"
+    }
+} elseif ($CI) {
+    if ($Path) {
+        $testFilePath = "$PSScriptRoot/$Path"
+    }
+    Write-Host "Invoking Pester to run tests from '$testFilePath'..."
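+    # Run the file once with -Passthru so the Pester result object can be
+    # exported as JUnit XML for the pipeline's PublishTestResults task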
+    $testResult = Invoke-Pester $testFilePath -Passthru -Output Detailed
+    $testName = Split-Path $testFilePath -Leaf
+    $testResult | Export-JUnitReport -Path "$testFileDirectory/$testName.xml"
+} else {
+    if ($Path) {
+        Write-Host "Invoking Pester to run tests from '$PSScriptRoot/$Path'"
+        Invoke-Pester -Output Detailed $PSScriptRoot/$Path
+    } else {
+        Write-Host "Invoking Pester to run tests from '$testFilePaths'..."
+        Invoke-Pester -Output Detailed $testFilePaths
+    }
+}
\ No newline at end of file
diff --git a/testing/bin/connectedk8s-1.0.0-py3-none-any.whl b/testing/bin/connectedk8s-1.0.0-py3-none-any.whl
new file mode 100644
index 00000000000..08f34250036
Binary files /dev/null and b/testing/bin/connectedk8s-1.0.0-py3-none-any.whl differ
diff --git a/testing/bin/connectedk8s-values.yaml b/testing/bin/connectedk8s-values.yaml
new file mode 100644
index 00000000000..35716eb9ba1
--- /dev/null
+++ b/testing/bin/connectedk8s-values.yaml
@@ -0,0 +1,3 @@
+systemDefaultValues:
+  extensionoperator:
+    enabled: true
\ No newline at end of file
diff --git a/testing/bin/k8s_configuration-1.0.0-py3-none-any.whl b/testing/bin/k8s_configuration-1.0.0-py3-none-any.whl
new file mode 100644
index 00000000000..cc8e8e0995f
Binary files /dev/null and b/testing/bin/k8s_configuration-1.0.0-py3-none-any.whl differ
diff --git a/testing/bin/k8s_extension-0.3.0-py3-none-any.whl b/testing/bin/k8s_extension-0.3.0-py3-none-any.whl
new file mode 100644
index 00000000000..feb28b80b43
Binary files /dev/null and b/testing/bin/k8s_extension-0.3.0-py3-none-any.whl differ
diff --git a/testing/owners.txt b/testing/owners.txt
new file mode 100644
index 00000000000..c1bbe9a9e5c
--- /dev/null
+++ b/testing/owners.txt
@@ -0,0 +1,2 @@
+joinnis
+nanthi
\ No newline at end of file
diff --git a/testing/pipeline/k8s-custom-pipelines.yml b/testing/pipeline/k8s-custom-pipelines.yml
new file mode 100644
index 00000000000..e7ef612ee2a
--- /dev/null
+++ b/testing/pipeline/k8s-custom-pipelines.yml
@@ -0,0 +1,382 @@
+resources:
+- repo: self
+
+trigger:
+  batch: true
+  branches:
+    include:
+    - 'main'
+
+pr:
+  branches:
+    include:
+    - '*'
+
+stages:
+- stage: BuildTestPublishExtension
+  displayName: "Build, Test, and Publish Extension"
+  variables:
+    TEST_PATH: $(Agent.BuildDirectory)/s/testing
+    CLI_REPO_PATH: $(Agent.BuildDirectory)/s
+    EXTENSION_NAME: "connectedk8s"
+    EXTENSION_FILE_NAME: "connectedk8s"
+    SUBSCRIPTION_ID: "15c06b1b-01d6-407b-bb21-740b8617dea3"
+    RESOURCE_GROUP: "K8sPartnerExtensionTest"
+    BASE_CLUSTER_NAME: "connectedk8s-cluster"
+  jobs:
+  - template: ./templates/run-test.yml
+    parameters:
+      jobName: BasicOnboardingTest
+      path: ./test/configurations/BasicOnboarding.Tests.ps1
+  - template: ./templates/run-test.yml
+    parameters:
+      jobName: AutoUpdateTest
+      path: ./test/configurations/AutoUpdate.Tests.ps1
+  - template: ./templates/run-test.yml
+    parameters:
+      jobName: ProxyTest
+      path: ./test/configurations/Proxy.Tests.ps1
+  - template: ./templates/run-test.yml
+    parameters:
+      jobName: GatewayTest
+      path: ./test/configurations/Gateway.Tests.ps1
+  - template: ./templates/run-test.yml
+    parameters:
+      jobName: WorkloadIdentityTest
+      path: ./test/configurations/WorkloadIdentity.Tests.ps1
+  - template: ./templates/run-test.yml
+    parameters:
+      jobName: TroubleshootTest
+      path: ./test/configurations/Troubleshoot.Tests.ps1
+  - template: ./templates/run-test.yml
+    parameters:
+      jobName: Connectedk8sProxyTest
+      path: ./test/configurations/ConnectProxy.Tests.ps1
+  - template: ./templates/run-test.yml
+    parameters:
+      jobName: 
ForcedDeleteTest + path: ./test/configurations/ForcedDelete.Tests.ps1 + - template: ./templates/run-test.yml + parameters: + jobName: BundleFeatureFlagTest + path: ./test/configurations/BundleFeatureFlag.Tests.ps1 + - job: BuildPublishExtension + pool: + vmImage: 'ubuntu-latest' + displayName: "Build and Publish the Extension Artifact" + variables: + CLI_REPO_PATH: $(Agent.BuildDirectory)/s + EXTENSION_NAME: "connectedk8s" + steps: + - task: UsePythonVersion@0 + displayName: 'Use Python 3.10' + inputs: + versionSpec: 3.10 + - bash: | + set -ev + echo "Building extension ${EXTENSION_NAME}..." + + # prepare and activate virtualenv + pip install virtualenv + python3 -m venv env/ + source env/bin/activate + + # clone azure-cli + pip install --upgrade pip + pip install azdev + + ls $(CLI_REPO_PATH) + + azdev --version + azdev setup -r $(CLI_REPO_PATH) -e $(EXTENSION_NAME) + azdev extension build $(EXTENSION_NAME) + workingDirectory: $(CLI_REPO_PATH) + displayName: "Setup and Build Extension with azdev" + - task: PublishBuildArtifacts@1 + inputs: + pathToPublish: $(CLI_REPO_PATH)/dist + +- stage: AzureCLIOfficial + displayName: "Azure Official CLI Code Checks" + dependsOn: [] + jobs: + - job: CheckLicenseHeader + displayName: "Check License" + pool: + vmImage: 'ubuntu-latest' + steps: + - task: UsePythonVersion@0 + displayName: 'Use Python 3.10' + inputs: + versionSpec: 3.10 + - bash: | + set -ev + + # prepare and activate virtualenv + python -m venv env/ + + chmod +x ./env/bin/activate + source ./env/bin/activate + + # clone azure-cli + git clone -q --single-branch -b dev https://github.com/Azure/azure-cli.git ../azure-cli + + pip install --upgrade pip + pip install -q azdev + + azdev setup -c ../azure-cli -r ./ + + azdev --version + az --version + + azdev verify license + + - job: IndexVerify + displayName: "Verify Extensions Index" + pool: + vmImage: 'ubuntu-latest' + steps: + - task: UsePythonVersion@0 + displayName: 'Use Python 3.10' + inputs: + versionSpec: 3.10 + - bash: | + #!/usr/bin/env bash + set -ev + pip install wheel==0.30.0 requests packaging + export CI="ADO" + python ./scripts/ci/test_index.py -v + displayName: "Verify Extensions Index" + + - job: UnitTests + displayName: "Unit Tests" + pool: + vmImage: 'ubuntu-latest' + steps: + - task: UsePythonVersion@0 + displayName: 'Use Python 3.12' + inputs: + versionSpec: '3.12' + - bash: pip install wheel==0.30.0 + displayName: 'Install wheel==0.30.0' + - bash: | + set -ev + + # prepare and activate virtualenv + pip install virtualenv + python -m virtualenv venv/ + source ./venv/bin/activate + + # clone azure-cli + git clone --single-branch -b dev https://github.com/Azure/azure-cli.git ../azure-cli + + pip install --upgrade pip + pip install azdev + + azdev --version + + azdev setup -c ../azure-cli -r ./ -e connectedk8s + current_dir=$(pwd) + echo "Current directory: $current_dir" + pip install pytest + cd /home/vsts/work/1/s/src/connectedk8s/azext_connectedk8s/tests/unittests + pytest --junitxml=test-results.xml + + displayName: 'Run UnitTests test' + - task: PublishTestResults@2 + inputs: + testResultsFormat: 'JUnit' + testResultsFiles: '**/test-results.xml' + failTaskOnFailedTests: true + - job: SourceTests + displayName: "Integration Tests, Build Tests" + pool: + vmImage: 'ubuntu-latest' + strategy: + matrix: + Python39: + python.version: '3.9' + Python310: + python.version: '3.10' + Python311: + python.version: '3.11' + Python312: + python.version: '3.12' + steps: + - task: UsePythonVersion@0 + displayName: 'Use Python 
$(python.version)'
+      inputs:
+        versionSpec: '$(python.version)'
+    - bash: pip install wheel==0.30.0
+      displayName: 'Install wheel==0.30.0'
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        pip install virtualenv
+        python -m virtualenv venv/
+        source ./venv/bin/activate
+
+        # clone azure-cli
+        git clone --single-branch -b dev https://github.com/Azure/azure-cli.git ../azure-cli
+
+        pip install --upgrade pip
+        pip install azdev
+
+        azdev --version
+
+        azdev setup -c ../azure-cli -r ./ -e connectedk8s
+        azdev test connectedk8s
+      displayName: 'Run integration test and build test'
+
+  - job: AzdevLinterModifiedExtensions
+    displayName: "azdev linter on Modified Extensions"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        pip install virtualenv
+        python -m virtualenv venv/
+        source ./venv/bin/activate
+
+        # clone azure-cli
+        git clone --single-branch -b dev https://github.com/Azure/azure-cli.git ../azure-cli
+
+        pip install --upgrade pip
+        pip install azdev
+
+        azdev --version
+
+        azdev setup -c ../azure-cli -r ./ -e connectedk8s
+        # Installing setuptools with a version higher than 70.0.0 will not generate metadata.json
+        pip install setuptools==70.0.0
+        pip list -v
+
+        # overwrite the default AZURE_EXTENSION_DIR set by ADO
+        AZURE_EXTENSION_DIR=~/.azure/cliextensions az --version
+
+        AZURE_EXTENSION_DIR=~/.azure/cliextensions azdev linter --include-whl-extensions connectedk8s
+      displayName: "CLI Linter on Modified Extension"
+      env:
+        ADO_PULL_REQUEST_LATEST_COMMIT: $(System.PullRequest.SourceCommitId)
+        ADO_PULL_REQUEST_TARGET_BRANCH: $(System.PullRequest.TargetBranch)
+
+  - job: AzdevStyleModifiedExtensions
+    displayName: "azdev style on Modified Extensions"
+    continueOnError: true
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        python -m venv env
+        chmod +x env/bin/activate
+        source ./env/bin/activate
+
+        # clone azure-cli
+        git clone -q --single-branch -b dev https://github.com/Azure/azure-cli.git ../azure-cli
+
+        pip install --upgrade pip
+        pip install azdev
+
+        azdev --version
+
+        azdev setup -c ../azure-cli -r ./ -e connectedk8s
+        # Installing setuptools with a version higher than 70.0.0 will not generate metadata.json
+        pip install setuptools==70.0.0
+        pip list -v
+        az --version
+
+        # overwrite the default AZURE_EXTENSION_DIR set by ADO
+        AZURE_EXTENSION_DIR=~/.azure/cliextensions az --version
+
+        AZURE_EXTENSION_DIR=~/.azure/cliextensions azdev style connectedk8s
+      displayName: "azdev style on Modified Extensions"
+      env:
+        ADO_PULL_REQUEST_LATEST_COMMIT: $(System.PullRequest.SourceCommitId)
+        ADO_PULL_REQUEST_TARGET_BRANCH: $(System.PullRequest.TargetBranch)
+
+  - job: RuffCheck
+    displayName: "Lint connectedk8s with ruff check"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        cd src/connectedk8s
+        python -m venv env
+        source ./env/bin/activate
+
+        pip install --upgrade pip
+        pip install azure-cli --editable .[linting]
+
+        ruff check
+
+      displayName: "ruff check"
+
+  - job: RuffFormat
+    displayName: "Check connectedk8s formatting with ruff"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        cd src/connectedk8s
+        python -m venv env
+        source ./env/bin/activate
+
+        pip install --upgrade pip
+        pip install azure-cli --editable .[linting]
+
+        ruff format --check
+
+      displayName: "ruff format check"
+
+  - job: TypeChecking
+    displayName: "Typecheck connectedk8s with mypy"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        cd src/connectedk8s
+        python -m venv env
+        source ./env/bin/activate
+
+        pip install --upgrade pip
+        pip install azure-cli --editable .[linting]
+
+        mypy
+
+      displayName: "mypy"
diff --git a/testing/pipeline/templates/run-test.yml b/testing/pipeline/templates/run-test.yml
new file mode 100644
index 00000000000..a15d47f4384
--- /dev/null
+++ b/testing/pipeline/templates/run-test.yml
@@ -0,0 +1,112 @@
+parameters:
+  jobName: ''
+  path: ''
+
+jobs:
+- job: ${{ parameters.jobName}}
+  pool:
+    vmImage: 'ubuntu-latest'
+  steps:
+    - bash: |
+        echo "Installing helm3"
+        curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
+        chmod 700 get_helm.sh
+        ./get_helm.sh --version v3.6.3
+        echo "Installing kubectl"
+        curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
+        chmod +x ./kubectl
+        sudo mv ./kubectl /usr/local/bin/kubectl
+        kubectl version --client
+      displayName: "Setup the VM with helm3 and kubectl"
+
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.10'
+      inputs:
+        versionSpec: 3.10
+
+    - bash: |
+        set -ev
+        echo "Building extension ${EXTENSION_NAME}..."
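+        # Mirrors the BuildPublishExtension job: set up azdev against a fresh
+        # azure-cli clone and build the extension wheel; a later step copies the
+        # wheel from dist/ into testing/bin for the test run.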
+ # prepare and activate virtualenv + pip install virtualenv + python3 -m venv env/ + source env/bin/activate + # clone azure-cli + git clone -q --single-branch -b dev https://github.com/Azure/azure-cli.git ../azure-cli + pip install --upgrade pip + pip install -q azdev + ls $(CLI_REPO_PATH) + azdev --version + azdev setup -c ../azure-cli -r $(CLI_REPO_PATH) -e $(EXTENSION_NAME) + azdev extension build $(EXTENSION_NAME) + workingDirectory: $(CLI_REPO_PATH) + displayName: "Setup and Build Extension with azdev" + + - bash: | + K8S_CONFIG_VERSION=$(ls ${EXTENSION_FILE_NAME}* | cut -d "-" -f2) + echo "##vso[task.setvariable variable=K8S_CONFIG_VERSION]$K8S_CONFIG_VERSION" + cp * $(TEST_PATH)/bin + workingDirectory: $(CLI_REPO_PATH)/dist + displayName: "Copy the Built .whl to Extension Test Path" + + - bash: | + RAND_STR=$RANDOM + AKS_CLUSTER_NAME="${BASE_CLUSTER_NAME}-${RAND_STR}-aks" + ARC_CLUSTER_NAME="${BASE_CLUSTER_NAME}-${RAND_STR}-arc" + + JSON_STRING=$(jq -n \ + --arg SUB_ID "$SUBSCRIPTION_ID" \ + --arg RG "$RESOURCE_GROUP" \ + --arg AKS_CLUSTER_NAME "$AKS_CLUSTER_NAME" \ + --arg ARC_CLUSTER_NAME "$ARC_CLUSTER_NAME" \ + --arg K8S_CONFIG_VERSION "$K8S_CONFIG_VERSION" \ + '{subscriptionId: $SUB_ID, resourceGroup: $RG, aksClusterName: $AKS_CLUSTER_NAME, arcClusterName: $ARC_CLUSTER_NAME, extensionVersion: {"connectedk8s": $K8S_CONFIG_VERSION}}') + echo $JSON_STRING > settings.json + cat settings.json + workingDirectory: $(TEST_PATH) + displayName: "Generate a settings.json file" + + - bash : | + echo "Downloading the kind script" + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.24.0/kind-linux-amd64 + chmod +x ./kind + ./kind create cluster + displayName: "Create and Start the Kind cluster" + + - bash: | + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + displayName: "Upgrade az to latest version" + + - bash: | + curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 + chmod 700 get_helm.sh + ./get_helm.sh --version v3.6.3 + displayName: "Install Helm" + + - task: AzureCLI@2 + displayName: Bootstrap + inputs: + azureSubscription: AzureResourceConnection + scriptType: pscore + scriptLocation: inlineScript + inlineScript: | + .\Bootstrap.ps1 -CI + workingDirectory: $(TEST_PATH) + + - task: AzureCLI@2 + displayName: Run the Test Suite for ${{ parameters.path }} + inputs: + azureSubscription: AzureResourceConnection + scriptType: pscore + scriptLocation: inlineScript + inlineScript: | + .\Test.ps1 -CI -Path ${{ parameters.path }} -Type connectedk8s + workingDirectory: $(TEST_PATH) + continueOnError: true + + - task: PublishTestResults@2 + inputs: + testResultsFormat: 'JUnit' + testResultsFiles: '**/testing/results/*.xml' + failTaskOnFailedTests: true + condition: succeededOrFailed() diff --git a/testing/settings.template.json b/testing/settings.template.json new file mode 100644 index 00000000000..657126c20aa --- /dev/null +++ b/testing/settings.template.json @@ -0,0 +1,12 @@ +{ + "subscriptionId": "", + "resourceGroup": "", + "aksClusterName": "", + "arcClusterName": "", + + "extensionVersion": { + "k8s-extension": "0.3.0", + "k8s-extension-private": "0.1.0", + "connectedk8s": "1.0.0" + } +} \ No newline at end of file diff --git a/testing/test/configurations/AutoUpdate.Tests.ps1 b/testing/test/configurations/AutoUpdate.Tests.ps1 new file mode 100644 index 00000000000..d55029ceeb8 --- /dev/null +++ b/testing/test/configurations/AutoUpdate.Tests.ps1 @@ -0,0 +1,62 @@ +Describe 'Auto Upgrade Scenario' { + BeforeAll { + . 
$PSScriptRoot/../helper/Constants.ps1 + } + + It 'Check if basic onboarding works with auto-upgrade disabled' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --disable-auto-upgrade --no-wait + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $autoUpdate = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentAutoUpgrade").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Auto Update: $autoUpdate" + if ($provisioningState -eq $SUCCEEDED -and $autoUpdate -eq "Disabled") { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Enable auto-upgrade using update cmd' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --auto-upgrade true + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $autoUpdate = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentAutoUpgrade").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Auto Update: $autoUpdate" + if ($provisioningState -eq $SUCCEEDED -and $autoUpdate -eq "Enabled") { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/configurations/BasicOnboarding.Tests.ps1 b/testing/test/configurations/BasicOnboarding.Tests.ps1 new file mode 100644 index 00000000000..6605d54c383 --- /dev/null +++ b/testing/test/configurations/BasicOnboarding.Tests.ps1 @@ -0,0 +1,62 @@ +Describe 'Basic Onboarding Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + } + + It 'Check if basic onboarding works correctly' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --no-wait + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $autoUpdate = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentAutoUpgrade").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Auto Update: $autoUpdate" + if ($provisioningState -eq $SUCCEEDED -and $autoUpdate -eq "Enabled") { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Disable auto-upgrade' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --auto-upgrade false + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $autoUpdate = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentAutoUpgrade").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Auto Update: $autoUpdate" + if ($provisioningState -eq $SUCCEEDED -and $autoUpdate -eq "Disabled") { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/configurations/BundleFeatureFlag.Tests.ps1 b/testing/test/configurations/BundleFeatureFlag.Tests.ps1 new file mode 100644 index 00000000000..c796a159695 --- /dev/null +++ b/testing/test/configurations/BundleFeatureFlag.Tests.ps1 @@ -0,0 +1,230 @@ +Describe 'Setting Bundle Feature Flag Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + } + + It 'Enable the bundle feature flag when connecting the cluster to Arc' { + $output = & { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $BUNDLE_FEATURE_TEST_ARC_LOCATION ` + --disable-auto-upgrade --config extensionSets.versionManagedExtensions='off' 2>&1 | Out-String + } + $output | Should -Match "Not supported value for the feature flag" + + $output = & { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $BUNDLE_FEATURE_TEST_ARC_LOCATION ` + --disable-auto-upgrade --config extensionSets.versionManagedExtensions='disabled' 2>&1 | Out-String + } + $output | Should -Match "'disabled' mode can only be set using 'az connectedk8s update'" + + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $BUNDLE_FEATURE_TEST_ARC_LOCATION ` + --disable-auto-upgrade --config extensionSets.versionManagedExtensions='preview' --no-wait --yes + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $bundleFeatureFlag = $jsonOutput.RootElement.GetProperty("arcAgentryConfigurations")[0].GetProperty("settings").GetProperty("versionManagedExtensions").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Bundle Feature Flag: $bundleFeatureFlag" + if ($provisioningState -eq $SUCCEEDED -and $bundleFeatureFlag -eq "preview") + { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + + $output = & { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --auto-upgrade false ` + --config extensionSets.versionManagedExtensions='enabled' 2>&1 | Out-String + } + $output | Should -Match "The cluster is in versionManagedExtensions 'preview' mode, updating the value is not allowed." + + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $BUNDLE_FEATURE_TEST_ARC_LOCATION ` + --disable-auto-upgrade --config extensionSets.versionManagedExtensions='enabled' --no-wait + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $bundleFeatureFlag = $jsonOutput.RootElement.GetProperty("arcAgentryConfigurations")[0].GetProperty("settings").GetProperty("versionManagedExtensions").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Bundle Feature Flag: $bundleFeatureFlag" + if ($provisioningState -eq $SUCCEEDED -and $bundleFeatureFlag -eq "enabled") + { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Enable the bundle feature flag using update cmd' { + $output = & { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --auto-upgrade false ` + --config extensionSets.versionManagedExtensions='on' 2>&1 | Out-String + } + $output | Should -Match "Not supported value for the feature flag" + + $output = & { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --auto-upgrade false ` + --config extensionSets.versionManagedExtensions='preview' 2>&1 | Out-String + } + $output | Should -Match "Updating the preview mode config with 'az connectedk8s update' is not allowed" + + $output = & { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --auto-upgrade false ` + --config extensionSets.versionManagedExtensions='' 2>&1 | Out-String + } + $output | Should -Match "Could not set extensionSets.versionManagedExtensions from 'enabled' to ''" + + az k8s-extension create --cluster-name $ENVCONFIG.arcClusterName --resource-group $ENVCONFIG.resourceGroup ` + --cluster-type connectedClusters --extension-type microsoft.iotoperations.platform ` + --name azure-iot-operations-platform --release-train 
preview --auto-upgrade-minor-version False ` + --config installTrustManager=true --config installCertManager=true --version 0.7.24 ` + --release-namespace cert-manager --scope cluster + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + az k8s-extension create --cluster-name $ENVCONFIG.arcClusterName --resource-group $ENVCONFIG.resourceGroup ` + --cluster-type connectedClusters --extension-type microsoft.azure.secretstore ` + --name azure-secret-store --auto-upgrade-minor-version False ` + --config rotationPollIntervalInSeconds=120 --config validatingAdmissionPolicies.applyPolicies=false ` + --scope cluster + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + $output = & { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --auto-upgrade false ` + --config extensionSets.versionManagedExtensions='disabled' 2>&1 | Out-String + } + $output | Should -Match "detected the following extension types on the cluster" + + az k8s-extension delete --cluster-name $ENVCONFIG.arcClusterName --resource-group $ENVCONFIG.resourceGroup ` + --cluster-type connectedClusters --name azure-secret-store --yes + $? | Should -BeTrue + + az k8s-extension delete --cluster-name $ENVCONFIG.arcClusterName --resource-group $ENVCONFIG.resourceGroup ` + --cluster-type connectedClusters --name azure-iot-operations-platform --yes + $? | Should -BeTrue + + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --auto-upgrade false ` + --config extensionSets.versionManagedExtensions='disabled' + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $bundleFeatureFlag = $jsonOutput.RootElement.GetProperty("arcAgentryConfigurations")[0].GetProperty("settings").GetProperty("versionManagedExtensions").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Bundle Feature Flag: $bundleFeatureFlag" + if ($provisioningState -eq $SUCCEEDED -and $bundleFeatureFlag -eq "disabled") + { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Verify the error message when failing to upgrade the agent with bundle feature flag enabled" { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --auto-upgrade false ` + --config extensionSets.versionManagedExtensions='enabled' + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $bundleFeatureFlag = $jsonOutput.RootElement.GetProperty("arcAgentryConfigurations")[0].GetProperty("settings").GetProperty("versionManagedExtensions").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Bundle Feature Flag: $bundleFeatureFlag" + if ($provisioningState -eq $SUCCEEDED -and $bundleFeatureFlag -eq "enabled") + { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + + $ns = "azure-arc" + $rootDir = Resolve-Path -Path (Join-Path $PSScriptRoot "..\..\..") + + $arcAgentValuesPath = Get-ChildItem -Path $rootDir -Recurse -Filter "ArcAgentryValues.json" -File -ErrorAction SilentlyContinue | Select-Object -First 1 + $arcAgentValuesPath | Should -Not -BeNullOrEmpty -Because "ArcAgentryValues.json is expected to exist under the root directory." + + $fakeExtConfigPath = Get-ChildItem -Path $rootDir -Recurse -Filter "fake_ext_config.yml" -File -ErrorAction SilentlyContinue | Select-Object -First 1 + $fakeExtConfigPath | Should -Not -BeNullOrEmpty -Because "fake_ext_config.yml is expected to exist under the root directory." + + $tmpDir = Join-Path $rootDir "testing/tmp" + + if (-not (Test-Path $tmpDir)) { + New-Item -ItemType Directory -Path $tmpDir | Out-Null + } + + # Update lastSyncTime in fake_ext_config.yml + $updatedFakeExtConfigPath = Join-Path $tmpDir "fake_ext_config_updated.yml" + $data = Get-Content $fakeExtConfigPath | ConvertFrom-Yaml + $data.status.syncStatus.lastSyncTime = (Get-Date).ToString("yyyy-MM-ddTHH:mm:ss.000Z") + $data | ConvertTo-Yaml | Set-Content $updatedFakeExtConfigPath + + # Create the fake extension config to simulate an extension that depends on the bundle + kubectl apply -f $updatedFakeExtConfigPath --namespace $ns + $? | Should -BeTrue + + $ENV:HELMVALUESPATH = $arcAgentValuesPath + + $output = & { + az connectedk8s upgrade -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --agent-version "1.26.0" 2>&1 | Out-String + } + $output | Should -Match "Error: Failed to validate agent update.*no such host" + + $env:HELMVALUESPATH = "" + + # Remove the finalizers from the fake extension config to allow deletion + kubectl patch extensionconfig fake-ext-config --namespace $ns --type=json -p '[{"op": "remove", "path": "/metadata/finalizers"}]' + + Start-Process kubectl -ArgumentList "delete extensionconfig fake-ext-config --namespace $ns" -NoNewWindow + + Start-Sleep -Seconds 60 + + kubectl get extensionconfig fake-ext-config --namespace $ns 2>&1 + $? | Should -BeFalse + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? 
| Should -BeFalse
+    }
+}
\ No newline at end of file
diff --git a/testing/test/configurations/ConnectProxy.Tests.ps1 b/testing/test/configurations/ConnectProxy.Tests.ps1
new file mode 100644
index 00000000000..4de00bbeba0
--- /dev/null
+++ b/testing/test/configurations/ConnectProxy.Tests.ps1
@@ -0,0 +1,98 @@
+Describe 'Connectedk8s Proxy Scenario' {
+    BeforeAll {
+        . $PSScriptRoot/../helper/Constants.ps1
+    }
+
+    It 'Check if basic onboarding works correctly' {
+        az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --no-wait
+        $? | Should -BeTrue
+        Start-Sleep -Seconds 10
+
+        # Loop and retry until the configuration installs
+        $n = 0
+        do
+        {
+            $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+            $provisioningState = ($output | ConvertFrom-Json).provisioningState
+            Write-Host "Provisioning State: $provisioningState"
+            if ($provisioningState -eq $SUCCEEDED) {
+                break
+            }
+            Start-Sleep -Seconds 10
+            $n += 1
+        } while ($n -le $MAX_RETRY_ATTEMPTS)
+        $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS
+    }
+
+    It 'Connectedk8s proxy test with non-empty kubeconfig' {
+        # Start the proxy command as a background job
+        $proxyJob = Start-Job -ScriptBlock {
+            param($ClusterName, $ResourceGroup)
+
+            # Capture output and errors
+            try {
+                $output = az connectedk8s proxy -n $ClusterName -g $ResourceGroup 2>&1
+                return @{ Success = $LASTEXITCODE -eq 0; Output = $output }
+            } catch {
+                return @{ Success = $false; Output = $_.Exception.Message }
+            }
+        } -ArgumentList $ENVCONFIG.arcClusterName, $ENVCONFIG.resourceGroup
+
+        # Give the proxy 60 seconds to establish before validating
+        Start-Sleep -Seconds 60
+
+        # Display the job state
+        Write-Host "Proxy Job State: $($proxyJob.State)"
+
+        # The proxy should still be running in the background
+        $proxyJob.State | Should -Be 'Running'
+
+        # Check if the kubeconfig file has been updated to use the proxy
+        $kubeconfigPath = "~/.kube/config"
+        $kubeconfig = Get-Content $kubeconfigPath -Raw | ConvertFrom-Yaml
+        # Extract the current context
+        $currentContext = $kubeconfig.'current-context'
+
+        # Validate that the current context is for the Arc cluster
+        $currentContext | Should -Be $ENVCONFIG.arcClusterName
+
+        # Find the cluster associated with the current context
+        $context = $kubeconfig.contexts | Where-Object { $_.name -eq $currentContext }
+        $clusterName = $context.context.cluster
+
+        # Retrieve the server URL for the cluster
+        $cluster = $kubeconfig.clusters | Where-Object { $_.name -eq $clusterName }
+        $server = $cluster.cluster.server
+
+        # Validate the server URL
+        $server | Should -Match "^https://127.0.0.1:47011/proxies/"
+
+        # Verify that kubectl works through the proxy
+        $kubectlJob = Start-Job -ScriptBlock {
+            try {
+                $output = kubectl get pods -n azure-arc 2>&1
+                return @{ Success = $LASTEXITCODE -eq 0; Output = $output }
+            } catch {
+                return @{ Success = $false; Output = $_.Exception.Message }
+            }
+        }
+
+        $kubectlJob | Wait-Job
+        $kubectlResult = Receive-Job -Job $kubectlJob
+
+        # kubectl should have succeeded against the proxied endpoint
+        $kubectlResult.Success | Should -BeTrue
+
+        Stop-Job -Job $proxyJob
+        Remove-Job -Job $proxyJob
+    }
+
+    It "Delete the connected instance" {
+        az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y
+        $? | Should -BeTrue
+
+        # Configuration should be removed from the resource model
+        az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+        $? 
| Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/configurations/ForcedDelete.Tests.ps1 b/testing/test/configurations/ForcedDelete.Tests.ps1 new file mode 100644 index 00000000000..ca38873dd54 --- /dev/null +++ b/testing/test/configurations/ForcedDelete.Tests.ps1 @@ -0,0 +1,38 @@ +Describe 'Basic Onboarding with Force delete Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + } + + It 'Check if onboarding works correctly' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --no-wait + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $autoUpdate = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentAutoUpgrade").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Auto Update: $autoUpdate" + if ($provisioningState -eq $SUCCEEDED -and $autoUpdate -eq "Enabled") { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Force delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/configurations/Gateway.Tests.ps1 b/testing/test/configurations/Gateway.Tests.ps1 new file mode 100644 index 00000000000..37dab0eccc9 --- /dev/null +++ b/testing/test/configurations/Gateway.Tests.ps1 @@ -0,0 +1,116 @@ +Describe 'Onboarding with Gateway Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + + $gatewayResourceId = "/subscriptions/15c06b1b-01d6-407b-bb21-740b8617dea3/resourceGroups/connectedk8sCLITestResources/providers/Microsoft.HybridCompute/gateways/gateway-test-cli" + } + + It 'Check if onboarding works with gateway enabled' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --gateway-resource-id $gatewayResourceId --no-wait + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $gatewayStatus = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("enabled").GetBoolean() + $gatewayId = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("resourceId").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Gateway Status: $gatewayStatus" + Write-Host "Gateway Id: $gatewayId" + if ($provisioningState -eq $SUCCEEDED -and $gatewayStatus -eq $true -and $gatewayId -eq $gatewayResourceId) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Disable the gateway' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --disable-gateway + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $gatewayStatus = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("enabled").GetBoolean() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Gateway Status: $gatewayStatus" + if ($provisioningState -eq $SUCCEEDED -and $gatewayStatus -eq $false) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Update the cluster to use gateway again using update cmd' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --gateway-resource-id $gatewayResourceId + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $gatewayStatus = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("enabled").GetBoolean() + $gatewayId = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("resourceId").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Gateway Status: $gatewayStatus" + Write-Host "Gateway Id: $gatewayId" + if ($provisioningState -eq $SUCCEEDED -and $gatewayStatus -eq $true -and $gatewayId -eq $gatewayResourceId) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Disable the gateway' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --disable-gateway + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $gatewayStatus = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("enabled").GetBoolean() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Gateway Status: $gatewayStatus" + if ($provisioningState -eq $SUCCEEDED -and $gatewayStatus -eq $false) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/configurations/Proxy.Tests.ps1 b/testing/test/configurations/Proxy.Tests.ps1 new file mode 100644 index 00000000000..bda7b06e4bc --- /dev/null +++ b/testing/test/configurations/Proxy.Tests.ps1 @@ -0,0 +1,65 @@ +Describe 'Proxy Scenario' { + BeforeAll { + . 
$PSScriptRoot/../helper/Constants.ps1
+    }
+
+    It 'Check if basic onboarding works correctly with proxy enabled' {
+        az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --proxy-skip-range logcollector --no-wait
+        $? | Should -BeTrue
+        Start-Sleep -Seconds 10
+
+        # Loop and retry until the configuration installs
+        $n = 0
+        do
+        {
+            $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+            $provisioningState = ($output | ConvertFrom-Json).provisioningState
+            Write-Host "Provisioning State: $provisioningState"
+            if ($provisioningState -eq $SUCCEEDED) {
+                $isProxyEnabled = helm get values -n azure-arc-release azure-arc -o yaml | grep isProxyEnabled
+                Write-Host "$isProxyEnabled"
+                if ($isProxyEnabled -match "isProxyEnabled: true") {
+                    break
+                }
+            }
+            Start-Sleep -Seconds 10
+            $n += 1
+        } while ($n -le $MAX_RETRY_ATTEMPTS)
+        $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS
+    }
+
+    It 'Disable proxy' {
+        az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --disable-proxy
+        $? | Should -BeTrue
+        Start-Sleep -Seconds 10
+
+        # Loop and retry until the proxy is reported as disabled
+        $n = 0
+        do
+        {
+            $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+            $provisioningState = ($output | ConvertFrom-Json).provisioningState
+            Write-Host "Provisioning State: $provisioningState"
+            if ($provisioningState -eq $SUCCEEDED) {
+                $isProxyEnabled = helm get values -n azure-arc-release azure-arc -o yaml | grep isProxyEnabled
+                Write-Host "$isProxyEnabled"
+                if ($isProxyEnabled -match "isProxyEnabled: false") {
+                    break
+                }
+            }
+            Start-Sleep -Seconds 10
+            $n += 1
+        } while ($n -le $MAX_RETRY_ATTEMPTS)
+        $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS
+    }
+
+    It "Delete the connected instance" {
+        az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y
+        $? | Should -BeTrue
+
+        # Configuration should be removed from the resource model
+        az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+        $? | Should -BeFalse
+    }
+}
\ No newline at end of file
diff --git a/testing/test/configurations/Troubleshoot.Tests.ps1 b/testing/test/configurations/Troubleshoot.Tests.ps1
new file mode 100644
index 00000000000..c9cb4e26010
--- /dev/null
+++ b/testing/test/configurations/Troubleshoot.Tests.ps1
@@ -0,0 +1,40 @@
+Describe 'Troubleshoot Scenario' {
+    BeforeAll {
+        . $PSScriptRoot/../helper/Constants.ps1
+    }
+
+    It 'Verify cluster onboarding process' {
+        az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --no-wait
+        $? | Should -BeTrue
+        Start-Sleep -Seconds 10
+
+        # Loop and retry until the configuration installs
+        $n = 0
+        do
+        {
+            $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+            $provisioningState = ($output | ConvertFrom-Json).provisioningState
+            Write-Host "Provisioning State: $provisioningState"
+            if ($provisioningState -eq $SUCCEEDED) {
+                break
+            }
+            Start-Sleep -Seconds 10
+            $n += 1
+        } while ($n -le $MAX_RETRY_ATTEMPTS)
+        $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS
+    }
+
+    It 'Verify troubleshoot command functionality' {
+        az connectedk8s troubleshoot -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+        $? | Should -BeTrue
+    }
+
+    It "Delete the connected instance" {
+        az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y
+        $? 
| Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/configurations/WorkloadIdentity.Tests.ps1 b/testing/test/configurations/WorkloadIdentity.Tests.ps1 new file mode 100644 index 00000000000..c728b6a5236 --- /dev/null +++ b/testing/test/configurations/WorkloadIdentity.Tests.ps1 @@ -0,0 +1,239 @@ +Describe 'Onboarding with Workload Identity Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + } + + It 'Check if onboarding works with oidc and workload identity enabled' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --enable-oidc-issuer --enable-workload-identity --no-wait + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $securityProfile = $jsonOutput.RootElement.GetProperty("securityProfile").GetProperty("workloadIdentity").GetProperty("enabled").GetBoolean() + $oidcIssuerProfile = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("enabled").GetBoolean() + $issuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("issuerUrl").GetString() + $selfHostedIssuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("selfHostedIssuerUrl").GetString() + $agentState = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentState").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Security Profile Status: $securityProfile" + Write-Host "OIDC Issuer Profile Status: $oidcIssuerProfile" + Write-Host "Issuer Url: $issuerUrl" + Write-Host "Self Hosted Issuer Url: $selfHostedIssuerUrl" + Write-Host "Agent State: $agentState" + if ( + $provisioningState -eq $SUCCEEDED -and + $securityProfile -eq $true -and + $oidcIssuerProfile -eq $true -and + ![string]::IsNullOrEmpty($issuerUrl) -and + $issuerUrl -like "*unitedkingdom*" -and + [string]::IsNullOrEmpty($selfHostedIssuerUrl) -and + $agentState -eq $SUCCEEDED + ) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Disable workload identity' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --disable-workload-identity + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $securityProfile = $jsonOutput.RootElement.GetProperty("securityProfile").GetProperty("workloadIdentity").GetProperty("enabled").GetBoolean() + $agentState = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentState").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Security Profile Status: $securityProfile" + Write-Host "Agent State: $agentState" + if ($provisioningState -eq $SUCCEEDED -and $securityProfile -eq $false -and $agentState -eq $SUCCEEDED) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Update the cluster to use workload identity again using update cmd' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --enable-workload-identity + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $securityProfile = $jsonOutput.RootElement.GetProperty("securityProfile").GetProperty("workloadIdentity").GetProperty("enabled").GetBoolean() + $agentState = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentState").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Security Profile Status: $securityProfile" + Write-Host "Agent State: $agentState" + if ( + $provisioningState -eq $SUCCEEDED -and + $securityProfile -eq $true -and + $agentState -eq $SUCCEEDED + ) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + Start-Sleep -Seconds 10 + } +} + +Describe 'Updating with Workload Identity Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + } + + It 'Onboard a cluster to arc' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --no-wait + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $provisioningState = ($output | ConvertFrom-Json).provisioningState + Write-Host "Provisioning State: $provisioningState" + if ($provisioningState -eq $SUCCEEDED) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Update the cluster with oidc and workload identity enabled' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --enable-oidc-issuer --enable-workload-identity + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $securityProfile = $jsonOutput.RootElement.GetProperty("securityProfile").GetProperty("workloadIdentity").GetProperty("enabled").GetBoolean() + $oidcIssuerProfile = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("enabled").GetBoolean() + $issuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("issuerUrl").GetString() + $selfHostedIssuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("selfHostedIssuerUrl").GetString() + $agentState = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentState").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Security Profile Status: $securityProfile" + Write-Host "OIDC Issuer Profile Status: $oidcIssuerProfile" + Write-Host "Issuer Url: $issuerUrl" + Write-Host "Self Hosted Issuer Url: $selfHostedIssuerUrl" + Write-Host "Agent State: $agentState" + if ( + $provisioningState -eq $SUCCEEDED -and + $securityProfile -eq $true -and + $oidcIssuerProfile -eq $true -and + ![string]::IsNullOrEmpty($issuerUrl) -and + $issuerUrl -like "*unitedkingdom*" -and + [string]::IsNullOrEmpty($selfHostedIssuerUrl) -and + $agentState -eq $SUCCEEDED + ) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + Start-Sleep -Seconds 10 + } +} + +Describe 'Creating with Workload Identity Scenario and Self Hosted Issuer' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + + $SelfHostedIssuer = "https://eastus.oic.prod-aks.azure.com/fc50e82b-3761-4218-8691-d98bcgb146da/e6c4bf03-84d9-480c-a269-37a41c28c5cb/" + } + + It 'Check if onboarding works with oidc enabled and self-hosted issuer url passed in' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --enable-oidc-issuer --self-hosted-issuer $SelfHostedIssuer --no-wait + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $oidcIssuerProfile = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("enabled").GetBoolean() + $issuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("issuerUrl").GetString() + $selfHostedIssuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("selfHostedIssuerUrl").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "OIDC Issuer Profile Status: $oidcIssuerProfile" + Write-Host "Issuer Url: $issuerUrl" + Write-Host "Self Hosted Issuer Url: $selfHostedIssuerUrl" + if ( + $provisioningState -eq $SUCCEEDED -and + $oidcIssuerProfile -eq $true -and + [string]::IsNullOrEmpty($issuerUrl) -and + ![string]::IsNullOrEmpty($selfHostedIssuerUrl) -and + $selfHostedIssuerUrl -eq $SelfHostedIssuer + ) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/helper/Constants.ps1 b/testing/test/helper/Constants.ps1 new file mode 100644 index 00000000000..9cb37568d97 --- /dev/null +++ b/testing/test/helper/Constants.ps1 @@ -0,0 +1,6 @@ +$ENVCONFIG = Get-Content -Path $PSScriptRoot/../../settings.json | ConvertFrom-Json + +$MAX_RETRY_ATTEMPTS = 30 +$ARC_LOCATION = "uksouth" +$BUNDLE_FEATURE_TEST_ARC_LOCATION = "eastus" +$SUCCEEDED = "Succeeded" \ No newline at end of file