From b9aa0ddfa6f2d30d2a84a5b477d7a14c204e4a81 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Sun, 7 Dec 2025 18:02:41 -0500 Subject: [PATCH 01/23] Add more unit tests --- tests/python/test_users.py | 26 +++++- tests/python/test_verify_local_setup.py | 102 ++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 1 deletion(-) create mode 100644 tests/python/test_verify_local_setup.py diff --git a/tests/python/test_users.py b/tests/python/test_users.py index abc1300..18eb276 100644 --- a/tests/python/test_users.py +++ b/tests/python/test_users.py @@ -4,7 +4,7 @@ import random import pytest -from users import User, Users, UserHelper +from users import User, UserName, Users, UserHelper from apimtypes import Role # ------------------------------ @@ -75,6 +75,30 @@ def test_get_user_by_role_single_match(): assert user is not None assert Role.HR_MEMBER in user.roles + +def test_get_user_by_exact_name(): + """Direct string lookups should find the matching user.""" + + user = UserHelper.get_user('Dylan Williams') + + assert user is not None + assert user.name == 'Dylan Williams' + + +def test_get_user_by_enum_name(): + """Enum members can be used to locate users.""" + + user = UserHelper.get_user(UserName.ELIZABETH_MOORE) + + assert user is not None + assert user.name == UserName.ELIZABETH_MOORE + + +def test_get_user_missing_returns_none(): + """Unknown user names should return None.""" + + assert UserHelper.get_user('Nonexistent User') is None + def test_get_user_by_role_multiple_roles(): """ Should return a user with any of the specified roles. diff --git a/tests/python/test_verify_local_setup.py b/tests/python/test_verify_local_setup.py new file mode 100644 index 0000000..90bbb42 --- /dev/null +++ b/tests/python/test_verify_local_setup.py @@ -0,0 +1,102 @@ +"""Unit tests for verify_local_setup script.""" + +from __future__ import annotations + +import importlib +import sys +from pathlib import Path +from types import ModuleType, SimpleNamespace +from typing import Any, TYPE_CHECKING, cast + +import pytest + +# Ensure the setup folder is on sys.path so the verification script is importable. 
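+# Path(__file__).resolve().parents[2] walks up from tests/python/ to the repository
+# root; inserting at position 0 lets the local setup script take precedence over any
+# installed module that happens to share its name.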
+PROJECT_ROOT = Path(__file__).resolve().parents[2] +SETUP_PATH = PROJECT_ROOT / "setup" +if str(SETUP_PATH) not in sys.path: + sys.path.insert(0, str(SETUP_PATH)) + +if TYPE_CHECKING: # pragma: no cover - placeholder for type inference + vls = cast(ModuleType, None) +else: + vls = cast(ModuleType, importlib.import_module("verify_local_setup")) + + +# ------------------------------ +# FIXTURES +# ------------------------------ + +@pytest.fixture +def temp_cwd(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> Path: + """Temporarily override Path.cwd to return tmp_path.""" + + monkeypatch.setattr(Path, "cwd", lambda: tmp_path) + return tmp_path + + +# ------------------------------ +# TESTS +# ------------------------------ + + +def test_check_virtual_environment_success(temp_cwd: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Virtual environment check should pass when .venv exists and python resides inside it.""" + + scripts_dir = temp_cwd / ".venv" / ("Scripts" if sys.platform.startswith("win") else "bin") + scripts_dir.mkdir(parents=True) + venv_python = scripts_dir / "python" + venv_python.write_text("#!/usr/bin/env python") + + monkeypatch.setattr(sys, "executable", str(venv_python)) + + assert vls.check_virtual_environment() is True + + +def test_check_required_packages_missing(monkeypatch: pytest.MonkeyPatch) -> None: + """Package check should return False when any dependency fails to import.""" + + def fake_import(name: str, *args: Any, **kwargs: Any) -> Any: + if name == "dotenv": + raise ImportError("dotenv missing") + + # Return a lightweight placeholder for expected modules. + return SimpleNamespace(__name__=name) + + monkeypatch.setattr("builtins.__import__", fake_import) + + assert vls.check_required_packages() is False + + +def test_check_vscode_settings_success(temp_cwd: Path) -> None: + """VS Code settings check should succeed when required keys are present.""" + + settings_dir = temp_cwd / ".vscode" + settings_dir.mkdir(parents=True) + (settings_dir / "settings.json").write_text( + '{\n' + ' "jupyter.defaultKernel": "apim-samples",\n' + ' "python.defaultInterpreterPath": ".venv/",\n' + ' "notebook.defaultLanguage": "python"\n' + '}\n', + encoding="utf-8", + ) + + assert vls.check_vscode_settings() is True + + +def test_check_env_file_validation(temp_cwd: Path) -> None: + """Environment file check should validate required keys.""" + + env_path = temp_cwd / ".env" + env_path.write_text("PYTHONPATH=/tmp\nPROJECT_ROOT=/repo\n", encoding="utf-8") + + assert vls.check_env_file() is True + + +def test_check_env_file_missing_key(temp_cwd: Path) -> None: + """Environment file check should fail when keys are missing.""" + + env_path = temp_cwd / ".env" + env_path.write_text("PYTHONPATH=/tmp\n", encoding="utf-8") + + assert vls.check_env_file() is False From 588172f4118da70080278d47bdc7899bbb00bfde Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Wed, 10 Dec 2025 09:36:40 -0500 Subject: [PATCH 02/23] Parallelize resource cleanup --- shared/python/utils.py | 433 ++++++++++++++++++++++++++++++++----- tests/python/test_utils.py | 25 ++- 2 files changed, 397 insertions(+), 61 deletions(-) diff --git a/shared/python/utils.py b/shared/python/utils.py index 439ac97..7926557 100644 --- a/shared/python/utils.py +++ b/shared/python/utils.py @@ -700,10 +700,205 @@ def deploy_sample(self, bicep_parameters: dict) -> Output: # PRIVATE METHODS # ------------------------------ + +# ------------------------------ +# INFRASTRUCTURE CLEANUP SYSTEM +# ------------------------------ +# +# This 
module provides a hierarchical cleanup system for Azure infrastructure with two levels of parallelization: +# +# LEVEL 1: Parallel Resource Group Cleanup +# - When cleaning multiple infrastructure instances (via cleanup_infra_deployments with multiple indexes), +# each resource group is cleaned up in parallel (up to 4 concurrent threads) +# - Each thread gets its own color-coded output for easy tracking +# +# LEVEL 2: Parallel Resource Cleanup (within each resource group) +# - Within each resource group, resources (APIM, Key Vault, Cognitive Services) are deleted and purged in parallel +# - Up to 5 resources can be processed concurrently per resource group +# - Significantly faster than sequential deletion, especially when resources have long delete/purge times +# +# Architecture: +# cleanup_infra_deployments() [Public API - manages multiple resource groups] +# ├─> _cleanup_resources_thread_safe() [Thread-safe wrapper for parallel RG cleanup] +# │ └─> _cleanup_resources_with_thread_safe_printing() +# │ └─> _cleanup_resources_parallel() +# │ +# └─> _cleanup_resources() [Direct call for single RG cleanup] +# └─> _cleanup_resources_parallel() +# +# _cleanup_resources_parallel() [Orchestrates parallel resource deletion] +# └─> _cleanup_single_resource() [Worker: deletes and purges one resource] +# +# Performance Impact: +# - Old approach: Resources deleted sequentially (6 minutes for 3 resources @ 2min each) +# - New approach: Resources deleted in parallel (~2 minutes for 3 resources) +# - With multiple resource groups: Compound parallelization benefits +# +# ------------------------------ + +def _cleanup_single_resource(resource: dict) -> tuple[bool, str]: + """ + Delete and purge a single Azure resource (worker function for parallel cleanup). + + This is the atomic unit of work that deletes and purges one resource. + Called by _cleanup_resources_parallel() which manages multiple resources concurrently. 
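+
+    Example input (illustrative; the names are hypothetical):
+        {'type': 'keyvault', 'name': 'kv-demo', 'location': 'eastus2', 'rg_name': 'rg-demo'}
+    Returns (True, '') when both the delete and the purge succeed, and
+    (False, <reason>) as soon as either step fails.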
+ + Args: + resource (dict): Resource information with keys: type, name, location, rg_name + + Returns: + tuple[bool, str]: (success, error_message) + """ + try: + resource_type = resource['type'] + resource_name = resource['name'] + rg_name = resource['rg_name'] + location = resource['location'] + + print_info(f"Deleting and purging {resource_type} '{resource_name}'...") + + # Delete the resource + if resource_type == 'cognitiveservices': + delete_cmd = f"az cognitiveservices account delete -g {rg_name} -n {resource_name}" + purge_cmd = f"az cognitiveservices account purge -g {rg_name} -n {resource_name} --location \"{location}\"" + elif resource_type == 'apim': + delete_cmd = f"az apim delete -n {resource_name} -g {rg_name} -y" + purge_cmd = f"az apim deletedservice purge --service-name {resource_name} --location \"{location}\"" + elif resource_type == 'keyvault': + delete_cmd = f"az keyvault delete -n {resource_name} -g {rg_name}" + purge_cmd = f"az keyvault purge -n {resource_name} --location \"{location}\"" + else: + return False, f"Unknown resource type: {resource_type}" + + # Execute delete + output = run(delete_cmd, f"{resource_type} '{resource_name}' deleted", f"Failed to delete {resource_type} '{resource_name}'", print_command_to_run = False, print_errors = False) + if not output.success: + return False, f"Delete failed for {resource_name}" + + # Execute purge + output = run(purge_cmd, f"{resource_type} '{resource_name}' purged", f"Failed to purge {resource_type} '{resource_name}'", print_command_to_run = False, print_errors = False) + if not output.success: + return False, f"Purge failed for {resource_name}" + + return True, "" + + except Exception as e: + return False, str(e) + + +def _cleanup_resources_parallel(resources: list[dict], thread_prefix: str = '', thread_color: str = '') -> None: + """ + Clean up multiple resources in parallel using ThreadPoolExecutor (orchestrator function). + + This function manages concurrent deletion and purging of Azure resources within a single resource group. + Can operate in two modes: regular printing or thread-safe printing (for when multiple RGs are being cleaned in parallel). 
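+    The worker pool is capped at min(len(resources), 5) so that a single resource
+    group cannot flood the Azure management API with concurrent delete/purge calls.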
+ + Args: + resources (list[dict]): List of resources to clean up, each with keys: type, name, location, rg_name + thread_prefix (str, optional): Prefix for thread-safe logging (empty = regular printing) + thread_color (str, optional): ANSI color code for thread-safe logging + """ + if not resources: + return + + # Limit concurrent operations to avoid overwhelming Azure APIs + max_workers = min(len(resources), 5) + + # Determine if we need thread-safe printing + use_thread_safe_printing = bool(thread_prefix) + + # Helper function for thread-safe or regular printing + def log_info(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '👉🏽 ', thread_color) + else: + print_info(msg) + + def log_success(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '✅ ', thread_color, show_time=True) + else: + print_success(msg) + + def log_error(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '⛔ ', BOLD_R) + else: + print_error(msg) + + def log_ok(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '✅ ', thread_color) + else: + print_ok(msg) + + def log_warning(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '⚠️ ', BOLD_Y) + else: + print_warning(msg) + + log_info(f'Starting parallel cleanup of {len(resources)} resources with {max_workers} workers...') + + completed_count = 0 + failed_count = 0 + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Submit all cleanup tasks + future_to_resource = { + executor.submit(_cleanup_single_resource, resource): resource + for resource in resources + } + + # Wait for completion and track results + for future in as_completed(future_to_resource): + resource = future_to_resource[future] + try: + success, error_msg = future.result() + completed_count += 1 + + if success: + log_success(f"✓ Cleaned up {resource['type']} '{resource['name']}' ({completed_count}/{len(resources)})") + else: + failed_count += 1 + log_error(f"✗ Failed to clean up {resource['type']} '{resource['name']}': {error_msg}") + + except Exception as e: + failed_count += 1 + log_error(f"✗ Exception cleaning up {resource['type']} '{resource['name']}': {str(e)}") + + # Summary + if failed_count == 0: + log_ok(f'All {len(resources)} resources cleaned up successfully!') + else: + log_warning(f'Completed with {failed_count} failures out of {len(resources)} total resources.') + if completed_count - failed_count > 0: + log_info(f'{completed_count - failed_count} resources cleaned up successfully.') + + +def _cleanup_resources_parallel_thread_safe(resources: list[dict], thread_prefix: str, thread_color: str) -> None: + """ + Convenience wrapper for parallel cleanup with thread-safe printing. + + Args: + resources (list[dict]): List of resources to clean up + thread_prefix (str): Thread prefix for output formatting + thread_color (str): ANSI color code for this thread + """ + _cleanup_resources_parallel(resources, thread_prefix, thread_color) + + def _cleanup_resources(deployment_name: str, rg_name: str) -> None: """ - Clean up resources associated with a deployment in a resource group. - Deletes and purges Cognitive Services, API Management, and Key Vault resources, then deletes the resource group itself. + Clean up resources in a single resource group (main cleanup entry point for sequential mode). 
+ + Lists all Azure resources (APIM, Key Vault, Cognitive Services) in a resource group, + then deletes and purges them in parallel before removing the resource group itself. Args: deployment_name (str): The deployment name (string). @@ -726,46 +921,163 @@ def _cleanup_resources(deployment_name: str, rg_name: str) -> None: try: print_info(f'Resource group : {rg_name}') - # Show the deployment details - output = run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Failed to retrieve the deployment', print_command_to_run = False, print_errors = False) + # Show the deployment details (if it exists) + output = run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Deployment not found (may be empty resource group)', print_command_to_run = False, print_errors = False) + # Collect all resources that need to be deleted and purged + resources_to_cleanup = [] + + # List CognitiveService accounts + output = run(f' az cognitiveservices account list -g {rg_name}', f'Listed CognitiveService accounts', f'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False) if output.success and output.json_data: - # Delete and purge CognitiveService accounts - output = run(f' az cognitiveservices account list -g {rg_name}', f'Listed CognitiveService accounts', f'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False) + for resource in output.json_data: + resources_to_cleanup.append({ + 'type': 'cognitiveservices', + 'name': resource['name'], + 'location': resource['location'], + 'rg_name': rg_name + }) + + # List APIM resources + output = run(f' az apim list -g {rg_name}', f'Listed APIM resources', f'Failed to list APIM resources', print_command_to_run = False, print_errors = False) + if output.success and output.json_data: + for resource in output.json_data: + resources_to_cleanup.append({ + 'type': 'apim', + 'name': resource['name'], + 'location': resource['location'], + 'rg_name': rg_name + }) + + # List Key Vault resources + output = run(f' az keyvault list -g {rg_name}', f'Listed Key Vault resources', f'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False) + if output.success and output.json_data: + for resource in output.json_data: + resources_to_cleanup.append({ + 'type': 'keyvault', + 'name': resource['name'], + 'location': resource['location'], + 'rg_name': rg_name + }) + + # Delete and purge resources in parallel if there are any + if resources_to_cleanup: + print_info(f'Found {len(resources_to_cleanup)} resources to clean up. 
Processing in parallel...')
+            _cleanup_resources_parallel(resources_to_cleanup)
+        else:
+            print_info('No resources found to clean up.')
 
-        if output.success and output.json_data:
-            for resource in output.json_data:
-                print_info(f"Deleting and purging Cognitive Service Account '{resource['name']}'...")
-                output = run(f"az cognitiveservices account delete -g {rg_name} -n {resource['name']}", f"Cognitive Services '{resource['name']}' deleted", f"Failed to delete Cognitive Services '{resource['name']}'", print_command_to_run = False, print_errors = False)
-                output = run(f"az cognitiveservices account purge -g {rg_name} -n {resource['name']} --location \"{resource['location']}\"", f"Cognitive Services '{resource['name']}' purged", f"Failed to purge Cognitive Services '{resource['name']}'", print_command_to_run = False, print_errors = False)
 
-        # Delete and purge APIM resources
-        output = run(f' az apim list -g {rg_name}', f'Listed APIM resources', f'Failed to list APIM resources', print_command_to_run = False, print_errors = False)
 
-        if output.success and output.json_data:
-            for resource in output.json_data:
-                print_info(f"Deleting and purging API Management '{resource['name']}'...")
-                output = run(f"az apim delete -n {resource['name']} -g {rg_name} -y", f"API Management '{resource['name']}' deleted", f"Failed to delete API Management '{resource['name']}'", print_command_to_run = False, print_errors = False)
-                output = run(f"az apim deletedservice purge --service-name {resource['name']} --location \"{resource['location']}\"", f"API Management '{resource['name']}' purged", f"Failed to purge API Management '{resource['name']}'", print_command_to_run = False, print_errors = False)
 
-        # Delete and purge Key Vault resources
-        output = run(f' az keyvault list -g {rg_name}', f'Listed Key Vault resources', f'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False)
-        if output.success and output.json_data:
-            for resource in output.json_data:
-                print_info(f"Deleting and purging Key Vault '{resource['name']}'...")
-                output = run(f"az keyvault delete -n {resource['name']} -g {rg_name}", f"Key Vault '{resource['name']}' deleted", f"Failed to delete Key Vault '{resource['name']}'", print_command_to_run = False, print_errors = False)
-                output = run(f"az keyvault purge -n {resource['name']} --location \"{resource['location']}\"", f"Key Vault '{resource['name']}' purged", f"Failed to purge Key Vault '{resource['name']}'", print_command_to_run = False, print_errors = False)
 
-        # Delete the resource group last
-        print_message(f"Deleting resource group '{rg_name}'...")
-        output = run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted', f'Failed to delete resource group '{rg_name}'", print_command_to_run = False, print_errors = False)
+        # Delete the resource group last (always attempt this, even if deployment doesn't exist)
+        print_message(f"Deleting resource group '{rg_name}'...")
+        output = run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted", f"Failed to delete resource group '{rg_name}'", print_command_to_run = False, print_errors = False)
 
         print_message('Cleanup completed.')
 
     except Exception as e:
         print(f'An error occurred during cleanup: {e}')
         traceback.print_exc()
 
 
+def _print_log(message: str, prefix: str = '', color: str = '', output: str = '', duration: str = '', show_time: bool = False, blank_above: bool = False, blank_below: bool = False, wrap_lines: bool = False) -> None:
+    """
+    Print a formatted log message with optional prefix, color, output, duration, and time.
+    Handles blank lines above and below the message for readability.
- # Delete the resource group last - print_message(f"Deleting resource group '{rg_name}'...") - output = run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted', f'Failed to delete resource group '{rg_name}'", print_command_to_run = False, print_errors = False) + Args: + message (str): The message to print. + prefix (str, optional): Prefix for the message. + color (str, optional): ANSI color code. + output (str, optional): Additional output to append. + duration (str, optional): Duration string to append. + show_time (bool, optional): Whether to show the current time. + blank_above (bool, optional): Whether to print a blank line above. + blank_below (bool, optional): Whether to print a blank line below. + wrap_lines (bool, optional): Whether to wrap lines to fit console width. + """ + time_str = f' ⌚ {datetime.datetime.now().time()}' if show_time else '' + output_str = f' {output}' if output else '' + + if blank_above: + return - print_message('Cleanup completed.') + # Limit concurrent operations to avoid overwhelming Azure APIs + max_workers = min(len(resources), 5) - except Exception as e: - print(f'An error occurred during cleanup: {e}') - traceback.print_exc() + # Determine if we need thread-safe printing + use_thread_safe_printing = bool(thread_prefix) + + # Helper function for thread-safe or regular printing + def log_info(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '👉🏽 ', thread_color) + else: + print_info(msg) + + def log_success(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '✅ ', thread_color, show_time=True) + else: + print_success(msg) + + def log_error(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '⛔ ', BOLD_R) + else: + print_error(msg) + + def log_ok(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '✅ ', thread_color) + else: + print_ok(msg) + + def log_warning(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '⚠️ ', BOLD_Y) + else: + print_warning(msg) + + log_info(f'Starting parallel cleanup of {len(resources)} resources with {max_workers} workers...') + + completed_count = 0 + failed_count = 0 + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Submit all cleanup tasks + future_to_resource = { + executor.submit(_cleanup_single_resource, resource): resource + for resource in resources + } + + # Wait for completion and track results + for future in as_completed(future_to_resource): + resource = future_to_resource[future] + try: + success, error_msg = future.result() + completed_count += 1 + + if success: + log_success(f"✓ Cleaned up {resource['type']} '{resource['name']}' ({completed_count}/{len(resources)})") + else: + failed_count += 1 + log_error(f"✗ Failed to clean up {resource['type']} '{resource['name']}': {error_msg}") + + except Exception as e: + failed_count += 1 + log_error(f"✗ Exception cleaning up {resource['type']} '{resource['name']}': {str(e)}") + + # Summary + if failed_count == 0: + log_ok(f'All {len(resources)} resources cleaned up successfully!') + else: + log_warning(f'Completed with {failed_count} failures out of {len(resources)} total resources.') + if completed_count - failed_count > 0: + log_info(f'{completed_count - failed_count} resources cleaned up successfully.') def _print_log(message: str, prefix: str = '', color: str = '', output: str = '', duration: str = '', 
show_time: bool = False, blank_above: bool = False, blank_below: bool = False, wrap_lines: bool = False) -> None: """ @@ -1376,7 +1688,7 @@ def _cleanup_resources_thread_safe(deployment_name: str, rg_name: str, thread_pr def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name: str, thread_prefix: str, thread_color: str) -> None: """ Clean up resources with thread-safe printing (internal implementation for parallel execution). - This is a modified version of _cleanup_resources that uses thread-safe output. + This is a modified version of _cleanup_resources that uses thread-safe output and parallel resource cleanup. """ if not deployment_name: with _print_lock: @@ -1396,35 +1708,50 @@ def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name: output = run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Failed to retrieve the deployment', print_command_to_run = False, print_errors = False) if output.success and output.json_data: - # Delete and purge CognitiveService accounts - output = run(f' az cognitiveservices account list -g {rg_name}', f'Listed CognitiveService accounts', f'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False) + # Collect all resources that need to be deleted and purged + resources_to_cleanup = [] + # List CognitiveService accounts + output = run(f' az cognitiveservices account list -g {rg_name}', f'Listed CognitiveService accounts', f'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False) if output.success and output.json_data: for resource in output.json_data: - with _print_lock: - _print_log(f"{thread_prefix}Deleting and purging Cognitive Service Account '{resource['name']}'...", '👉🏽 ', thread_color) - output = run(f"az cognitiveservices account delete -g {rg_name} -n {resource['name']}", f"Cognitive Services '{resource['name']}' deleted", f"Failed to delete Cognitive Services '{resource['name']}'", print_command_to_run = False, print_errors = False) - output = run(f"az cognitiveservices account purge -g {rg_name} -n {resource['name']} --location \"{resource['location']}\"", f"Cognitive Services '{resource['name']}' purged", f"Failed to purge Cognitive Services '{resource['name']}'", print_command_to_run = False, print_errors = False) - - # Delete and purge APIM resources + resources_to_cleanup.append({ + 'type': 'cognitiveservices', + 'name': resource['name'], + 'location': resource['location'], + 'rg_name': rg_name + }) + + # List APIM resources output = run(f' az apim list -g {rg_name}', f'Listed APIM resources', f'Failed to list APIM resources', print_command_to_run = False, print_errors = False) - if output.success and output.json_data: for resource in output.json_data: - with _print_lock: - _print_log(f"{thread_prefix}Deleting and purging API Management '{resource['name']}'...", '👉🏽 ', thread_color) - output = run(f"az apim delete -n {resource['name']} -g {rg_name} -y", f"API Management '{resource['name']}' deleted", f"Failed to delete API Management '{resource['name']}'", print_command_to_run = False, print_errors = False) - output = run(f"az apim deletedservice purge --service-name {resource['name']} --location \"{resource['location']}\"", f"API Management '{resource['name']}' purged", f"Failed to purge API Management '{resource['name']}'", print_command_to_run = False, print_errors = False) - - # Delete and purge Key Vault resources + resources_to_cleanup.append({ + 'type': 'apim', + 
'name': resource['name'], + 'location': resource['location'], + 'rg_name': rg_name + }) + + # List Key Vault resources output = run(f' az keyvault list -g {rg_name}', f'Listed Key Vault resources', f'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False) - if output.success and output.json_data: for resource in output.json_data: - with _print_lock: - _print_log(f"{thread_prefix}Deleting and purging Key Vault '{resource['name']}'...", '👉🏽 ', thread_color) - output = run(f"az keyvault delete -n {resource['name']} -g {rg_name}", f"Key Vault '{resource['name']}' deleted", f"Failed to delete Key Vault '{resource['name']}'", print_command_to_run = False, print_errors = False) - output = run(f"az keyvault purge -n {resource['name']} --location \"{resource['location']}\"", f"Key Vault '{resource['name']}' purged", f"Failed to purge Key Vault '{resource['name']}'", print_command_to_run = False, print_errors = False) + resources_to_cleanup.append({ + 'type': 'keyvault', + 'name': resource['name'], + 'location': resource['location'], + 'rg_name': rg_name + }) + + # Delete and purge resources in parallel if there are any + if resources_to_cleanup: + with _print_lock: + _print_log(f"{thread_prefix}Found {len(resources_to_cleanup)} resources to clean up. Processing in parallel...", '👉🏽 ', thread_color) + _cleanup_resources_parallel_thread_safe(resources_to_cleanup, thread_prefix, thread_color) + else: + with _print_lock: + _print_log(f"{thread_prefix}No resources found to clean up.", '👉🏽 ', thread_color) # Delete the resource group last with _print_lock: diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index c22ddff..2896213 100644 --- a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -1484,7 +1484,7 @@ def mock_run(*args, **kwargs): def test_cleanup_resources_partial_failures(monkeypatch): - """Test _cleanup_resources when some operations fail.""" + """Test _cleanup_resources when some operations fail with parallel cleanup.""" run_commands = [] def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): @@ -1518,28 +1518,37 @@ def mock_run(command, ok_message='', error_message='', print_command_to_run=True monkeypatch.setattr(utils, 'run', mock_run) monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(utils, 'print_message', lambda *a, **kw: None) + monkeypatch.setattr(utils, 'print_success', lambda *a, **kw: None) + monkeypatch.setattr(utils, 'print_error', lambda *a, **kw: None) + monkeypatch.setattr(utils, 'print_warning', lambda *a, **kw: None) + monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) # Should not raise exception even when individual operations fail utils._cleanup_resources('test-deployment', 'test-rg') - # Verify all expected commands were attempted despite failures + # Verify all listing and group operations were attempted + # Note: With parallel cleanup, if delete fails, purge is not attempted (expected behavior) expected_patterns = [ 'deployment group show', 'cognitiveservices account list', - 'cognitiveservices account delete', - 'cognitiveservices account purge', 'apim list', - 'apim delete', - 'apim deletedservice purge', 'keyvault list', - 'keyvault delete', - 'keyvault purge', 'group delete' ] for pattern in expected_patterns: assert any(pattern in cmd for cmd in run_commands), f"Expected command pattern not found: {pattern}" + # Verify delete attempts were made (even though they failed) + delete_patterns = 
[ + 'cognitiveservices account delete', + 'apim delete', + 'keyvault delete' + ] + + for pattern in delete_patterns: + assert any(pattern in cmd for cmd in run_commands), f"Expected delete command pattern not found: {pattern}" + def test_cleanup_resources_malformed_responses(monkeypatch): """Test _cleanup_resources with malformed API responses.""" From 5a3628aa1b6079ede5f38b0a1abf29c7a37eeb74 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Wed, 10 Dec 2025 11:39:27 -0500 Subject: [PATCH 03/23] Fix line ending --- .vscode/settings.json | 1 + setup/setup_python_path.py | 1 + 2 files changed, 2 insertions(+) diff --git a/.vscode/settings.json b/.vscode/settings.json index 4b70171..e9e706b 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -13,6 +13,7 @@ "files.trimTrailingWhitespace": true, "files.insertFinalNewline": true, "files.trimFinalNewlines": true, + "files.eol": "\n", "editor.renderWhitespace": "trailing", "python.defaultInterpreterPath": "./.venv/Scripts/python.exe", "python.pythonPath": "./.venv/Scripts/python.exe", diff --git a/setup/setup_python_path.py b/setup/setup_python_path.py index 6b43bae..d32f489 100644 --- a/setup/setup_python_path.py +++ b/setup/setup_python_path.py @@ -186,6 +186,7 @@ def create_vscode_settings(): "files.trimTrailingWhitespace": True, "files.insertFinalNewline": True, "files.trimFinalNewlines": True, + "files.eol": "\n", "editor.renderWhitespace": "trailing", "python.defaultInterpreterPath": "./.venv/Scripts/python.exe" if os.name == 'nt' else "./.venv/bin/python", "python.pythonPath": "./.venv/Scripts/python.exe" if os.name == 'nt' else "./.venv/bin/python", From 80ee8af05eee3bb9417c733558680fd140fe5bd8 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Wed, 10 Dec 2025 11:39:42 -0500 Subject: [PATCH 04/23] Fix pylint warning --- tests/python/test_utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index 2896213..909d7da 100644 --- a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -51,8 +51,9 @@ def mock_run_multiple(*args, **kwargs): call_count[0] += 1 if call_count[0] == 1: # First call: az account show return MagicMock(success=True, json_data=mock_json) - else: # Second call: az ad signed-in-user show - return MagicMock(success=True, json_data=mock_ad_json) + + # Second call: az ad signed-in-user show + return MagicMock(success=True, json_data=mock_ad_json) monkeypatch.setattr(utils, 'run', mock_run_multiple) result = utils.get_account_info() From fc92482d2be64d5103751d79b6f954cf22852f80 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Wed, 10 Dec 2025 11:56:12 -0500 Subject: [PATCH 05/23] Move console logging to its own file --- shared/python/console.py | 124 ++++++++ shared/python/utils.py | 190 ++---------- tests/python/test_console.py | 551 +++++++++++++++++++++++++++++++++++ 3 files changed, 692 insertions(+), 173 deletions(-) create mode 100644 shared/python/console.py create mode 100644 tests/python/test_console.py diff --git a/shared/python/console.py b/shared/python/console.py new file mode 100644 index 0000000..d65042e --- /dev/null +++ b/shared/python/console.py @@ -0,0 +1,124 @@ +""" +Console output utilities for APIM samples. + +This module provides formatted console output functions with ANSI color support, +thread-safe printing for parallel operations, and consistent message formatting. 
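+
+Typical usage:
+
+    from console import print_info, print_warning
+
+    print_info('Deploying infrastructure...')
+    print_warning('This can take several minutes.')
+
+The print_* helpers do not lock internally; callers that print from multiple
+threads should serialize writes via the shared _print_lock.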
+""" + +import datetime +import textwrap +import threading + + +# ------------------------------ +# CONSTANTS +# ------------------------------ + +# ANSI escape code constants for colored console output +BOLD_B = '\x1b[1;34m' # blue +BOLD_G = '\x1b[1;32m' # green +BOLD_R = '\x1b[1;31m' # red +BOLD_Y = '\x1b[1;33m' # yellow +BOLD_C = '\x1b[1;36m' # cyan +BOLD_M = '\x1b[1;35m' # magenta +BOLD_W = '\x1b[1;37m' # white +RESET = '\x1b[0m' + +# Thread colors for parallel operations +THREAD_COLORS = [BOLD_B, BOLD_G, BOLD_Y, BOLD_C, BOLD_M, BOLD_W] + +CONSOLE_WIDTH = 175 + +# Thread-safe print lock +_print_lock = threading.Lock() + + +# ------------------------------ +# PRIVATE METHODS +# ------------------------------ + +def _print_log(message: str, prefix: str = '', color: str = '', output: str = '', duration: str = '', show_time: bool = False, blank_above: bool = False, blank_below: bool = False, wrap_lines: bool = False) -> None: + """ + Print a formatted log message with optional prefix, color, output, duration, and time. + Handles blank lines above and below the message for readability. + + Args: + message (str): The message to print. + prefix (str, optional): Prefix for the message. + color (str, optional): ANSI color code. + output (str, optional): Additional output to append. + duration (str, optional): Duration string to append. + show_time (bool, optional): Whether to show the current time. + blank_above (bool, optional): Whether to print a blank line above. + blank_below (bool, optional): Whether to print a blank line below. + wrap_lines (bool, optional): Whether to wrap lines to fit console width. + """ + time_str = f' ⌚ {datetime.datetime.now().time()}' if show_time else '' + output_str = f' {output}' if output else '' + + if blank_above: + print() + + # To preserve explicit newlines in the message (e.g., from print_val with val_below=True), + # split the message on actual newlines and wrap each line separately, preserving blank lines and indentation. 
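+    # (textwrap.fill alone would turn those embedded newlines into ordinary spaces.)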
+ full_message = f'{prefix}{color}{message}{RESET}{time_str} {duration}{output_str}' + lines = full_message.splitlines(keepends = False) + + for line in lines: + if wrap_lines: + wrapped = textwrap.fill(line, width = CONSOLE_WIDTH) + print(wrapped) + else: + print(line) + + if blank_below: + print() + + +# ------------------------------ +# PUBLIC METHODS +# ------------------------------ + +def print_command(cmd: str = '') -> None: + """Print a command message.""" + _print_log(cmd, '⚙️ ', BOLD_B) + + +def print_error(msg: str, output: str = '', duration: str = '') -> None: + """Print an error message.""" + _print_log(msg, '⛔ ', BOLD_R, output, duration, True) + + +def print_info(msg: str, blank_above: bool = False) -> None: + """Print an informational message.""" + _print_log(msg, '👉🏽 ', BOLD_B, blank_above = blank_above) + + +def print_message(msg: str, output: str = '', duration: str = '', blank_above: bool = False) -> None: + """Print a general message.""" + _print_log(msg, 'ℹ️ ', BOLD_G, output, duration, True, blank_above) + + +def print_ok(msg: str, output: str = '', duration: str = '', blank_above: bool = True) -> None: + """Print an OK/success message.""" + _print_log(msg, '✅ ', BOLD_G, output, duration, True, blank_above) + + +def print_success(msg: str, output: str = '', duration: str = '', blank_above: bool = False) -> None: + """Print a success message.""" + _print_log(msg, '✅ ', BOLD_G, output, duration, True, blank_above) + + +def print_warning(msg: str, output: str = '', duration: str = '') -> None: + """Print a warning message.""" + _print_log(msg, '⚠️ ', BOLD_Y, output, duration, True) + + +def print_val(name: str, value: str, val_below: bool = False) -> None: + """Print a key-value pair.""" + _print_log(f"{name:<25}:{'\n' if val_below else ' '}{value}", '👉🏽 ', BOLD_B) + + +def print_header(msg: str) -> None: + """Print a header message.""" + _print_log(f"\n{'=' * len(msg)}\n{msg}\n{'=' * len(msg)}", '', BOLD_G, blank_above=True, blank_below=True) diff --git a/shared/python/utils.py b/shared/python/utils.py index 7926557..53b1ada 100644 --- a/shared/python/utils.py +++ b/shared/python/utils.py @@ -3,13 +3,11 @@ """ import ast -import datetime import json import sys import os import re import subprocess -import textwrap import time import traceback import string @@ -26,29 +24,25 @@ from typing import Any, Optional, Tuple from apimtypes import APIM_SKU, HTTP_VERB, INFRASTRUCTURE, Endpoints, _get_project_root - # ------------------------------ -# DECLARATIONS +# RE-EXPORTS (BACKWARD COMPATIBILITY) # ------------------------------ - - -# Define ANSI escape code constants for clarity in the print commands below -BOLD_B = '\x1b[1;34m' # blue -BOLD_G = '\x1b[1;32m' # green -BOLD_R = '\x1b[1;31m' # red -BOLD_Y = '\x1b[1;33m' # yellow -BOLD_C = '\x1b[1;36m' # cyan -BOLD_M = '\x1b[1;35m' # magenta -BOLD_W = '\x1b[1;37m' # white -RESET = '\x1b[0m' - -# Thread colors for parallel operations -THREAD_COLORS = [BOLD_B, BOLD_G, BOLD_Y, BOLD_C, BOLD_M, BOLD_W] - -CONSOLE_WIDTH = 175 - -# Thread-safe print lock -_print_lock = threading.Lock() +# +# The following imports are re-exported from the console module to maintain +# backward compatibility with existing code. This allows files that currently +# use `utils.print_*()` to continue working without modification. 
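+# After this import, utils.print_info is the same function object as
+# console.print_info, so existing call sites behave identically.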
+# +# For new code, consider importing directly from the console module: +# from console import print_info, print_error +# +# This re-export pattern allows for gradual migration while keeping the codebase +# functional during refactoring. +# +from console import ( + BOLD_B, BOLD_C, BOLD_G, BOLD_M, BOLD_R, BOLD_W, BOLD_Y, + CONSOLE_WIDTH, RESET, THREAD_COLORS, _print_lock, _print_log, + print_command, print_error, print_header, print_info, print_message, print_ok, print_success, print_val, print_warning +) # ------------------------------ @@ -978,145 +972,6 @@ def _cleanup_resources(deployment_name: str, rg_name: str) -> None: traceback.print_exc() -def _print_log(message: str, prefix: str = '', color: str = '', output: str = '', duration: str = '', show_time: bool = False, blank_above: bool = False, blank_below: bool = False, wrap_lines: bool = False) -> None: - """ - Print a formatted log message with optional prefix, color, output, duration, and time. - Handles blank lines above and below the message for readability. - - Args: - message (str): The message to print. - prefix (str, optional): Prefix for the message. - color (str, optional): ANSI color code. - output (str, optional): Additional output to append. - duration (str, optional): Duration string to append. - show_time (bool, optional): Whether to show the current time. - blank_above (bool, optional): Whether to print a blank line above. - blank_below (bool, optional): Whether to print a blank line below. - wrap_lines (bool, optional): Whether to wrap lines to fit console width. - """ - time_str = f' ⌚ {datetime.datetime.now().time()}' if show_time else '' - output_str = f' {output}' if output else '' - - if blank_above: - return - - # Limit concurrent operations to avoid overwhelming Azure APIs - max_workers = min(len(resources), 5) - - # Determine if we need thread-safe printing - use_thread_safe_printing = bool(thread_prefix) - - # Helper function for thread-safe or regular printing - def log_info(msg): - if use_thread_safe_printing: - with _print_lock: - _print_log(f"{thread_prefix}{msg}", '👉🏽 ', thread_color) - else: - print_info(msg) - - def log_success(msg): - if use_thread_safe_printing: - with _print_lock: - _print_log(f"{thread_prefix}{msg}", '✅ ', thread_color, show_time=True) - else: - print_success(msg) - - def log_error(msg): - if use_thread_safe_printing: - with _print_lock: - _print_log(f"{thread_prefix}{msg}", '⛔ ', BOLD_R) - else: - print_error(msg) - - def log_ok(msg): - if use_thread_safe_printing: - with _print_lock: - _print_log(f"{thread_prefix}{msg}", '✅ ', thread_color) - else: - print_ok(msg) - - def log_warning(msg): - if use_thread_safe_printing: - with _print_lock: - _print_log(f"{thread_prefix}{msg}", '⚠️ ', BOLD_Y) - else: - print_warning(msg) - - log_info(f'Starting parallel cleanup of {len(resources)} resources with {max_workers} workers...') - - completed_count = 0 - failed_count = 0 - - with ThreadPoolExecutor(max_workers=max_workers) as executor: - # Submit all cleanup tasks - future_to_resource = { - executor.submit(_cleanup_single_resource, resource): resource - for resource in resources - } - - # Wait for completion and track results - for future in as_completed(future_to_resource): - resource = future_to_resource[future] - try: - success, error_msg = future.result() - completed_count += 1 - - if success: - log_success(f"✓ Cleaned up {resource['type']} '{resource['name']}' ({completed_count}/{len(resources)})") - else: - failed_count += 1 - log_error(f"✗ Failed to clean up {resource['type']} 
'{resource['name']}': {error_msg}") - - except Exception as e: - failed_count += 1 - log_error(f"✗ Exception cleaning up {resource['type']} '{resource['name']}': {str(e)}") - - # Summary - if failed_count == 0: - log_ok(f'All {len(resources)} resources cleaned up successfully!') - else: - log_warning(f'Completed with {failed_count} failures out of {len(resources)} total resources.') - if completed_count - failed_count > 0: - log_info(f'{completed_count - failed_count} resources cleaned up successfully.') - -def _print_log(message: str, prefix: str = '', color: str = '', output: str = '', duration: str = '', show_time: bool = False, blank_above: bool = False, blank_below: bool = False, wrap_lines: bool = False) -> None: - """ - Print a formatted log message with optional prefix, color, output, duration, and time. - Handles blank lines above and below the message for readability. - - Args: - message (str): The message to print. - prefix (str, optional): Prefix for the message. - color (str, optional): ANSI color code. - output (str, optional): Additional output to append. - duration (str, optional): Duration string to append. - show_time (bool, optional): Whether to show the current time. - blank_above (bool, optional): Whether to print a blank line above. - blank_below (bool, optional): Whether to print a blank line below. - wrap_lines (bool, optional): Whether to wrap lines to fit console width. - """ - time_str = f' ⌚ {datetime.datetime.now().time()}' if show_time else '' - output_str = f' {output}' if output else '' - - if blank_above: - print() - - # To preserve explicit newlines in the message (e.g., from print_val with val_below=True), - # split the message on actual newlines and wrap each line separately, preserving blank lines and indentation. - full_message = f'{prefix}{color}{message}{RESET}{time_str} {duration}{output_str}' - lines = full_message.splitlines(keepends = False) - - for line in lines: - if (wrap_lines): - wrapped = textwrap.fill(line, width = CONSOLE_WIDTH) - print(wrapped) - else: - print(line) - - if blank_below: - print() - - def _determine_bicep_directory(infrastructure_dir: str) -> str: """ Determine the correct Bicep directory based on the current working directory and infrastructure directory name. 
@@ -1173,17 +1028,6 @@ def _determine_bicep_directory(infrastructure_dir: str) -> str: # PUBLIC METHODS # ------------------------------ -print_command = lambda cmd = '' : _print_log(cmd, '⚙️ ', BOLD_B) -print_error = lambda msg, output = '', duration = '' : _print_log(msg, '⛔ ', BOLD_R, output, duration, True) -print_info = lambda msg, blank_above = False : _print_log(msg, '👉🏽 ', BOLD_B, blank_above = blank_above) -print_message = lambda msg, output = '', duration = '', blank_above = False : _print_log(msg, 'ℹ️ ', BOLD_G, output, duration, True, blank_above) -print_ok = lambda msg, output = '', duration = '', blank_above = True : _print_log(msg, '✅ ', BOLD_G, output, duration, True, blank_above) -print_success = lambda msg, output = '', duration = '', blank_above = False : _print_log(msg, '✅ ', BOLD_G, output, duration, True, blank_above) -print_warning = lambda msg, output = '', duration = '' : _print_log(msg, '⚠️ ', BOLD_Y, output, duration, True) -print_val = lambda name, value, val_below = False : _print_log(f"{name:<25}:{'\n' if val_below else ' '}{value}", '👉🏽 ', BOLD_B) -print_header = lambda msg : _print_log(f"\n{'=' * len(msg)}\n{msg}\n{'=' * len(msg)}", '', BOLD_G, blank_above=True, blank_below=True) - - def get_azure_role_guid(role_name: str) -> Optional[str]: """ Load the Azure roles JSON file and return the GUID for the specified role name. diff --git a/tests/python/test_console.py b/tests/python/test_console.py new file mode 100644 index 0000000..225d644 --- /dev/null +++ b/tests/python/test_console.py @@ -0,0 +1,551 @@ +""" +Unit tests for the console module. + +Tests all public console output functions including formatting, colors, +thread safety, and various output options. +""" + +import io +import sys +import threading +import console + + +# ------------------------------ +# HELPER FUNCTIONS +# ------------------------------ + +def capture_output(func, *args, **kwargs): + """ + Capture stdout from a function call. 
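+
+    sys.stdout is restored in a finally block, so an assertion failure inside func
+    cannot leave stdout redirected for subsequent tests.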
+ + Args: + func: Function to call + *args: Positional arguments for the function + **kwargs: Keyword arguments for the function + + Returns: + str: Captured output + """ + captured_output = io.StringIO() + sys.stdout = captured_output + try: + func(*args, **kwargs) + return captured_output.getvalue() + finally: + sys.stdout = sys.__stdout__ + + +# ------------------------------ +# CONSTANTS TESTS +# ------------------------------ + +def test_ansi_color_constants(): + """Test that all ANSI color constants are properly defined.""" + assert console.BOLD_B == '\x1b[1;34m' + assert console.BOLD_G == '\x1b[1;32m' + assert console.BOLD_R == '\x1b[1;31m' + assert console.BOLD_Y == '\x1b[1;33m' + assert console.BOLD_C == '\x1b[1;36m' + assert console.BOLD_M == '\x1b[1;35m' + assert console.BOLD_W == '\x1b[1;37m' + assert console.RESET == '\x1b[0m' + + +def test_thread_colors_list(): + """Test that thread colors list contains expected values.""" + assert len(console.THREAD_COLORS) == 6 + assert console.BOLD_B in console.THREAD_COLORS + assert console.BOLD_G in console.THREAD_COLORS + assert console.BOLD_Y in console.THREAD_COLORS + + +def test_console_width(): + """Test that console width is set to expected value.""" + assert console.CONSOLE_WIDTH == 175 + + +def test_print_lock_exists(): + """Test that the print lock is properly initialized.""" + assert isinstance(console._print_lock, type(threading.Lock())) + + +# ------------------------------ +# print_command TESTS +# ------------------------------ + +def test_print_command_basic(): + """Test print_command with basic message.""" + output = capture_output(console.print_command, 'az login') + assert '⚙️' in output + assert 'az login' in output + assert console.BOLD_B in output + assert console.RESET in output + + +def test_print_command_empty(): + """Test print_command with empty string.""" + output = capture_output(console.print_command, '') + assert '⚙️' in output + + +def test_print_command_multiline(): + """Test print_command with multiline message.""" + output = capture_output(console.print_command, 'line1\nline2\nline3') + assert 'line1' in output + assert 'line2' in output + assert 'line3' in output + + +# ------------------------------ +# print_error TESTS +# ------------------------------ + +def test_print_error_basic(): + """Test print_error with basic message.""" + output = capture_output(console.print_error, 'Error occurred') + assert '⛔' in output + assert 'Error occurred' in output + assert console.BOLD_R in output + assert '⌚' in output # time should be shown + + +def test_print_error_with_output(): + """Test print_error with additional output.""" + output = capture_output(console.print_error, 'Failed', output='exit code 1') + assert 'Failed' in output + assert 'exit code 1' in output + + +def test_print_error_with_duration(): + """Test print_error with duration.""" + output = capture_output(console.print_error, 'Timeout', duration='30s') + assert 'Timeout' in output + assert '30s' in output + + +def test_print_error_with_all_options(): + """Test print_error with all optional parameters.""" + output = capture_output(console.print_error, 'Complete failure', output='details', duration='5s') + assert 'Complete failure' in output + assert 'details' in output + assert '5s' in output + + +# ------------------------------ +# print_info TESTS +# ------------------------------ + +def test_print_info_basic(): + """Test print_info with basic message.""" + output = capture_output(console.print_info, 'Information message') + assert '👉🏽' in 
output + assert 'Information message' in output + assert console.BOLD_B in output + + +def test_print_info_blank_above(): + """Test print_info with blank line above.""" + output = capture_output(console.print_info, 'Info', blank_above=True) + lines = output.split('\n') + assert not lines[0] # First line should be blank + assert 'Info' in output + + +def test_print_info_no_blank_above(): + """Test print_info without blank line above (default).""" + output = capture_output(console.print_info, 'Info') + assert not output.startswith('\n') + + +# ------------------------------ +# print_message TESTS +# ------------------------------ + +def test_print_message_basic(): + """Test print_message with basic message.""" + output = capture_output(console.print_message, 'General message') + assert 'ℹ️' in output + assert 'General message' in output + assert console.BOLD_G in output + assert '⌚' in output + + +def test_print_message_with_output(): + """Test print_message with additional output.""" + output = capture_output(console.print_message, 'Processing', output='success') + assert 'Processing' in output + assert 'success' in output + + +def test_print_message_with_duration(): + """Test print_message with duration.""" + output = capture_output(console.print_message, 'Completed', duration='2.5s') + assert 'Completed' in output + assert '2.5s' in output + + +def test_print_message_blank_above(): + """Test print_message with blank line above.""" + output = capture_output(console.print_message, 'Message', blank_above=True) + lines = output.split('\n') + assert not lines[0] + + +# ------------------------------ +# print_ok TESTS +# ------------------------------ + +def test_print_ok_basic(): + """Test print_ok with basic message.""" + output = capture_output(console.print_ok, 'Operation successful') + assert '✅' in output + assert 'Operation successful' in output + assert console.BOLD_G in output + assert '⌚' in output + + +def test_print_ok_default_blank_above(): + """Test print_ok has blank line above by default.""" + output = capture_output(console.print_ok, 'OK') + lines = output.split('\n') + assert not lines[0] # Default blank_above=True + + +def test_print_ok_no_blank_above(): + """Test print_ok without blank line above.""" + output = capture_output(console.print_ok, 'OK', blank_above=False) + assert not output.startswith('\n') + + +def test_print_ok_with_output_and_duration(): + """Test print_ok with all options.""" + output = capture_output(console.print_ok, 'Done', output='result', duration='1.2s') + assert 'Done' in output + assert 'result' in output + assert '1.2s' in output + + +# ------------------------------ +# print_success TESTS +# ------------------------------ + +def test_print_success_basic(): + """Test print_success with basic message.""" + output = capture_output(console.print_success, 'Success!') + assert '✅' in output + assert 'Success!' 
in output + assert console.BOLD_G in output + + +def test_print_success_no_blank_above_default(): + """Test print_success has no blank line above by default.""" + output = capture_output(console.print_success, 'Success') + assert not output.startswith('\n') + + +def test_print_success_with_blank_above(): + """Test print_success with blank line above.""" + output = capture_output(console.print_success, 'Success', blank_above=True) + lines = output.split('\n') + assert not lines[0] + + +def test_print_success_with_all_options(): + """Test print_success with all optional parameters.""" + output = capture_output(console.print_success, 'Deployed', output='url', duration='30s', blank_above=True) + assert 'Deployed' in output + assert 'url' in output + assert '30s' in output + + +# ------------------------------ +# print_warning TESTS +# ------------------------------ + +def test_print_warning_basic(): + """Test print_warning with basic message.""" + output = capture_output(console.print_warning, 'Warning message') + assert '⚠️' in output + assert 'Warning message' in output + assert console.BOLD_Y in output + assert '⌚' in output + + +def test_print_warning_with_output(): + """Test print_warning with additional output.""" + output = capture_output(console.print_warning, 'Deprecated', output='use v2 instead') + assert 'Deprecated' in output + assert 'use v2 instead' in output + + +def test_print_warning_with_duration(): + """Test print_warning with duration.""" + output = capture_output(console.print_warning, 'Slow operation', duration='45s') + assert 'Slow operation' in output + assert '45s' in output + + +# ------------------------------ +# print_val TESTS +# ------------------------------ + +def test_print_val_inline(): + """Test print_val with value on same line.""" + output = capture_output(console.print_val, 'Name', 'John Doe') + assert '👉🏽' in output + assert 'Name' in output + assert 'John Doe' in output + assert console.BOLD_B in output + # Should have colon followed by space + assert ': John Doe' in output + + +def test_print_val_below(): + """Test print_val with value on line below.""" + output = capture_output(console.print_val, 'Description', 'A long value', val_below=True) + assert 'Description' in output + assert 'A long value' in output + # Should have colon followed by newline + lines = output.split('\n') + assert any('Description' in line and ':' in line for line in lines) + + +def test_print_val_alignment(): + """Test print_val formats name with proper alignment.""" + output = capture_output(console.print_val, 'Key', 'Value') + # Name should be left-aligned in 25-char field + assert 'Key' in output + + +def test_print_val_empty_value(): + """Test print_val with empty value.""" + output = capture_output(console.print_val, 'Empty', '') + assert 'Empty' in output + + +# ------------------------------ +# print_header TESTS +# ------------------------------ + +def test_print_header_basic(): + """Test print_header with basic message.""" + output = capture_output(console.print_header, 'SECTION HEADER') + assert 'SECTION HEADER' in output + assert console.BOLD_G in output + # Should have equal signs above and below + assert '=' * len('SECTION HEADER') in output + + +def test_print_header_blank_lines(): + """Test print_header includes blank lines above and below.""" + output = capture_output(console.print_header, 'TEST') + lines = output.split('\n') + # Should have blank line at start (blank_above=True) + # Then newline in the message itself + assert not lines[0] + + +def 
test_print_header_equals_length(): + """Test print_header equals signs match message length.""" + msg = 'CONFIGURATION' + output = capture_output(console.print_header, msg) + equals_line = '=' * len(msg) + assert output.count(equals_line) == 2 # Above and below + + +def test_print_header_short_message(): + """Test print_header with very short message.""" + output = capture_output(console.print_header, 'X') + assert 'X' in output + assert '=' in output + + +# ------------------------------ +# _print_log TESTS +# ------------------------------ + +def test_print_log_basic(): + """Test _print_log with minimal parameters.""" + output = capture_output(console._print_log, 'Test message') + assert 'Test message' in output + + +def test_print_log_with_prefix(): + """Test _print_log with prefix.""" + output = capture_output(console._print_log, 'Message', prefix='>> ') + assert '>> Message' in output + + +def test_print_log_with_color(): + """Test _print_log with color.""" + output = capture_output(console._print_log, 'Colored', color=console.BOLD_R) + assert console.BOLD_R in output + assert console.RESET in output + + +def test_print_log_show_time(): + """Test _print_log with show_time enabled.""" + output = capture_output(console._print_log, 'Timed', show_time=True) + assert '⌚' in output + + +def test_print_log_blank_above(): + """Test _print_log with blank line above.""" + output = capture_output(console._print_log, 'Message', blank_above=True) + lines = output.split('\n') + assert not lines[0] + + +def test_print_log_blank_below(): + """Test _print_log with blank line below.""" + output = capture_output(console._print_log, 'Message', blank_below=True) + assert output.endswith('\n\n') + + +def test_print_log_multiline_preservation(): + """Test _print_log preserves explicit newlines in message.""" + output = capture_output(console._print_log, 'Line1\nLine2\nLine3') + lines = [line for line in output.split('\n') if line] + assert len(lines) >= 3 + assert 'Line1' in output + assert 'Line2' in output + assert 'Line3' in output + + +def test_print_log_wrap_lines(): + """Test _print_log with wrap_lines enabled.""" + long_message = 'x' * 200 # Longer than CONSOLE_WIDTH + output = capture_output(console._print_log, long_message, wrap_lines=True) + # Should wrap the line + assert 'x' in output + + +# ------------------------------ +# THREAD SAFETY TESTS +# ------------------------------ + +def test_print_lock_thread_safety(): + """Test that print operations are thread-safe.""" + results = [] + + def print_in_thread(msg): + console.print_info(msg) + results.append(msg) + + threads = [] + for i in range(10): + thread = threading.Thread(target=print_in_thread, args=(f'Thread {i}',)) + threads.append(thread) + thread.start() + + for thread in threads: + thread.join() + + assert len(results) == 10 + + +def test_concurrent_prints(): + """Test multiple concurrent print operations.""" + def concurrent_print(): + console.print_command('command') + console.print_info('info') + console.print_success('success') + + threads = [threading.Thread(target=concurrent_print) for _ in range(5)] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + + # If we get here without deadlock, test passes + + +# ------------------------------ +# EDGE CASES +# ------------------------------ + +def test_empty_strings(): + """Test all print functions handle empty strings.""" + output = capture_output(console.print_command, '') + assert output # Should still print prefix + + output = 
capture_output(console.print_error, '') + assert output + + output = capture_output(console.print_info, '') + assert output + + +def test_special_characters(): + """Test handling of special characters.""" + special = '!@#$%^&*()[]{}|\\:;"\'<>,.?/~`' + output = capture_output(console.print_info, special) + # Most characters should appear (some might be processed by terminal) + assert '!' in output or '@' in output + + +def test_unicode_characters(): + """Test handling of Unicode characters.""" + unicode_msg = '🚀 Deployment 成功 ✨' + output = capture_output(console.print_info, unicode_msg) + # Unicode should be preserved + assert '🚀' in output or 'Deployment' in output + + +def test_very_long_message(): + """Test handling of very long messages.""" + long_msg = 'A' * 1000 + output = capture_output(console.print_info, long_msg) + assert 'A' in output + + +def test_null_duration_and_output(): + """Test functions with None values for optional parameters.""" + output = capture_output(console.print_error, 'Error', output=None, duration=None) + assert 'Error' in output + + +# ------------------------------ +# INTEGRATION TESTS +# ------------------------------ + +def test_mixed_function_calls(): + """Test calling multiple different print functions in sequence.""" + output = io.StringIO() + sys.stdout = output + try: + console.print_header('TEST SUITE') + console.print_command('az login') + console.print_info('Starting test') + console.print_success('Step 1 complete') + console.print_warning('Slow operation') + console.print_error('Step 2 failed') + console.print_ok('Recovery successful') + console.print_val('Result', 'PASS') + + result = output.getvalue() + assert 'TEST SUITE' in result + assert 'az login' in result + assert 'Starting test' in result + assert 'complete' in result + assert 'PASS' in result + finally: + sys.stdout = sys.__stdout__ + + +def test_all_colors_present(): + """Test that different functions use different colors.""" + functions_and_colors = [ + (console.print_command, console.BOLD_B), + (console.print_error, console.BOLD_R), + (console.print_info, console.BOLD_B), + (console.print_message, console.BOLD_G), + (console.print_ok, console.BOLD_G), + (console.print_success, console.BOLD_G), + (console.print_warning, console.BOLD_Y), + ] + + for func, expected_color in functions_and_colors: + output = capture_output(func, 'test') + assert expected_color in output, f'{func.__name__} should use {expected_color}' From 623ab4ebace80a0c69173ae5967e8ff24c83e500 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Wed, 10 Dec 2025 16:36:06 -0500 Subject: [PATCH 06/23] Move infrastructure cleanup to its own file --- shared/python/apimtypes.py | 158 ++++++- shared/python/infrastructures.py | 466 +++++++++++++++++- shared/python/json_utils.py | 77 +++ shared/python/utils.py | 644 +------------------------ tests/python/test_infrastructures.py | 674 ++++++++++++++++++++++++++- tests/python/test_utils.py | 669 -------------------------- 6 files changed, 1370 insertions(+), 1318 deletions(-) create mode 100644 shared/python/json_utils.py diff --git a/shared/python/apimtypes.py b/shared/python/apimtypes.py index 26d53d2..8f3db60 100644 --- a/shared/python/apimtypes.py +++ b/shared/python/apimtypes.py @@ -3,10 +3,14 @@ """ import os +import json +import ast from enum import StrEnum from dataclasses import dataclass from pathlib import Path from typing import List, Optional, Any +from console import (print_error, print_val) +from json_utils import is_string_json, extract_json # 
------------------------------ @@ -154,9 +158,157 @@ class Endpoints(object): def __init__(self, deployment: INFRASTRUCTURE): self.deployment = deployment -# ------------------------------ -# CLASSES -# ------------------------------ + +class Output(object): + """ + Represents the output of a command or deployment, including success status, raw text, and parsed JSON data. + """ + + # ------------------------------ + # CONSTRUCTOR + # ------------------------------ + + def __init__(self, success: bool, text: str): + """ + Initialize the Output object with command success status and output text. + Attempts to parse JSON from the output text. + """ + + self.success = success + self.text = text + self.jsonParseException = None + + # Check if the exact string is JSON. + if (is_string_json(text)): + try: + self.json_data = json.loads(text) + except json.JSONDecodeError as e: + self.jsonParseException = e + self.json_data = extract_json(text) + else: + # Check if a substring in the string is JSON. + self.json_data = extract_json(text) + + self.is_json = self.json_data is not None + + def get(self, key: str, label: str = '', secure: bool = False, suppress_logging: bool = False) -> str | None: + """ + Retrieve a deployment output property by key, with optional label and secure masking. + + Args: + key (str): The output key to retrieve. + label (str, optional): Optional label for logging. + secure (bool, optional): If True, masks the value in logs. + + Returns: + str | None: The value as a string, or None if not found. + """ + + try: + if not isinstance(self.json_data, dict): + raise KeyError('json_data is not a dict') + + if 'properties' in self.json_data: + properties = self.json_data.get('properties') + if not isinstance(properties, dict): + raise KeyError("'properties' is not a dict in deployment result") + + outputs = properties.get('outputs') + if not isinstance(outputs, dict): + raise KeyError("'outputs' is missing or not a dict in deployment result") + + output_entry = outputs.get(key) + if not isinstance(output_entry, dict) or 'value' not in output_entry: + raise KeyError(f"Output key '{key}' not found in deployment outputs") + + deployment_output = output_entry['value'] + elif key in self.json_data: + deployment_output = self.json_data[key]['value'] + + if not suppress_logging and label: + if secure and isinstance(deployment_output, str) and len(deployment_output) >= 4: + print_val(label, f'****{deployment_output[-4:]}') + else: + print_val(label, deployment_output) + + return str(deployment_output) + + except Exception as e: + error = f"Failed to retrieve output property: '{key}'\nError: {e}" + print_error(error) + + if label: + raise Exception(error) + + return None + + def getJson(self, key: str, label: str = '', secure: bool = False, suppress_logging: bool = False) -> Any: + """ + Retrieve a deployment output property by key and return it as a JSON object. + This method is independent from get() and retrieves the raw deployment output value. + + Args: + key (str): The output key to retrieve. + label (str, optional): Optional label for logging. + secure (bool, optional): If True, masks the value in logs. + + Returns: + Any: The value as a JSON object (dict, list, etc.), or the original value if not JSON, or None if not found. 
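+
+        Example (illustrative only; assumes the deployment defines an output named 'endpoints'):
+            output.getJson('endpoints')   # the string '["https://a", "https://b"]' is returned as ['https://a', 'https://b']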
+ """ + + try: + if not isinstance(self.json_data, dict): + raise KeyError('json_data is not a dict') + + if 'properties' in self.json_data: + properties = self.json_data.get('properties') + if not isinstance(properties, dict): + raise KeyError("'properties' is not a dict in deployment result") + + outputs = properties.get('outputs') + if not isinstance(outputs, dict): + raise KeyError("'outputs' is missing or not a dict in deployment result") + + output_entry = outputs.get(key) + if not isinstance(output_entry, dict) or 'value' not in output_entry: + raise KeyError(f"Output key '{key}' not found in deployment outputs") + + deployment_output = output_entry['value'] + elif key in self.json_data: + deployment_output = self.json_data[key]['value'] + + if not suppress_logging and label: + if secure and isinstance(deployment_output, str) and len(deployment_output) >= 4: + print_val(label, f'****{deployment_output[-4:]}') + else: + print_val(label, deployment_output) + + # If the result is a string, try to parse it as JSON + if isinstance(deployment_output, str): + # First try JSON parsing (handles double quotes) + try: + return json.loads(deployment_output) + except json.JSONDecodeError: + pass + + # If JSON fails, try Python literal evaluation (handles single quotes) + try: + return ast.literal_eval(deployment_output) + except (ValueError, SyntaxError) as e: + print_error(f'Failed to parse deployment output as Python literal. Error: {e}') + pass + + # Return the original result if it's not a string or can't be parsed + return deployment_output + + except Exception as e: + error = f"Failed to retrieve output property: '{key}'\nError: {e}" + print_error(error) + + if label: + raise Exception(error) + + return None @dataclass class API: diff --git a/shared/python/infrastructures.py b/shared/python/infrastructures.py index 043afbf..0238e1f 100644 --- a/shared/python/infrastructures.py +++ b/shared/python/infrastructures.py @@ -1,14 +1,20 @@ """ -Infrastructure Types +Infrastructure Types and Cleanup Utilities """ import json import os import time +import traceback +from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path from apimtypes import * import utils from utils import Output +from console import ( + BOLD_R, BOLD_Y, RESET, THREAD_COLORS, _print_lock, _print_log, + print_error, print_info, print_message, print_ok, print_success, print_warning +) # ------------------------------ @@ -1023,3 +1029,461 @@ def _verify_infrastructure_specific(self, rg_name: str) -> bool: except Exception as e: print(f'⚠️ APPGW-APIM-PE verification failed with error: {str(e)}') return False + + +# ------------------------------ +# INFRASTRUCTURE CLEANUP FUNCTIONS +# ------------------------------ + +def _cleanup_single_resource(resource: dict) -> tuple[bool, str]: + """ + Delete and purge a single Azure resource (worker function for parallel cleanup). + + This is the atomic unit of work that deletes and purges one resource. + Called by _cleanup_resources_parallel() which manages multiple resources concurrently. 
+ + Args: + resource (dict): Resource information with keys: type, name, location, rg_name + + Returns: + tuple[bool, str]: (success, error_message) + """ + try: + resource_type = resource['type'] + resource_name = resource['name'] + rg_name = resource['rg_name'] + location = resource['location'] + + print_info(f"Deleting and purging {resource_type} '{resource_name}'...") + + # Delete the resource + if resource_type == 'cognitiveservices': + delete_cmd = f"az cognitiveservices account delete -g {rg_name} -n {resource_name}" + purge_cmd = f"az cognitiveservices account purge -g {rg_name} -n {resource_name} --location \"{location}\"" + elif resource_type == 'apim': + delete_cmd = f"az apim delete -n {resource_name} -g {rg_name} -y" + purge_cmd = f"az apim deletedservice purge --service-name {resource_name} --location \"{location}\"" + elif resource_type == 'keyvault': + delete_cmd = f"az keyvault delete -n {resource_name} -g {rg_name}" + purge_cmd = f"az keyvault purge -n {resource_name} --location \"{location}\"" + else: + return False, f"Unknown resource type: {resource_type}" + + # Execute delete + output = utils.run(delete_cmd, f"{resource_type} '{resource_name}' deleted", f"Failed to delete {resource_type} '{resource_name}'", print_command_to_run = False, print_errors = False) + if not output.success: + return False, f"Delete failed for {resource_name}" + + # Execute purge + output = utils.run(purge_cmd, f"{resource_type} '{resource_name}' purged", f"Failed to purge {resource_type} '{resource_name}'", print_command_to_run = False, print_errors = False) + if not output.success: + return False, f"Purge failed for {resource_name}" + + return True, "" + + except Exception as e: + return False, str(e) + + +def _cleanup_resources_parallel(resources: list[dict], thread_prefix: str = '', thread_color: str = '') -> None: + """ + Clean up multiple resources in parallel using ThreadPoolExecutor (orchestrator function). + + This function manages concurrent deletion and purging of Azure resources within a single resource group. + Can operate in two modes: regular printing or thread-safe printing (for when multiple RGs are being cleaned in parallel). 
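+
+    A minimal illustrative call (hypothetical resource values):
+        _cleanup_resources_parallel([{'type': 'keyvault', 'name': 'kv-sample', 'location': 'eastus', 'rg_name': 'rg-sample'}])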
+ + Args: + resources (list[dict]): List of resources to clean up, each with keys: type, name, location, rg_name + thread_prefix (str, optional): Prefix for thread-safe logging (empty = regular printing) + thread_color (str, optional): ANSI color code for thread-safe logging + """ + if not resources: + return + + # Limit concurrent operations to avoid overwhelming Azure APIs + max_workers = min(len(resources), 5) + + # Determine if we need thread-safe printing + use_thread_safe_printing = bool(thread_prefix) + + # Helper function for thread-safe or regular printing + def log_info(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '👉🏽 ', thread_color) + else: + print_info(msg) + + def log_success(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '✅ ', thread_color, show_time=True) + else: + print_success(msg) + + def log_error(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '⛔ ', BOLD_R) + else: + print_error(msg) + + def log_ok(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '✅ ', thread_color) + else: + print_ok(msg) + + def log_warning(msg): + if use_thread_safe_printing: + with _print_lock: + _print_log(f"{thread_prefix}{msg}", '⚠️ ', BOLD_Y) + else: + print_warning(msg) + + log_info(f'Starting parallel cleanup of {len(resources)} resource(s) with {max_workers} worker(s)...') + + completed_count = 0 + failed_count = 0 + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Submit all cleanup tasks + future_to_resource = { + executor.submit(_cleanup_single_resource, resource): resource + for resource in resources + } + + # Wait for completion and track results + for future in as_completed(future_to_resource): + resource = future_to_resource[future] + try: + success, error_msg = future.result() + completed_count += 1 + + if success: + log_success(f"✓ Cleaned up {resource['type']} '{resource['name']}' ({completed_count}/{len(resources)})") + else: + failed_count += 1 + log_error(f"✗ Failed to clean up {resource['type']} '{resource['name']}': {error_msg}") + + except Exception as e: + failed_count += 1 + log_error(f"✗ Exception cleaning up {resource['type']} '{resource['name']}': {str(e)}") + + # Summary + if failed_count == 0: + log_ok(f'All {len(resources)} resource(s) cleaned up successfully!') + else: + log_warning(f'Completed with {failed_count} failure(s) out of {len(resources)} total resources.') + if completed_count - failed_count > 0: + log_info(f'{completed_count - failed_count} resource(s) cleaned up successfully.') + + +def _cleanup_resources_parallel_thread_safe(resources: list[dict], thread_prefix: str, thread_color: str) -> None: + """ + Convenience wrapper for parallel cleanup with thread-safe printing. + + Args: + resources (list[dict]): List of resources to clean up + thread_prefix (str): Thread prefix for output formatting + thread_color (str): ANSI color code for this thread + """ + _cleanup_resources_parallel(resources, thread_prefix, thread_color) + + +def _cleanup_resources(deployment_name: str, rg_name: str) -> None: + """ + Clean up resources in a single resource group (main cleanup entry point for sequential mode). + + Lists all Azure resources (APIM, Key Vault, Cognitive Services) in a resource group, + then deletes and purges them in parallel before removing the resource group itself. + + Args: + deployment_name (str): The deployment name (string). 
+        rg_name (str): The resource group name.
+
+    Returns:
+        None
+
+    Raises:
+        Exception: If an error occurs during cleanup.
+    """
+    if not deployment_name:
+        print_error('Missing deployment name parameter.')
+        return
+
+    if not rg_name:
+        print_error('Missing resource group name parameter.')
+        return
+
+    try:
+        print_info(f'Resource group : {rg_name}')
+
+        # Show the deployment details (if it exists)
+        output = utils.run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Deployment not found (may be empty resource group)', print_command_to_run = False, print_errors = False)
+
+        # Collect all resources that need to be deleted and purged
+        resources_to_cleanup = []
+
+        # List CognitiveService accounts
+        output = utils.run(f' az cognitiveservices account list -g {rg_name}', f'Listed CognitiveService accounts', f'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False)
+        if output.success and output.json_data:
+            for resource in output.json_data:
+                resources_to_cleanup.append({
+                    'type': 'cognitiveservices',
+                    'name': resource['name'],
+                    'location': resource['location'],
+                    'rg_name': rg_name
+                })
+
+        # List APIM resources
+        output = utils.run(f' az apim list -g {rg_name}', f'Listed APIM resources', f'Failed to list APIM resources', print_command_to_run = False, print_errors = False)
+        if output.success and output.json_data:
+            for resource in output.json_data:
+                resources_to_cleanup.append({
+                    'type': 'apim',
+                    'name': resource['name'],
+                    'location': resource['location'],
+                    'rg_name': rg_name
+                })
+
+        # List Key Vault resources
+        output = utils.run(f' az keyvault list -g {rg_name}', f'Listed Key Vault resources', f'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False)
+        if output.success and output.json_data:
+            for resource in output.json_data:
+                resources_to_cleanup.append({
+                    'type': 'keyvault',
+                    'name': resource['name'],
+                    'location': resource['location'],
+                    'rg_name': rg_name
+                })
+
+        # Delete and purge resources in parallel if there are any
+        if resources_to_cleanup:
+            print_info(f'Found {len(resources_to_cleanup)} resource(s) to clean up. Processing in parallel...')
+            _cleanup_resources_parallel(resources_to_cleanup)
+        else:
+            print_info('No resources found to clean up.')
+
+        # Delete the resource group last (always attempt this, even if deployment doesn't exist)
+        print_message(f"Deleting resource group '{rg_name}'...")
+        output = utils.run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted", f"Failed to delete resource group '{rg_name}'", print_command_to_run = False, print_errors = False)
+
+        print_message('Cleanup completed.')
+
+    except Exception as e:
+        print(f'An error occurred during cleanup: {e}')
+        traceback.print_exc()
+
+
+def _cleanup_resources_thread_safe(deployment_name: str, rg_name: str, thread_prefix: str, thread_color: str) -> tuple[bool, str]:
+    """
+    Thread-safe wrapper for _cleanup_resources with formatted output.
+
+    Args:
+        deployment_name (str): The deployment name (string).
+        rg_name (str): The resource group name.
+        thread_prefix (str): The thread prefix for output formatting.
+        thread_color (str): ANSI color code for this thread.
+ + Returns: + tuple[bool, str]: (success, error_message) + """ + try: + with _print_lock: + _print_log(f"{thread_prefix}Starting cleanup for resource group: {rg_name}", '👉🏽 ', thread_color) + + # Create a modified version of _cleanup_resources that uses thread-safe printing + _cleanup_resources_with_thread_safe_printing(deployment_name, rg_name, thread_prefix, thread_color) + + with _print_lock: + _print_log(f"{thread_prefix}Completed cleanup for resource group: {rg_name}", '👉🏽 ', thread_color) + + return True, "" + + except Exception as e: + error_msg = f'An error occurred during cleanup of {rg_name}: {str(e)}' + with _print_lock: + _print_log(f"{thread_prefix}{error_msg}", '⛔ ', BOLD_R, show_time=True) + traceback.print_exc() + return False, error_msg + + +def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name: str, thread_prefix: str, thread_color: str) -> None: + """ + Clean up resources with thread-safe printing (internal implementation for parallel execution). + This is a modified version of _cleanup_resources that uses thread-safe output and parallel resource cleanup. + """ + if not deployment_name: + with _print_lock: + _print_log(f"{thread_prefix}Missing deployment name parameter.", '⛔ ', BOLD_R) + return + + if not rg_name: + with _print_lock: + _print_log(f"{thread_prefix}Missing resource group name parameter.", '⛔ ', BOLD_R) + return + + try: + with _print_lock: + _print_log(f"{thread_prefix}Resource group : {rg_name}", '👉🏽 ', thread_color) + + # Show the deployment details + output = utils.run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Failed to retrieve the deployment', print_command_to_run = False, print_errors = False) + + if output.success and output.json_data: + # Collect all resources that need to be deleted and purged + resources_to_cleanup = [] + + # List CognitiveService accounts + output = utils.run(f' az cognitiveservices account list -g {rg_name}', f'Listed CognitiveService accounts', f'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False) + if output.success and output.json_data: + for resource in output.json_data: + resources_to_cleanup.append({ + 'type': 'cognitiveservices', + 'name': resource['name'], + 'location': resource['location'], + 'rg_name': rg_name + }) + + # List APIM resources + output = utils.run(f' az apim list -g {rg_name}', f'Listed APIM resources', f'Failed to list APIM resources', print_command_to_run = False, print_errors = False) + if output.success and output.json_data: + for resource in output.json_data: + resources_to_cleanup.append({ + 'type': 'apim', + 'name': resource['name'], + 'location': resource['location'], + 'rg_name': rg_name + }) + + # List Key Vault resources + output = utils.run(f' az keyvault list -g {rg_name}', f'Listed Key Vault resources', f'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False) + if output.success and output.json_data: + for resource in output.json_data: + resources_to_cleanup.append({ + 'type': 'keyvault', + 'name': resource['name'], + 'location': resource['location'], + 'rg_name': rg_name + }) + + # Delete and purge resources in parallel if there are any + if resources_to_cleanup: + with _print_lock: + _print_log(f"{thread_prefix}Found {len(resources_to_cleanup)} resource(s) to clean up. 
Processing in parallel...", '👉🏽 ', thread_color)
+                _cleanup_resources_parallel_thread_safe(resources_to_cleanup, thread_prefix, thread_color)
+            else:
+                with _print_lock:
+                    _print_log(f"{thread_prefix}No resources found to clean up.", '👉🏽 ', thread_color)
+
+        # Delete the resource group last
+        with _print_lock:
+            _print_log(f"{thread_prefix}Deleting resource group '{rg_name}'...", 'ℹ️ ', thread_color, show_time=True)
+        output = utils.run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted", f"Failed to delete resource group '{rg_name}'", print_command_to_run = False, print_errors = False)
+
+        with _print_lock:
+            _print_log(f"{thread_prefix}Cleanup completed.", 'ℹ️ ', thread_color, show_time=True)
+
+    except Exception as e:
+        with _print_lock:
+            _print_log(f"{thread_prefix}An error occurred during cleanup: {e}", '⛔ ', BOLD_R)
+        traceback.print_exc()
+
+
+def cleanup_infra_deployments(deployment: INFRASTRUCTURE, indexes: int | list[int] | None = None) -> None:
+    """
+    Clean up infrastructure deployments by deployment enum and index/indexes.
+    Obtains the infra resource group name for each index and calls the private cleanup method.
+    For multiple indexes, runs cleanup operations in parallel for better performance.
+
+    Args:
+        deployment (INFRASTRUCTURE): The infrastructure deployment enum value.
+        indexes (int | list[int] | None): A single index, a list of indexes, or None for no index.
+    """
+
+    if indexes is None:
+        indexes_list = [None]
+    elif isinstance(indexes, (list, tuple)):
+        indexes_list = list(indexes)
+    else:
+        indexes_list = [indexes]
+
+    # If only one index, run sequentially (no need for threading overhead)
+    if len(indexes_list) <= 1:
+        idx = indexes_list[0] if indexes_list else None
+        print_info(f'Cleaning up resources for {deployment.value} - {idx}', True)
+        rg_name = utils.get_infra_rg_name(deployment, idx)
+        _cleanup_resources(deployment.value, rg_name)
+        return
+
+    # For multiple indexes, run in parallel
+    print_info(f'Starting parallel cleanup for {len(indexes_list)} infrastructure instances', True)
+    print_info(f'Infrastructure: {deployment.value}')
+    print_info(f'Indexes: {indexes_list}')
+    print()
+
+    # Determine max workers (reasonable limit to avoid overwhelming the system)
+    max_workers = min(len(indexes_list), 4)  # Cap at 4 concurrent threads
+
+    cleanup_tasks = []
+    for i, idx in enumerate(indexes_list):
+        rg_name = utils.get_infra_rg_name(deployment, idx)
+        thread_color = THREAD_COLORS[i % len(THREAD_COLORS)]
+        thread_prefix = f"{thread_color}[{deployment.value}-{idx}]{RESET}: "
+
+        cleanup_tasks.append({
+            'deployment_name': deployment.value,
+            'rg_name': rg_name,
+            'thread_prefix': thread_prefix,
+            'thread_color': thread_color,
+            'index': idx
+        })
+
+    # Execute cleanup tasks in parallel
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        # Submit all tasks
+        future_to_task = {
+            executor.submit(
+                _cleanup_resources_thread_safe,
+                task['deployment_name'],
+                task['rg_name'],
+                task['thread_prefix'],
+                task['thread_color']
+            ): task for task in cleanup_tasks
+        }
+
+        # Track results
+        completed_count = 0
+        failed_count = 0
+
+        # Wait for completion and handle results
+        for future in as_completed(future_to_task):
+            task = future_to_task[future]
+            try:
+                success, error_msg = future.result()
+                completed_count += 1
+
+                if success:
+                    with _print_lock:
+                        print_ok(f"Completed cleanup for {deployment.value}-{task['index']} ({completed_count}/{len(indexes_list)})")
+                else:
+                    failed_count += 1
+                    with _print_lock:
+                        print_error(f"❌ Failed cleanup for {deployment.value}-{task['index']}: {error_msg}")
+
+            except Exception as e:
+                completed_count += 1
+                failed_count += 1
+                with _print_lock:
+                    print_error(f"❌ Exception during cleanup for {deployment.value}-{task['index']}: {str(e)}")
+
+    # Final summary
+    if failed_count == 0:
+        print_ok(f'All {len(indexes_list)} infrastructure cleanups completed successfully!')
+    else:
+        print_warning(f'Completed with {failed_count} failures out of {len(indexes_list)} total cleanups.')
+        if completed_count - failed_count > 0:
+            print_info(f'{completed_count - failed_count} cleanups succeeded.')
+
+    print_ok('All done!')
diff --git a/shared/python/json_utils.py b/shared/python/json_utils.py
new file mode 100644
index 0000000..71ceaf7
--- /dev/null
+++ b/shared/python/json_utils.py
@@ -0,0 +1,77 @@
+import json
+import ast
+from typing import Any
+
+def is_string_json(text: str) -> bool:
+    """
+    Check if the provided string parses as a JSON object or array (or an equivalent Python literal).
+
+    Args:
+        text (str): The string to check.
+
+    Returns:
+        bool: True if the string parses as JSON or as a Python literal (e.g. a single-quoted dict), False otherwise.
+    """
+
+    # Accept only str, bytes, or bytearray as valid input for JSON parsing.
+    if not isinstance(text, (str, bytes, bytearray)):
+        return False
+
+    # Skip empty or whitespace-only strings
+    if not text or not text.strip():
+        return False
+
+    # First try JSON parsing (handles double quotes)
+    try:
+        json.loads(text)
+        return True
+    except json.JSONDecodeError:
+        pass
+
+    # If JSON fails, try Python literal evaluation (handles single quotes)
+    try:
+        ast.literal_eval(text)
+        return True
+    except (ValueError, SyntaxError):
+        pass
+
+    return False
+
+def extract_json(text: str) -> Any:
+    """
+    Extract the first valid JSON object or array from a string and return it as a Python object.
+
+    This function searches the input string for the first occurrence of a JSON object or array (delimited by '{' or '['),
+    and attempts to decode it using json.JSONDecoder().raw_decode. If the input is already valid JSON, it is returned as a Python object.
+    If no valid JSON is found, None is returned.
+
+    Args:
+        text (str): The string to search for a JSON object or array.
+
+    Returns:
+        Any | None: The extracted JSON as a Python object (dict or list), or None if not found or not valid.
+    """
+
+    if not isinstance(text, str):
+        return None
+
+    # If the string is already valid JSON, parse and return it as a Python object.
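+    # Illustrative behavior: extract_json('note {"a": 1} trailing') returns {'a': 1},
+    # while extract_json('no json here') returns None.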
+ if is_string_json(text): + try: + return json.loads(text) + except json.JSONDecodeError: + # If JSON parsing fails despite is_string_json returning True, + # fall through to substring search + pass + + decoder = json.JSONDecoder() + + for start in range(len(text)): + if text[start] in ('{', '['): + try: + obj, _ = decoder.raw_decode(text[start:]) + return obj + except Exception: + continue + + return None diff --git a/shared/python/utils.py b/shared/python/utils.py index 53b1ada..cdaf66c 100644 --- a/shared/python/utils.py +++ b/shared/python/utils.py @@ -14,15 +14,13 @@ import secrets import base64 import inspect -import threading -from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path import apimtypes import tempfile import os as temp_os from typing import Any, Optional, Tuple -from apimtypes import APIM_SKU, HTTP_VERB, INFRASTRUCTURE, Endpoints, _get_project_root +from apimtypes import APIM_SKU, HTTP_VERB, INFRASTRUCTURE, Endpoints, Output, _get_project_root # ------------------------------ # RE-EXPORTS (BACKWARD COMPATIBILITY) @@ -81,157 +79,6 @@ def build_infrastructure_tags(infrastructure: str | INFRASTRUCTURE, custom_tags: # CLASSES # ------------------------------ -class Output(object): - """ - Represents the output of a command or deployment, including success status, raw text, and parsed JSON data. - """ - - # ------------------------------ - # CONSTRUCTOR - # ------------------------------ - - def __init__(self, success: bool, text: str): - """ - Initialize the Output object with command success status and output text. - Attempts to parse JSON from the output text. - """ - - self.success = success - self.text = text - self.jsonParseException = None - - # Check if the exact string is JSON. - if (is_string_json(text)): - try: - self.json_data = json.loads(text) - except json.JSONDecodeError as e: - self.jsonParseException = e - self.json_data = extract_json(text) - else: - # Check if a substring in the string is JSON. - self.json_data = extract_json(text) - - self.is_json = self.json_data is not None - - def get(self, key: str, label: str = '', secure: bool = False, suppress_logging: bool = False) -> str | None: - """ - Retrieve a deployment output property by key, with optional label and secure masking. - - Args: - key (str): The output key to retrieve. - label (str, optional): Optional label for logging. - secure (bool, optional): If True, masks the value in logs. - - Returns: - str | None: The value as a string, or None if not found. 
- """ - - try: - if not isinstance(self.json_data, dict): - raise KeyError('json_data is not a dict') - - if 'properties' in self.json_data: - properties = self.json_data.get('properties') - if not isinstance(properties, dict): - raise KeyError("'properties' is not a dict in deployment result") - - outputs = properties.get('outputs') - if not isinstance(outputs, dict): - raise KeyError("'outputs' is missing or not a dict in deployment result") - - output_entry = outputs.get(key) - if not isinstance(output_entry, dict) or 'value' not in output_entry: - raise KeyError(f"Output key '{key}' not found in deployment outputs") - - deployment_output = output_entry['value'] - elif key in self.json_data: - deployment_output = self.json_data[key]['value'] - - if not suppress_logging and label: - if secure and isinstance(deployment_output, str) and len(deployment_output) >= 4: - print_val(label, f'****{deployment_output[-4:]}') - else: - print_val(label, deployment_output) - - return str(deployment_output) - - except Exception as e: - error = f"Failed to retrieve output property: '{key}'\nError: {e}" - print_error(error) - - if label: - raise Exception(error) - - return None - - def getJson(self, key: str, label: str = '', secure: bool = False, suppress_logging: bool = False) -> Any: - """ - Retrieve a deployment output property by key and return it as a JSON object. - This method is independent from get() and retrieves the raw deployment output value. - - Args: - key (str): The output key to retrieve. - label (str, optional): Optional label for logging. - secure (bool, optional): If True, masks the value in logs. - - Returns: - Any: The value as a JSON object (dict, list, etc.), or the original value if not JSON, or None if not found. - """ - - try: - if not isinstance(self.json_data, dict): - raise KeyError('json_data is not a dict') - - if 'properties' in self.json_data: - properties = self.json_data.get('properties') - if not isinstance(properties, dict): - raise KeyError("'properties' is not a dict in deployment result") - - outputs = properties.get('outputs') - if not isinstance(outputs, dict): - raise KeyError("'outputs' is missing or not a dict in deployment result") - - output_entry = outputs.get(key) - if not isinstance(output_entry, dict) or 'value' not in output_entry: - raise KeyError(f"Output key '{key}' not found in deployment outputs") - - deployment_output = output_entry['value'] - elif key in self.json_data: - deployment_output = self.json_data[key]['value'] - - if not suppress_logging and label: - if secure and isinstance(deployment_output, str) and len(deployment_output) >= 4: - print_val(label, f'****{deployment_output[-4:]}') - else: - print_val(label, deployment_output) - - # If the result is a string, try to parse it as JSON - if isinstance(deployment_output, str): - # First try JSON parsing (handles double quotes) - try: - return json.loads(deployment_output) - except json.JSONDecodeError: - pass - - # If JSON fails, try Python literal evaluation (handles single quotes) - try: - return ast.literal_eval(deployment_output) - except (ValueError, SyntaxError) as e: - print_error(f'Failed to parse deployment output as Python literal. 
Error: {e}') - pass - - # Return the original result if it's not a string or can't be parsed - return deployment_output - - except Exception as e: - error = f"Failed to retrieve output property: '{key}'\nError: {e}" - print_error(error) - - if label: - raise Exception(error) - - return None - class InfrastructureNotebookHelper: """ Helper class for managing infrastructure notebooks. @@ -695,283 +542,6 @@ def deploy_sample(self, bicep_parameters: dict) -> Output: # ------------------------------ -# ------------------------------ -# INFRASTRUCTURE CLEANUP SYSTEM -# ------------------------------ -# -# This module provides a hierarchical cleanup system for Azure infrastructure with two levels of parallelization: -# -# LEVEL 1: Parallel Resource Group Cleanup -# - When cleaning multiple infrastructure instances (via cleanup_infra_deployments with multiple indexes), -# each resource group is cleaned up in parallel (up to 4 concurrent threads) -# - Each thread gets its own color-coded output for easy tracking -# -# LEVEL 2: Parallel Resource Cleanup (within each resource group) -# - Within each resource group, resources (APIM, Key Vault, Cognitive Services) are deleted and purged in parallel -# - Up to 5 resources can be processed concurrently per resource group -# - Significantly faster than sequential deletion, especially when resources have long delete/purge times -# -# Architecture: -# cleanup_infra_deployments() [Public API - manages multiple resource groups] -# ├─> _cleanup_resources_thread_safe() [Thread-safe wrapper for parallel RG cleanup] -# │ └─> _cleanup_resources_with_thread_safe_printing() -# │ └─> _cleanup_resources_parallel() -# │ -# └─> _cleanup_resources() [Direct call for single RG cleanup] -# └─> _cleanup_resources_parallel() -# -# _cleanup_resources_parallel() [Orchestrates parallel resource deletion] -# └─> _cleanup_single_resource() [Worker: deletes and purges one resource] -# -# Performance Impact: -# - Old approach: Resources deleted sequentially (6 minutes for 3 resources @ 2min each) -# - New approach: Resources deleted in parallel (~2 minutes for 3 resources) -# - With multiple resource groups: Compound parallelization benefits -# -# ------------------------------ - -def _cleanup_single_resource(resource: dict) -> tuple[bool, str]: - """ - Delete and purge a single Azure resource (worker function for parallel cleanup). - - This is the atomic unit of work that deletes and purges one resource. - Called by _cleanup_resources_parallel() which manages multiple resources concurrently. 
- - Args: - resource (dict): Resource information with keys: type, name, location, rg_name - - Returns: - tuple[bool, str]: (success, error_message) - """ - try: - resource_type = resource['type'] - resource_name = resource['name'] - rg_name = resource['rg_name'] - location = resource['location'] - - print_info(f"Deleting and purging {resource_type} '{resource_name}'...") - - # Delete the resource - if resource_type == 'cognitiveservices': - delete_cmd = f"az cognitiveservices account delete -g {rg_name} -n {resource_name}" - purge_cmd = f"az cognitiveservices account purge -g {rg_name} -n {resource_name} --location \"{location}\"" - elif resource_type == 'apim': - delete_cmd = f"az apim delete -n {resource_name} -g {rg_name} -y" - purge_cmd = f"az apim deletedservice purge --service-name {resource_name} --location \"{location}\"" - elif resource_type == 'keyvault': - delete_cmd = f"az keyvault delete -n {resource_name} -g {rg_name}" - purge_cmd = f"az keyvault purge -n {resource_name} --location \"{location}\"" - else: - return False, f"Unknown resource type: {resource_type}" - - # Execute delete - output = run(delete_cmd, f"{resource_type} '{resource_name}' deleted", f"Failed to delete {resource_type} '{resource_name}'", print_command_to_run = False, print_errors = False) - if not output.success: - return False, f"Delete failed for {resource_name}" - - # Execute purge - output = run(purge_cmd, f"{resource_type} '{resource_name}' purged", f"Failed to purge {resource_type} '{resource_name}'", print_command_to_run = False, print_errors = False) - if not output.success: - return False, f"Purge failed for {resource_name}" - - return True, "" - - except Exception as e: - return False, str(e) - - -def _cleanup_resources_parallel(resources: list[dict], thread_prefix: str = '', thread_color: str = '') -> None: - """ - Clean up multiple resources in parallel using ThreadPoolExecutor (orchestrator function). - - This function manages concurrent deletion and purging of Azure resources within a single resource group. - Can operate in two modes: regular printing or thread-safe printing (for when multiple RGs are being cleaned in parallel). 
- - Args: - resources (list[dict]): List of resources to clean up, each with keys: type, name, location, rg_name - thread_prefix (str, optional): Prefix for thread-safe logging (empty = regular printing) - thread_color (str, optional): ANSI color code for thread-safe logging - """ - if not resources: - return - - # Limit concurrent operations to avoid overwhelming Azure APIs - max_workers = min(len(resources), 5) - - # Determine if we need thread-safe printing - use_thread_safe_printing = bool(thread_prefix) - - # Helper function for thread-safe or regular printing - def log_info(msg): - if use_thread_safe_printing: - with _print_lock: - _print_log(f"{thread_prefix}{msg}", '👉🏽 ', thread_color) - else: - print_info(msg) - - def log_success(msg): - if use_thread_safe_printing: - with _print_lock: - _print_log(f"{thread_prefix}{msg}", '✅ ', thread_color, show_time=True) - else: - print_success(msg) - - def log_error(msg): - if use_thread_safe_printing: - with _print_lock: - _print_log(f"{thread_prefix}{msg}", '⛔ ', BOLD_R) - else: - print_error(msg) - - def log_ok(msg): - if use_thread_safe_printing: - with _print_lock: - _print_log(f"{thread_prefix}{msg}", '✅ ', thread_color) - else: - print_ok(msg) - - def log_warning(msg): - if use_thread_safe_printing: - with _print_lock: - _print_log(f"{thread_prefix}{msg}", '⚠️ ', BOLD_Y) - else: - print_warning(msg) - - log_info(f'Starting parallel cleanup of {len(resources)} resources with {max_workers} workers...') - - completed_count = 0 - failed_count = 0 - - with ThreadPoolExecutor(max_workers=max_workers) as executor: - # Submit all cleanup tasks - future_to_resource = { - executor.submit(_cleanup_single_resource, resource): resource - for resource in resources - } - - # Wait for completion and track results - for future in as_completed(future_to_resource): - resource = future_to_resource[future] - try: - success, error_msg = future.result() - completed_count += 1 - - if success: - log_success(f"✓ Cleaned up {resource['type']} '{resource['name']}' ({completed_count}/{len(resources)})") - else: - failed_count += 1 - log_error(f"✗ Failed to clean up {resource['type']} '{resource['name']}': {error_msg}") - - except Exception as e: - failed_count += 1 - log_error(f"✗ Exception cleaning up {resource['type']} '{resource['name']}': {str(e)}") - - # Summary - if failed_count == 0: - log_ok(f'All {len(resources)} resources cleaned up successfully!') - else: - log_warning(f'Completed with {failed_count} failures out of {len(resources)} total resources.') - if completed_count - failed_count > 0: - log_info(f'{completed_count - failed_count} resources cleaned up successfully.') - - -def _cleanup_resources_parallel_thread_safe(resources: list[dict], thread_prefix: str, thread_color: str) -> None: - """ - Convenience wrapper for parallel cleanup with thread-safe printing. - - Args: - resources (list[dict]): List of resources to clean up - thread_prefix (str): Thread prefix for output formatting - thread_color (str): ANSI color code for this thread - """ - _cleanup_resources_parallel(resources, thread_prefix, thread_color) - - -def _cleanup_resources(deployment_name: str, rg_name: str) -> None: - """ - Clean up resources in a single resource group (main cleanup entry point for sequential mode). - - Lists all Azure resources (APIM, Key Vault, Cognitive Services) in a resource group, - then deletes and purges them in parallel before removing the resource group itself. - - Args: - deployment_name (str): The deployment name (string). 
- rg_name (str): The resource group name. - - Returns: - None - - Raises: - Exception: If an error occurs during cleanup. - """ - if not deployment_name: - print_error('Missing deployment name parameter.') - return - - if not rg_name: - print_error('Missing resource group name parameter.') - return - - try: - print_info(f'Resource group : {rg_name}') - - # Show the deployment details (if it exists) - output = run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Deployment not found (may be empty resource group)', print_command_to_run = False, print_errors = False) - - # Collect all resources that need to be deleted and purged - resources_to_cleanup = [] - - # List CognitiveService accounts - output = run(f' az cognitiveservices account list -g {rg_name}', f'Listed CognitiveService accounts', f'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False) - if output.success and output.json_data: - for resource in output.json_data: - resources_to_cleanup.append({ - 'type': 'cognitiveservices', - 'name': resource['name'], - 'location': resource['location'], - 'rg_name': rg_name - }) - - # List APIM resources - output = run(f' az apim list -g {rg_name}', f'Listed APIM resources', f'Failed to list APIM resources', print_command_to_run = False, print_errors = False) - if output.success and output.json_data: - for resource in output.json_data: - resources_to_cleanup.append({ - 'type': 'apim', - 'name': resource['name'], - 'location': resource['location'], - 'rg_name': rg_name - }) - - # List Key Vault resources - output = run(f' az keyvault list -g {rg_name}', f'Listed Key Vault resources', f'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False) - if output.success and output.json_data: - for resource in output.json_data: - resources_to_cleanup.append({ - 'type': 'keyvault', - 'name': resource['name'], - 'location': resource['location'], - 'rg_name': rg_name - }) - - # Delete and purge resources in parallel if there are any - if resources_to_cleanup: - print_info(f'Found {len(resources_to_cleanup)} resources to clean up. Processing in parallel...') - _cleanup_resources_parallel(resources_to_cleanup) - else: - print_info('No resources found to clean up.') - - # Delete the resource group last (always attempt this, even if deployment doesn't exist) - print_message(f"Deleting resource group '{rg_name}'...") - output = run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted', f'Failed to delete resource group '{rg_name}'", print_command_to_run = False, print_errors = False) - - print_message('Cleanup completed.') - - except Exception as e: - print(f'An error occurred during cleanup: {e}') - traceback.print_exc() - - def _determine_bicep_directory(infrastructure_dir: str) -> str: """ Determine the correct Bicep directory based on the current working directory and infrastructure directory name. @@ -1495,218 +1065,6 @@ def read_policy_xml(policy_xml_filepath_or_filename: str, named_values: dict[str return policy_template_xml - -def _cleanup_resources_thread_safe(deployment_name: str, rg_name: str, thread_prefix: str, thread_color: str) -> tuple[bool, str]: - """ - Thread-safe wrapper for _cleanup_resources with formatted output. - - Args: - deployment_name (str): The deployment name (string). - rg_name (str): The resource group name. - thread_prefix (str): The thread prefix for output formatting. - thread_color (str): ANSI color code for this thread. 
- - Returns: - tuple[bool, str]: (success, error_message) - """ - try: - with _print_lock: - _print_log(f"{thread_prefix}Starting cleanup for resource group: {rg_name}", '👉🏽 ', thread_color) - - # Create a modified version of _cleanup_resources that uses thread-safe printing - _cleanup_resources_with_thread_safe_printing(deployment_name, rg_name, thread_prefix, thread_color) - - with _print_lock: - _print_log(f"{thread_prefix}Completed cleanup for resource group: {rg_name}", '👉🏽 ', thread_color) - - return True, "" - - except Exception as e: - error_msg = f'An error occurred during cleanup of {rg_name}: {str(e)}' - with _print_lock: - _print_log(f"{thread_prefix}{error_msg}", '⛔ ', BOLD_R, show_time=True) - traceback.print_exc() - return False, error_msg - - -def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name: str, thread_prefix: str, thread_color: str) -> None: - """ - Clean up resources with thread-safe printing (internal implementation for parallel execution). - This is a modified version of _cleanup_resources that uses thread-safe output and parallel resource cleanup. - """ - if not deployment_name: - with _print_lock: - _print_log(f"{thread_prefix}Missing deployment name parameter.", '⛔ ', BOLD_R) - return - - if not rg_name: - with _print_lock: - _print_log(f"{thread_prefix}Missing resource group name parameter.", '⛔ ', BOLD_R) - return - - try: - with _print_lock: - _print_log(f"{thread_prefix}Resource group : {rg_name}", '👉🏽 ', thread_color) - - # Show the deployment details - output = run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Failed to retrieve the deployment', print_command_to_run = False, print_errors = False) - - if output.success and output.json_data: - # Collect all resources that need to be deleted and purged - resources_to_cleanup = [] - - # List CognitiveService accounts - output = run(f' az cognitiveservices account list -g {rg_name}', f'Listed CognitiveService accounts', f'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False) - if output.success and output.json_data: - for resource in output.json_data: - resources_to_cleanup.append({ - 'type': 'cognitiveservices', - 'name': resource['name'], - 'location': resource['location'], - 'rg_name': rg_name - }) - - # List APIM resources - output = run(f' az apim list -g {rg_name}', f'Listed APIM resources', f'Failed to list APIM resources', print_command_to_run = False, print_errors = False) - if output.success and output.json_data: - for resource in output.json_data: - resources_to_cleanup.append({ - 'type': 'apim', - 'name': resource['name'], - 'location': resource['location'], - 'rg_name': rg_name - }) - - # List Key Vault resources - output = run(f' az keyvault list -g {rg_name}', f'Listed Key Vault resources', f'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False) - if output.success and output.json_data: - for resource in output.json_data: - resources_to_cleanup.append({ - 'type': 'keyvault', - 'name': resource['name'], - 'location': resource['location'], - 'rg_name': rg_name - }) - - # Delete and purge resources in parallel if there are any - if resources_to_cleanup: - with _print_lock: - _print_log(f"{thread_prefix}Found {len(resources_to_cleanup)} resources to clean up. 
Processing in parallel...", '👉🏽 ', thread_color) - _cleanup_resources_parallel_thread_safe(resources_to_cleanup, thread_prefix, thread_color) - else: - with _print_lock: - _print_log(f"{thread_prefix}No resources found to clean up.", '👉🏽 ', thread_color) - - # Delete the resource group last - with _print_lock: - _print_log(f"{thread_prefix}Deleting resource group '{rg_name}'...", 'ℹ️ ', thread_color, show_time=True) - output = run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted', f'Failed to delete resource group '{rg_name}'", print_command_to_run = False, print_errors = False) - - with _print_lock: - _print_log(f"{thread_prefix}Cleanup completed.", 'ℹ️ ', thread_color, show_time=True) - - except Exception as e: - with _print_lock: - _print_log(f"{thread_prefix}An error occurred during cleanup: {e}", '⛔ ', BOLD_R) - traceback.print_exc() - - -def cleanup_infra_deployments(deployment: INFRASTRUCTURE, indexes: int | list[int] | None = None) -> None: - """ - Clean up infrastructure deployments by deployment enum and index/indexes. - Obtains the infra resource group name for each index and calls the private cleanup method. - For multiple indexes, runs cleanup operations in parallel for better performance. - - Args: - deployment (INFRASTRUCTURE): The infrastructure deployment enum value. - indexes (int | list[int] | None): A single index, a list of indexes, or None for no index. - """ - - if indexes is None: - indexes_list = [None] - elif isinstance(indexes, (list, tuple)): - indexes_list = list(indexes) - else: - indexes_list = [indexes] - - # If only one index, run sequentially (no need for threading overhead) - if len(indexes_list) <= 1: - idx = indexes_list[0] if indexes_list else None - print_info(f'Cleaning up resources for {deployment.value} - {idx}', True) - rg_name = get_infra_rg_name(deployment, idx) - _cleanup_resources(deployment.value, rg_name) - return - - # For multiple indexes, run in parallel - print_info(f'Starting parallel cleanup for {len(indexes_list)} infrastructure instances', True) - print_info(f'Infrastructure: {deployment.value}') - print_info(f'Indexes: {indexes_list}') - print() - - # Determine max workers (reasonable limit to avoid overwhelming the system) - max_workers = min(len(indexes_list), 4) # Cap at 4 concurrent threads - - cleanup_tasks = [] - for i, idx in enumerate(indexes_list): - rg_name = get_infra_rg_name(deployment, idx) - thread_color = THREAD_COLORS[i % len(THREAD_COLORS)] - thread_prefix = f"{thread_color}[{deployment.value}-{idx}]{RESET}: " - - cleanup_tasks.append({ - 'deployment_name': deployment.value, - 'rg_name': rg_name, - 'thread_prefix': thread_prefix, - 'thread_color': thread_color, - 'index': idx - }) - - # Execute cleanup tasks in parallel - with ThreadPoolExecutor(max_workers=max_workers) as executor: - # Submit all tasks - future_to_task = { - executor.submit( - _cleanup_resources_thread_safe, - task['deployment_name'], - task['rg_name'], - task['thread_prefix'], - task['thread_color'] - ): task for task in cleanup_tasks - } - - # Track results - completed_count = 0 - failed_count = 0 - - # Wait for completion and handle results - for future in as_completed(future_to_task): - task = future_to_task[future] - try: - success, error_msg = future.result() - completed_count += 1 - - if success: - with _print_lock: - print_ok(f"Completed cleanup for {deployment.value}-{task['index']} ({completed_count}/{len(indexes_list)})") - else: - failed_count += 1 - with _print_lock: - print_error(f"❌ Failed cleanup for 
{deployment.value}-{task['index']}: {error_msg}") - - except Exception as e: - failed_count += 1 - with _print_lock: - print_error(f"❌ Exception during cleanup for {deployment.value}-{task['index']}: {str(e)}") - - # Final summary - if failed_count == 0: - print_ok(f'All {len(indexes_list)} infrastructure cleanups completed successfully!') - else: - print_warning(f'Completed with {failed_count} failures out of {len(indexes_list)} total cleanups.') - if completed_count > 0: - print_info(f'{completed_count} cleanups succeeded.') - - print_ok('All done!') - def extract_json(text: str) -> Any: """ Extract the first valid JSON object or array from a string and return it as a Python object. diff --git a/tests/python/test_infrastructures.py b/tests/python/test_infrastructures.py index a5264e9..02df7a8 100644 --- a/tests/python/test_infrastructures.py +++ b/tests/python/test_infrastructures.py @@ -4,9 +4,10 @@ from unittest.mock import Mock, patch, MagicMock import pytest - +import console import infrastructures -from apimtypes import INFRASTRUCTURE, APIM_SKU, APIMNetworkMode, API, PolicyFragment, HTTP_VERB +from apimtypes import INFRASTRUCTURE, APIM_SKU, APIMNetworkMode, API, PolicyFragment, HTTP_VERB, Output +import utils # ------------------------------ @@ -947,3 +948,672 @@ def test_policy_fragment_creation_robustness(mock_utils): assert '' in policy_xmls assert '' in policy_xmls assert '' in policy_xmls + + +# ------------------------------ +# cleanup_resources (smoke) +# ------------------------------ + +def test_cleanup_resources_smoke(monkeypatch): + monkeypatch.setattr(utils, 'run', lambda *a, **kw: MagicMock(success=True, json_data={})) + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_error', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_warning', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_val', lambda *a, **kw: None) + # Direct private method call for legacy test (should still work) + infrastructures._cleanup_resources(INFRASTRUCTURE.SIMPLE_APIM.value, 'rg') + + +def test_cleanup_resources_missing_parameters(monkeypatch): + """Test _cleanup_resources with missing parameters.""" + print_calls = [] + + def mock_print_error(message, *args, **kwargs): + print_calls.append(message) + + monkeypatch.setattr(infrastructures, 'print_error', mock_print_error) + + # Test missing deployment name + infrastructures._cleanup_resources('', 'valid-rg') + assert 'Missing deployment name parameter.' in print_calls + + # Test missing resource group name + print_calls.clear() + infrastructures._cleanup_resources('valid-deployment', '') + assert 'Missing resource group name parameter.' in print_calls + + # Test None deployment name + print_calls.clear() + infrastructures._cleanup_resources(None, 'valid-rg') + assert 'Missing deployment name parameter.' in print_calls + + # Test None resource group name + print_calls.clear() + infrastructures._cleanup_resources('valid-deployment', None) + assert 'Missing resource group name parameter.' 
in print_calls + + +def test_cleanup_resources_with_resources(monkeypatch): + """Test _cleanup_resources with various resource types present.""" + run_commands = [] + + def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): + run_commands.append(command) + + # Mock deployment show response + if 'deployment group show' in command: + return Output(success=True, text='{"properties": {"provisioningState": "Succeeded"}}') + + # Mock cognitive services list response + if 'cognitiveservices account list' in command: + return Output(success=True, text='[{"name": "cog-service-1", "location": "eastus"}, {"name": "cog-service-2", "location": "westus"}]') + + # Mock APIM list response + if 'apim list' in command: + return Output(success=True, text='[{"name": "apim-service-1", "location": "eastus"}, {"name": "apim-service-2", "location": "westus"}]') + + # Mock Key Vault list response + if 'keyvault list' in command: + return Output(success=True, text='[{"name": "kv-vault-1", "location": "eastus"}, {"name": "kv-vault-2", "location": "westus"}]') + + # Default successful response for delete/purge operations + return Output(success=True, text='Operation completed') + + monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + + # Execute cleanup + infrastructures._cleanup_resources('test-deployment', 'test-rg') + + # Verify all expected commands were called + command_patterns = [ + 'az deployment group show --name test-deployment -g test-rg', + 'az cognitiveservices account list -g test-rg', + 'az cognitiveservices account delete -g test-rg -n cog-service-1', + 'az cognitiveservices account purge -g test-rg -n cog-service-1 --location "eastus"', + 'az cognitiveservices account delete -g test-rg -n cog-service-2', + 'az cognitiveservices account purge -g test-rg -n cog-service-2 --location "westus"', + 'az apim list -g test-rg', + 'az apim delete -n apim-service-1 -g test-rg -y', + 'az apim deletedservice purge --service-name apim-service-1 --location "eastus"', + 'az apim delete -n apim-service-2 -g test-rg -y', + 'az apim deletedservice purge --service-name apim-service-2 --location "westus"', + 'az keyvault list -g test-rg', + 'az keyvault delete -n kv-vault-1 -g test-rg', + 'az keyvault purge -n kv-vault-1 --location "eastus"', + 'az keyvault delete -n kv-vault-2 -g test-rg', + 'az keyvault purge -n kv-vault-2 --location "westus"', + 'az group delete --name test-rg -y' + ] + + for pattern in command_patterns: + assert any(pattern in cmd for cmd in run_commands), f"Expected command pattern not found: {pattern}" + + +def test_cleanup_resources_no_resources(monkeypatch): + """Test _cleanup_resources when no resources exist.""" + run_commands = [] + + def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): + run_commands.append(command) + + # Mock deployment show response + if 'deployment group show' in command: + return Output(success=True, text='{"properties": {"provisioningState": "Succeeded"}}') + + # Mock empty resource lists + if any(x in command for x in ['cognitiveservices account list', 'apim list', 'keyvault list']): + return Output(success=True, text='[]') + + # Default successful response + return Output(success=True, text='Operation completed') + + monkeypatch.setattr(utils, 'run', mock_run) + 
monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + + # Execute cleanup + infrastructures._cleanup_resources('test-deployment', 'test-rg') + + # Verify only listing and resource group deletion commands were called + expected_commands = [ + 'az deployment group show --name test-deployment -g test-rg', + 'az cognitiveservices account list -g test-rg', + 'az apim list -g test-rg', + 'az keyvault list -g test-rg', + 'az group delete --name test-rg -y' + ] + + for expected in expected_commands: + assert any(expected in cmd for cmd in run_commands), f"Expected command not found: {expected}" + + # Verify no delete/purge commands for individual resources + delete_purge_patterns = ['delete -n', 'purge -n', 'deletedservice purge'] + for pattern in delete_purge_patterns: + assert not any(pattern in cmd for cmd in run_commands), f"Unexpected delete/purge command found: {pattern}" + + +def test_cleanup_resources_command_failures(monkeypatch): + """Test _cleanup_resources when commands fail.""" + + def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): + # Mock deployment show failure + if 'deployment group show' in command: + return Output(success=False, text='Deployment not found') + + # All other commands succeed + return Output(success=True, json_data=[]) + + monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + + # Should not raise exception even when deployment show fails + infrastructures._cleanup_resources('test-deployment', 'test-rg') + + +def test_cleanup_resources_exception_handling(monkeypatch): + """Test _cleanup_resources exception handling.""" + exception_caught = [] + + def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): + raise Exception("Simulated Azure CLI error") + + def mock_print(message): + exception_caught.append(message) + + monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + monkeypatch.setattr('builtins.print', mock_print) + monkeypatch.setattr('traceback.print_exc', lambda: None) + + # Should handle exception gracefully + infrastructures._cleanup_resources('test-deployment', 'test-rg') + + # Verify exception was caught and printed + assert any('An error occurred during cleanup:' in msg for msg in exception_caught) + +def test_cleanup_infra_deployment_single(monkeypatch): + monkeypatch.setattr(infrastructures, '_cleanup_resources', lambda deployment_name, rg_name: None) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, None) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 1) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1]) # Single item in list should use sequential mode + + +def test_cleanup_infra_deployments_parallel_mode(monkeypatch): + """Test cleanup_infra_deployments with multiple indexes using parallel execution.""" + cleanup_calls = [] + + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + cleanup_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) + return True, "" # Return success + + def 
mock_get_infra_rg_name(deployment, index): + return f'apim-infra-{deployment.value}-{index}' if index else f'apim-infra-{deployment.value}' + + monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + + # Test with multiple indexes (should use parallel mode) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3]) + + # Verify all cleanup calls were made + assert len(cleanup_calls) == 3 + + # Check that the correct resource groups were targeted + expected_rgs = [ + 'apim-infra-simple-apim-1', + 'apim-infra-simple-apim-2', + 'apim-infra-simple-apim-3' + ] + actual_rgs = [call[1] for call in cleanup_calls] + assert set(actual_rgs) == set(expected_rgs) + + # Check that thread prefixes contain the correct infrastructure and index info + for deployment_name, _rg_name, thread_prefix, thread_color in cleanup_calls: + assert deployment_name == 'simple-apim' + assert 'simple-apim' in thread_prefix + assert thread_color in console.THREAD_COLORS + + +def test_cleanup_infra_deployments_parallel_with_failures(monkeypatch): + """Test parallel cleanup handling when some threads fail.""" + cleanup_calls = [] + + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + cleanup_calls.append((deployment_name, rg_name)) + # Simulate failure for index 2 + if 'simple-apim-2' in rg_name: + return False, "Simulated failure for testing" + return True, "" + + def mock_get_infra_rg_name(deployment, index): + return f'apim-infra-{deployment.value}-{index}' + + monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_error', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_warning', lambda *a, **kw: None) + + # Test with multiple indexes where one fails + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3]) + + # Verify all cleanup attempts were made despite failure + assert len(cleanup_calls) == 3 + + +def test_cleanup_resources_thread_safe_success(monkeypatch): + """Test the thread-safe cleanup wrapper with successful execution.""" + original_calls = [] + + def mock_cleanup_resources_with_thread_safe_printing(deployment_name, rg_name, thread_prefix, thread_color): + original_calls.append((deployment_name, rg_name)) + + monkeypatch.setattr(infrastructures, '_cleanup_resources_with_thread_safe_printing', mock_cleanup_resources_with_thread_safe_printing) + + # Test successful cleanup + success, error_msg = infrastructures._cleanup_resources_thread_safe( + 'test-deployment', 'test-rg', '[TEST]: ', console.BOLD_G + ) + + assert success is True + assert not error_msg + assert len(original_calls) == 1 + assert original_calls[0] == ('test-deployment', 'test-rg') + + +def test_cleanup_resources_thread_safe_failure(monkeypatch): + """Test the thread-safe cleanup wrapper with exception handling.""" + def mock_cleanup_resources_with_thread_safe_printing(deployment_name, rg_name, thread_prefix, thread_color): + raise Exception("Simulated cleanup failure") + + monkeypatch.setattr(infrastructures, 
'_cleanup_resources_with_thread_safe_printing', mock_cleanup_resources_with_thread_safe_printing) + + # Test failed cleanup + success, error_msg = infrastructures._cleanup_resources_thread_safe( + 'test-deployment', 'test-rg', '[TEST]: ', console.BOLD_G + ) + + assert success is False + assert "Simulated cleanup failure" in error_msg + + +def test_cleanup_infra_deployments_max_workers_limit(monkeypatch): + """Test that parallel cleanup properly handles different numbers of indexes.""" + cleanup_calls = [] + + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + cleanup_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) + return True, "" + + def mock_get_infra_rg_name(deployment, index): + return f'rg-{deployment.value}-{index}' + + # Mock Azure CLI calls to avoid real execution + def mock_run(*args, **kwargs): + return Output(success=True, text='{}') + + monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + + # Test with 6 indexes (should use parallel mode and handle all indexes) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3, 4, 5, 6]) + + # Verify all 6 cleanup calls were made + assert len(cleanup_calls) == 6, f"Expected 6 cleanup calls, got {len(cleanup_calls)}" + + # Check that the correct resource groups were targeted + expected_rgs = [f'rg-simple-apim-{i}' for i in range(1, 7)] + actual_rgs = [call[1] for call in cleanup_calls] + assert set(actual_rgs) == set(expected_rgs), f"Expected RGs {expected_rgs}, got {actual_rgs}" + + # Test with 2 indexes (should use parallel mode) + cleanup_calls.clear() + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2]) + + assert len(cleanup_calls) == 2, f"Expected 2 cleanup calls, got {len(cleanup_calls)}" + + # Test that thread prefixes and colors are assigned properly + for call in cleanup_calls: + deployment_name, _rg_name, thread_prefix, thread_color = call + assert deployment_name == 'simple-apim' + assert 'simple-apim' in thread_prefix + assert thread_color in console.THREAD_COLORS + + +def test_cleanup_infra_deployments_thread_color_assignment(monkeypatch): + """Test that thread colors are assigned correctly and cycle through available colors.""" + cleanup_calls = [] + + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + cleanup_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) + return True, "" + + def mock_get_infra_rg_name(deployment, index): + return f'apim-infra-{deployment.value}-{index}' + + # Mock Azure CLI calls to avoid real execution + def mock_run(*args, **kwargs): + return Output(success=True, text='{}') + + monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + + # Test with more indexes than available colors to verify cycling + num_colors = 
len(console.THREAD_COLORS) + test_indexes = list(range(1, num_colors + 3)) # More than available colors + + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, test_indexes) + + # Sort the calls by the index extracted from the rg_name to check in deterministic order + cleanup_calls_sorted = sorted(cleanup_calls, key=lambda x: int(x[1].split('-')[-1])) + assigned_colors_sorted = [call[3] for call in cleanup_calls_sorted] + + # First num_colors should use each color once + for i in range(num_colors): + expected_color = console.THREAD_COLORS[i % num_colors] + assert assigned_colors_sorted[i] == expected_color + + # Additional colors should cycle back to the beginning + if len(assigned_colors_sorted) > num_colors: + assert assigned_colors_sorted[num_colors] == console.THREAD_COLORS[0] + assert assigned_colors_sorted[num_colors + 1] == console.THREAD_COLORS[1] + + +def test_cleanup_infra_deployments_all_infrastructure_types(monkeypatch): + """Test cleanup_infra_deployments with all infrastructure types.""" + cleanup_calls = [] + + def mock_cleanup_resources(deployment_name, rg_name): + cleanup_calls.append((deployment_name, rg_name)) + + def mock_get_infra_rg_name(deployment, index): + return f'apim-infra-{deployment.value}-{index}' if index else f'apim-infra-{deployment.value}' + + monkeypatch.setattr(infrastructures, '_cleanup_resources', mock_cleanup_resources) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + + # Test all infrastructure types + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 1) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.APIM_ACA, 2) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.AFD_APIM_PE, 3) + + # Verify correct calls were made + assert ('simple-apim', 'apim-infra-simple-apim-1') in cleanup_calls + assert ('apim-aca', 'apim-infra-apim-aca-2') in cleanup_calls + assert ('afd-apim-pe', 'apim-infra-afd-apim-pe-3') in cleanup_calls + + +def test_cleanup_infra_deployments_index_scenarios(monkeypatch): + """Test cleanup_infra_deployments with various index scenarios.""" + cleanup_calls = [] + thread_safe_calls = [] + + def mock_cleanup_resources(deployment_name, rg_name): + cleanup_calls.append((deployment_name, rg_name)) + + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + thread_safe_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) + return True, "" + + def mock_get_infra_rg_name(deployment, index): + return f'apim-infra-{deployment.value}-{index}' if index else f'apim-infra-{deployment.value}' + + # Mock Azure CLI calls to avoid real execution + def mock_run(*args, **kwargs): + return Output(success=True, text='{}') + + monkeypatch.setattr(infrastructures, '_cleanup_resources', mock_cleanup_resources) + monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + + # Test None index (sequential) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, None) + + # Test single integer index (sequential) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 5) + + # Test 
single item list (sequential) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1]) + + # Test list of integers (parallel) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [2, 3]) + + # Test tuple of integers (parallel) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, (4, 5)) + + # Test empty list (sequential, with no index) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, []) + + # Verify sequential calls + expected_sequential_calls = [ + ('simple-apim', 'apim-infra-simple-apim'), # None index + ('simple-apim', 'apim-infra-simple-apim-5'), # Single index 5 + ('simple-apim', 'apim-infra-simple-apim-1'), # Single item list [1] + ('simple-apim', 'apim-infra-simple-apim'), # Empty list (None index) + ] + + for expected_call in expected_sequential_calls: + assert expected_call in cleanup_calls, f"Expected sequential call {expected_call} not found in {cleanup_calls}" + + # Verify parallel calls (extract just the deployment and rg_name parts) + parallel_calls = [(call[0], call[1]) for call in thread_safe_calls] + expected_parallel_calls = [ + ('simple-apim', 'apim-infra-simple-apim-2'), # List [2, 3] - first + ('simple-apim', 'apim-infra-simple-apim-3'), # List [2, 3] - second + ('simple-apim', 'apim-infra-simple-apim-4'), # Tuple (4, 5) - first + ('simple-apim', 'apim-infra-simple-apim-5'), # Tuple (4, 5) - second + ] + + for expected_call in expected_parallel_calls: + assert expected_call in parallel_calls, f"Expected parallel call {expected_call} not found in {parallel_calls}" + + +def test_cleanup_functions_comprehensive(monkeypatch): + """Test cleanup functions with various scenarios.""" + run_commands = [] + + def mock_run(command, ok_message='', error_message='', print_output=False, print_command_to_run=True, print_errors=True, print_warnings=True): + run_commands.append(command) + + # Return appropriate mock responses + if 'deployment group show' in command: + return Output(success=True, json_data={ + 'properties': {'provisioningState': 'Succeeded'} + }) + + # Return empty lists for resource queries to avoid complex mocking + if any(x in command for x in ['list -g', 'list']): + return Output(success=True, json_data=[]) + + return Output(success=True, text='{}') + + def mock_get_infra_rg_name(deployment, index): + return f'test-rg-{deployment.value}-{index}' if index else f'test-rg-{deployment.value}' + + monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + + # Test _cleanup_resources (private function) + infrastructures._cleanup_resources('test-deployment', 'test-rg') # Should not raise + + # Test cleanup_infra_deployments with INFRASTRUCTURE enum (correct function name and parameter type) + + # Test with all infrastructure types + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.APIM_ACA, 1) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.AFD_APIM_PE, [1, 2]) + + # Verify commands were executed + assert len(run_commands) > 0 + + +def test_cleanup_edge_cases_comprehensive(monkeypatch): + """Test cleanup functions with edge cases and error conditions.""" + + # Test with different index types + cleanup_calls = [] + + def mock_cleanup_resources(deployment_name, rg_name): + 
cleanup_calls.append((deployment_name, rg_name)) + return True, "" + + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + cleanup_calls.append((deployment_name, rg_name)) + return True, "" + + def mock_get_infra_rg_name(deployment, index): + return f'rg-{deployment.value}-{index}' if index is not None else f'rg-{deployment.value}' + + # Mock Azure CLI calls to avoid real execution + def mock_run(*args, **kwargs): + return Output(success=True, text='{}') + + monkeypatch.setattr(infrastructures, '_cleanup_resources', mock_cleanup_resources) + monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + + # Test with zero index (single index, uses sequential path) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 0) + assert ('simple-apim', 'rg-simple-apim-0') in cleanup_calls + + # Test with negative index (single index, uses sequential path) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, -1) + assert ('simple-apim', 'rg-simple-apim--1') in cleanup_calls + + # Test with large index (single index, uses sequential path) + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 9999) + assert ('simple-apim', 'rg-simple-apim-9999') in cleanup_calls + + # Test with mixed positive and negative indexes in list (multiple indexes, uses parallel path) + cleanup_calls.clear() + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.APIM_ACA, [-1, 0, 1]) + expected = [ + ('apim-aca', 'rg-apim-aca--1'), + ('apim-aca', 'rg-apim-aca-0'), + ('apim-aca', 'rg-apim-aca-1') + ] + for call in expected: + assert call in cleanup_calls # Test with single-item list + cleanup_calls.clear() + infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.AFD_APIM_PE, [42]) + assert ('afd-apim-pe', 'rg-afd-apim-pe-42') in cleanup_calls + + +def test_cleanup_resources_partial_failures(monkeypatch): + """Test _cleanup_resources when some operations fail with parallel cleanup.""" + run_commands = [] + + def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): + run_commands.append(command) + + # Mock deployment show response + if 'deployment group show' in command: + return Output(success=True, text='{"properties": {"provisioningState": "Failed"}}') + + # Mock resources exist + if 'cognitiveservices account list' in command: + return Output(success=True, text='[{"name": "cog-service-1", "location": "eastus"}]') + + if 'apim list' in command: + return Output(success=True, text='[{"name": "apim-service-1", "location": "eastus"}]') + + if 'keyvault list' in command: + return Output(success=True, text='[{"name": "kv-vault-1", "location": "eastus"}]') + + # Simulate failure for delete operations but success for purge + if 'delete' in command and ('cognitiveservices' in command or 'apim delete' in command or 'keyvault delete' in command): + return Output(success=False, text='Delete failed') + + # Simulate failure for purge operations + if 'purge' in command: + return Output(success=False, text='Purge failed') + + # Resource group deletion succeeds + return Output(success=True, text='Operation completed') + + 
monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + monkeypatch.setattr(console, 'print_success', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_error', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_warning', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + + # Should not raise exception even when individual operations fail + infrastructures._cleanup_resources('test-deployment', 'test-rg') + + # Verify all listing and group operations were attempted + # Note: With parallel cleanup, if delete fails, purge is not attempted (expected behavior) + expected_patterns = [ + 'deployment group show', + 'cognitiveservices account list', + 'apim list', + 'keyvault list', + 'group delete' + ] + + for pattern in expected_patterns: + assert any(pattern in cmd for cmd in run_commands), f"Expected command pattern not found: {pattern}" + + # Verify delete attempts were made (even though they failed) + delete_patterns = [ + 'cognitiveservices account delete', + 'apim delete', + 'keyvault delete' + ] + + for pattern in delete_patterns: + assert any(pattern in cmd for cmd in run_commands), f"Expected delete command pattern not found: {pattern}" + + +def test_cleanup_resources_malformed_responses(monkeypatch): + """Test _cleanup_resources with malformed API responses.""" + + def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): + + # Mock deployment show with missing properties + if 'deployment group show' in command: + return Output(success=True, text='{}') + + # Mock malformed resource responses (missing required fields) + if 'cognitiveservices account list' in command: + return Output(success=True, text='[{"name": "cog-service-1"}, {"location": "eastus"}, {}]') + + if 'apim list' in command: + return Output(success=True, text='[{"name": "apim-service-1"}, {"location": "eastus"}]') + + if 'keyvault list' in command: + return Output(success=True, text='[{"name": "kv-vault-1"}]') + + # Default response for delete/purge operations + return Output(success=True, text='Operation completed') + + monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + + # Should handle malformed responses gracefully without raising exceptions + infrastructures._cleanup_resources('test-deployment', 'test-rg') diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index 909d7da..a25b306 100644 --- a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -233,481 +233,6 @@ def mock_inspect_currentframe(): with pytest.raises(ValueError, match='Could not auto-detect sample name'): utils.read_policy_xml('policy.xml', {'key': 'value'}) -# ------------------------------ -# cleanup_resources (smoke) -# ------------------------------ - -def test_cleanup_resources_smoke(monkeypatch): - monkeypatch.setattr(utils, 'run', lambda *a, **kw: MagicMock(success=True, json_data={})) - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_error', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_message', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) - monkeypatch.setattr(utils, 
'print_warning', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_val', lambda *a, **kw: None) - # Direct private method call for legacy test (should still work) - utils._cleanup_resources(INFRASTRUCTURE.SIMPLE_APIM.value, 'rg') - - -def test_cleanup_resources_missing_parameters(monkeypatch): - """Test _cleanup_resources with missing parameters.""" - print_calls = [] - - def mock_print_error(message, *args, **kwargs): - print_calls.append(message) - - monkeypatch.setattr(utils, 'print_error', mock_print_error) - - # Test missing deployment name - utils._cleanup_resources('', 'valid-rg') - assert 'Missing deployment name parameter.' in print_calls - - # Test missing resource group name - print_calls.clear() - utils._cleanup_resources('valid-deployment', '') - assert 'Missing resource group name parameter.' in print_calls - - # Test None deployment name - print_calls.clear() - utils._cleanup_resources(None, 'valid-rg') - assert 'Missing deployment name parameter.' in print_calls - - # Test None resource group name - print_calls.clear() - utils._cleanup_resources('valid-deployment', None) - assert 'Missing resource group name parameter.' in print_calls - - -def test_cleanup_resources_with_resources(monkeypatch): - """Test _cleanup_resources with various resource types present.""" - run_commands = [] - - def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): - run_commands.append(command) - - # Mock deployment show response - if 'deployment group show' in command: - return utils.Output(success=True, text='{"properties": {"provisioningState": "Succeeded"}}') - - # Mock cognitive services list response - if 'cognitiveservices account list' in command: - return utils.Output(success=True, text='[{"name": "cog-service-1", "location": "eastus"}, {"name": "cog-service-2", "location": "westus"}]') - - # Mock APIM list response - if 'apim list' in command: - return utils.Output(success=True, text='[{"name": "apim-service-1", "location": "eastus"}, {"name": "apim-service-2", "location": "westus"}]') - - # Mock Key Vault list response - if 'keyvault list' in command: - return utils.Output(success=True, text='[{"name": "kv-vault-1", "location": "eastus"}, {"name": "kv-vault-2", "location": "westus"}]') - - # Default successful response for delete/purge operations - return utils.Output(success=True, text='Operation completed') - - monkeypatch.setattr(utils, 'run', mock_run) - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_message', lambda *a, **kw: None) - - # Execute cleanup - utils._cleanup_resources('test-deployment', 'test-rg') - - # Verify all expected commands were called - command_patterns = [ - 'az deployment group show --name test-deployment -g test-rg', - 'az cognitiveservices account list -g test-rg', - 'az cognitiveservices account delete -g test-rg -n cog-service-1', - 'az cognitiveservices account purge -g test-rg -n cog-service-1 --location "eastus"', - 'az cognitiveservices account delete -g test-rg -n cog-service-2', - 'az cognitiveservices account purge -g test-rg -n cog-service-2 --location "westus"', - 'az apim list -g test-rg', - 'az apim delete -n apim-service-1 -g test-rg -y', - 'az apim deletedservice purge --service-name apim-service-1 --location "eastus"', - 'az apim delete -n apim-service-2 -g test-rg -y', - 'az apim deletedservice purge --service-name apim-service-2 --location "westus"', - 'az keyvault list -g test-rg', - 'az keyvault delete -n 
kv-vault-1 -g test-rg', - 'az keyvault purge -n kv-vault-1 --location "eastus"', - 'az keyvault delete -n kv-vault-2 -g test-rg', - 'az keyvault purge -n kv-vault-2 --location "westus"', - 'az group delete --name test-rg -y' - ] - - for pattern in command_patterns: - assert any(pattern in cmd for cmd in run_commands), f"Expected command pattern not found: {pattern}" - - -def test_cleanup_resources_no_resources(monkeypatch): - """Test _cleanup_resources when no resources exist.""" - run_commands = [] - - def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): - run_commands.append(command) - - # Mock deployment show response - if 'deployment group show' in command: - return utils.Output(success=True, text='{"properties": {"provisioningState": "Succeeded"}}') - - # Mock empty resource lists - if any(x in command for x in ['cognitiveservices account list', 'apim list', 'keyvault list']): - return utils.Output(success=True, text='[]') - - # Default successful response - return utils.Output(success=True, text='Operation completed') - - monkeypatch.setattr(utils, 'run', mock_run) - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_message', lambda *a, **kw: None) - - # Execute cleanup - utils._cleanup_resources('test-deployment', 'test-rg') - - # Verify only listing and resource group deletion commands were called - expected_commands = [ - 'az deployment group show --name test-deployment -g test-rg', - 'az cognitiveservices account list -g test-rg', - 'az apim list -g test-rg', - 'az keyvault list -g test-rg', - 'az group delete --name test-rg -y' - ] - - for expected in expected_commands: - assert any(expected in cmd for cmd in run_commands), f"Expected command not found: {expected}" - - # Verify no delete/purge commands for individual resources - delete_purge_patterns = ['delete -n', 'purge -n', 'deletedservice purge'] - for pattern in delete_purge_patterns: - assert not any(pattern in cmd for cmd in run_commands), f"Unexpected delete/purge command found: {pattern}" - - -def test_cleanup_resources_command_failures(monkeypatch): - """Test _cleanup_resources when commands fail.""" - - def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): - # Mock deployment show failure - if 'deployment group show' in command: - return utils.Output(success=False, text='Deployment not found') - - # All other commands succeed - return utils.Output(success=True, json_data=[]) - - monkeypatch.setattr(utils, 'run', mock_run) - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_message', lambda *a, **kw: None) - - # Should not raise exception even when deployment show fails - utils._cleanup_resources('test-deployment', 'test-rg') - - -def test_cleanup_resources_exception_handling(monkeypatch): - """Test _cleanup_resources exception handling.""" - exception_caught = [] - - def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): - raise Exception("Simulated Azure CLI error") - - def mock_print(message): - exception_caught.append(message) - - monkeypatch.setattr(utils, 'run', mock_run) - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_message', lambda *a, **kw: None) - monkeypatch.setattr('builtins.print', mock_print) - monkeypatch.setattr('traceback.print_exc', lambda: 
None) - - # Should handle exception gracefully - utils._cleanup_resources('test-deployment', 'test-rg') - - # Verify exception was caught and printed - assert any('An error occurred during cleanup:' in msg for msg in exception_caught) - -def test_cleanup_infra_deployment_single(monkeypatch): - monkeypatch.setattr(utils, '_cleanup_resources', lambda deployment_name, rg_name: None) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, None) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 1) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1]) # Single item in list should use sequential mode - - -def test_cleanup_infra_deployments_parallel_mode(monkeypatch): - """Test cleanup_infra_deployments with multiple indexes using parallel execution.""" - cleanup_calls = [] - - def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): - cleanup_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) - return True, "" # Return success - - def mock_get_infra_rg_name(deployment, index): - return f'apim-infra-{deployment.value}-{index}' if index else f'apim-infra-{deployment.value}' - - monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) - - # Test with multiple indexes (should use parallel mode) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3]) - - # Verify all cleanup calls were made - assert len(cleanup_calls) == 3 - - # Check that the correct resource groups were targeted - expected_rgs = [ - 'apim-infra-simple-apim-1', - 'apim-infra-simple-apim-2', - 'apim-infra-simple-apim-3' - ] - actual_rgs = [call[1] for call in cleanup_calls] - assert set(actual_rgs) == set(expected_rgs) - - # Check that thread prefixes contain the correct infrastructure and index info - for deployment_name, _rg_name, thread_prefix, thread_color in cleanup_calls: - assert deployment_name == 'simple-apim' - assert 'simple-apim' in thread_prefix - assert thread_color in utils.THREAD_COLORS - - -def test_cleanup_infra_deployments_parallel_with_failures(monkeypatch): - """Test parallel cleanup handling when some threads fail.""" - cleanup_calls = [] - - def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): - cleanup_calls.append((deployment_name, rg_name)) - # Simulate failure for index 2 - if 'simple-apim-2' in rg_name: - return False, "Simulated failure for testing" - return True, "" - - def mock_get_infra_rg_name(deployment, index): - return f'apim-infra-{deployment.value}-{index}' - - monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_error', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_warning', lambda *a, **kw: None) - - # Test with multiple indexes where one fails - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3]) - - # Verify all cleanup attempts were made despite failure - assert len(cleanup_calls) == 3 - - -def test_cleanup_resources_thread_safe_success(monkeypatch): - """Test the thread-safe cleanup wrapper with successful execution.""" - original_calls = [] - - def 
mock_cleanup_resources_with_thread_safe_printing(deployment_name, rg_name, thread_prefix, thread_color): - original_calls.append((deployment_name, rg_name)) - - monkeypatch.setattr(utils, '_cleanup_resources_with_thread_safe_printing', mock_cleanup_resources_with_thread_safe_printing) - - # Test successful cleanup - success, error_msg = utils._cleanup_resources_thread_safe( - 'test-deployment', 'test-rg', '[TEST]: ', utils.BOLD_G - ) - - assert success is True - assert not error_msg - assert len(original_calls) == 1 - assert original_calls[0] == ('test-deployment', 'test-rg') - - -def test_cleanup_resources_thread_safe_failure(monkeypatch): - """Test the thread-safe cleanup wrapper with exception handling.""" - def mock_cleanup_resources_with_thread_safe_printing(deployment_name, rg_name, thread_prefix, thread_color): - raise Exception("Simulated cleanup failure") - - monkeypatch.setattr(utils, '_cleanup_resources_with_thread_safe_printing', mock_cleanup_resources_with_thread_safe_printing) - - # Test failed cleanup - success, error_msg = utils._cleanup_resources_thread_safe( - 'test-deployment', 'test-rg', '[TEST]: ', utils.BOLD_G - ) - - assert success is False - assert "Simulated cleanup failure" in error_msg - - -def test_cleanup_infra_deployments_max_workers_limit(monkeypatch): - """Test that parallel cleanup properly handles different numbers of indexes.""" - cleanup_calls = [] - - def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): - cleanup_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) - return True, "" - - def mock_get_infra_rg_name(deployment, index): - return f'rg-{deployment.value}-{index}' - - # Mock Azure CLI calls to avoid real execution - def mock_run(*args, **kwargs): - return utils.Output(success=True, text='{}') - - monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) - - # Test with 6 indexes (should use parallel mode and handle all indexes) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3, 4, 5, 6]) - - # Verify all 6 cleanup calls were made - assert len(cleanup_calls) == 6, f"Expected 6 cleanup calls, got {len(cleanup_calls)}" - - # Check that the correct resource groups were targeted - expected_rgs = [f'rg-simple-apim-{i}' for i in range(1, 7)] - actual_rgs = [call[1] for call in cleanup_calls] - assert set(actual_rgs) == set(expected_rgs), f"Expected RGs {expected_rgs}, got {actual_rgs}" - - # Test with 2 indexes (should use parallel mode) - cleanup_calls.clear() - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2]) - - assert len(cleanup_calls) == 2, f"Expected 2 cleanup calls, got {len(cleanup_calls)}" - - # Test that thread prefixes and colors are assigned properly - for call in cleanup_calls: - deployment_name, _rg_name, thread_prefix, thread_color = call - assert deployment_name == 'simple-apim' - assert 'simple-apim' in thread_prefix - assert thread_color in utils.THREAD_COLORS - - -def test_cleanup_infra_deployments_thread_color_assignment(monkeypatch): - """Test that thread colors are assigned correctly and cycle through available colors.""" - cleanup_calls = [] - - def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, 
thread_color): - cleanup_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) - return True, "" - - def mock_get_infra_rg_name(deployment, index): - return f'apim-infra-{deployment.value}-{index}' - - # Mock Azure CLI calls to avoid real execution - def mock_run(*args, **kwargs): - return utils.Output(success=True, text='{}') - - monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) - - # Test with more indexes than available colors to verify cycling - num_colors = len(utils.THREAD_COLORS) - test_indexes = list(range(1, num_colors + 3)) # More than available colors - - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, test_indexes) - - # Sort the calls by the index extracted from the rg_name to check in deterministic order - cleanup_calls_sorted = sorted(cleanup_calls, key=lambda x: int(x[1].split('-')[-1])) - assigned_colors_sorted = [call[3] for call in cleanup_calls_sorted] - - # First num_colors should use each color once - for i in range(num_colors): - expected_color = utils.THREAD_COLORS[i % num_colors] - assert assigned_colors_sorted[i] == expected_color - - # Additional colors should cycle back to the beginning - if len(assigned_colors_sorted) > num_colors: - assert assigned_colors_sorted[num_colors] == utils.THREAD_COLORS[0] - assert assigned_colors_sorted[num_colors + 1] == utils.THREAD_COLORS[1] - - -def test_cleanup_infra_deployments_all_infrastructure_types(monkeypatch): - """Test cleanup_infra_deployments with all infrastructure types.""" - cleanup_calls = [] - - def mock_cleanup_resources(deployment_name, rg_name): - cleanup_calls.append((deployment_name, rg_name)) - - def mock_get_infra_rg_name(deployment, index): - return f'apim-infra-{deployment.value}-{index}' if index else f'apim-infra-{deployment.value}' - - monkeypatch.setattr(utils, '_cleanup_resources', mock_cleanup_resources) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - - # Test all infrastructure types - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 1) - utils.cleanup_infra_deployments(INFRASTRUCTURE.APIM_ACA, 2) - utils.cleanup_infra_deployments(INFRASTRUCTURE.AFD_APIM_PE, 3) - - # Verify correct calls were made - assert ('simple-apim', 'apim-infra-simple-apim-1') in cleanup_calls - assert ('apim-aca', 'apim-infra-apim-aca-2') in cleanup_calls - assert ('afd-apim-pe', 'apim-infra-afd-apim-pe-3') in cleanup_calls - - -def test_cleanup_infra_deployments_index_scenarios(monkeypatch): - """Test cleanup_infra_deployments with various index scenarios.""" - cleanup_calls = [] - thread_safe_calls = [] - - def mock_cleanup_resources(deployment_name, rg_name): - cleanup_calls.append((deployment_name, rg_name)) - - def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): - thread_safe_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) - return True, "" - - def mock_get_infra_rg_name(deployment, index): - return f'apim-infra-{deployment.value}-{index}' if index else f'apim-infra-{deployment.value}' - - # Mock Azure CLI calls to avoid real execution - def mock_run(*args, **kwargs): - return utils.Output(success=True, 
text='{}') - - monkeypatch.setattr(utils, '_cleanup_resources', mock_cleanup_resources) - monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) - - # Test None index (sequential) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, None) - - # Test single integer index (sequential) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 5) - - # Test single item list (sequential) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1]) - - # Test list of integers (parallel) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [2, 3]) - - # Test tuple of integers (parallel) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, (4, 5)) - - # Test empty list (sequential, with no index) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, []) - - # Verify sequential calls - expected_sequential_calls = [ - ('simple-apim', 'apim-infra-simple-apim'), # None index - ('simple-apim', 'apim-infra-simple-apim-5'), # Single index 5 - ('simple-apim', 'apim-infra-simple-apim-1'), # Single item list [1] - ('simple-apim', 'apim-infra-simple-apim'), # Empty list (None index) - ] - - for expected_call in expected_sequential_calls: - assert expected_call in cleanup_calls, f"Expected sequential call {expected_call} not found in {cleanup_calls}" - - # Verify parallel calls (extract just the deployment and rg_name parts) - parallel_calls = [(call[0], call[1]) for call in thread_safe_calls] - expected_parallel_calls = [ - ('simple-apim', 'apim-infra-simple-apim-2'), # List [2, 3] - first - ('simple-apim', 'apim-infra-simple-apim-3'), # List [2, 3] - second - ('simple-apim', 'apim-infra-simple-apim-4'), # Tuple (4, 5) - first - ('simple-apim', 'apim-infra-simple-apim-5'), # Tuple (4, 5) - second - ] - - for expected_call in expected_parallel_calls: - assert expected_call in parallel_calls, f"Expected parallel call {expected_call} not found in {parallel_calls}" - # ------------------------------ # EXTRACT_JSON EDGE CASES @@ -1388,200 +913,6 @@ def test_get_azure_role_guid_comprehensive(monkeypatch): assert result is None -def test_cleanup_functions_comprehensive(monkeypatch): - """Test cleanup functions with various scenarios.""" - run_commands = [] - - def mock_run(command, ok_message='', error_message='', print_output=False, print_command_to_run=True, print_errors=True, print_warnings=True): - run_commands.append(command) - - # Return appropriate mock responses - if 'deployment group show' in command: - return utils.Output(success=True, json_data={ - 'properties': {'provisioningState': 'Succeeded'} - }) - - # Return empty lists for resource queries to avoid complex mocking - if any(x in command for x in ['list -g', 'list']): - return utils.Output(success=True, json_data=[]) - - return utils.Output(success=True, text='{}') - - def mock_get_infra_rg_name(deployment, index): - return f'test-rg-{deployment.value}-{index}' if index else f'test-rg-{deployment.value}' - - monkeypatch.setattr(utils, 'run', mock_run) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_message', lambda *a, **kw: None) - - # Test _cleanup_resources (private 
function) - utils._cleanup_resources('test-deployment', 'test-rg') # Should not raise - - # Test cleanup_infra_deployments with INFRASTRUCTURE enum (correct function name and parameter type) - - # Test with all infrastructure types - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM) - utils.cleanup_infra_deployments(INFRASTRUCTURE.APIM_ACA, 1) - utils.cleanup_infra_deployments(INFRASTRUCTURE.AFD_APIM_PE, [1, 2]) - - # Verify commands were executed - assert len(run_commands) > 0 - - -def test_cleanup_edge_cases_comprehensive(monkeypatch): - """Test cleanup functions with edge cases and error conditions.""" - - # Test with different index types - cleanup_calls = [] - - def mock_cleanup_resources(deployment_name, rg_name): - cleanup_calls.append((deployment_name, rg_name)) - return True, "" - - def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): - cleanup_calls.append((deployment_name, rg_name)) - return True, "" - - def mock_get_infra_rg_name(deployment, index): - return f'rg-{deployment.value}-{index}' if index is not None else f'rg-{deployment.value}' - - # Mock Azure CLI calls to avoid real execution - def mock_run(*args, **kwargs): - return utils.Output(success=True, text='{}') - - monkeypatch.setattr(utils, '_cleanup_resources', mock_cleanup_resources) - monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) - - # Test with zero index (single index, uses sequential path) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 0) - assert ('simple-apim', 'rg-simple-apim-0') in cleanup_calls - - # Test with negative index (single index, uses sequential path) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, -1) - assert ('simple-apim', 'rg-simple-apim--1') in cleanup_calls - - # Test with large index (single index, uses sequential path) - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 9999) - assert ('simple-apim', 'rg-simple-apim-9999') in cleanup_calls - - # Test with mixed positive and negative indexes in list (multiple indexes, uses parallel path) - cleanup_calls.clear() - utils.cleanup_infra_deployments(INFRASTRUCTURE.APIM_ACA, [-1, 0, 1]) - expected = [ - ('apim-aca', 'rg-apim-aca--1'), - ('apim-aca', 'rg-apim-aca-0'), - ('apim-aca', 'rg-apim-aca-1') - ] - for call in expected: - assert call in cleanup_calls # Test with single-item list - cleanup_calls.clear() - utils.cleanup_infra_deployments(INFRASTRUCTURE.AFD_APIM_PE, [42]) - assert ('afd-apim-pe', 'rg-afd-apim-pe-42') in cleanup_calls - - -def test_cleanup_resources_partial_failures(monkeypatch): - """Test _cleanup_resources when some operations fail with parallel cleanup.""" - run_commands = [] - - def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): - run_commands.append(command) - - # Mock deployment show response - if 'deployment group show' in command: - return utils.Output(success=True, text='{"properties": {"provisioningState": "Failed"}}') - - # Mock resources exist - if 'cognitiveservices account list' in command: - return utils.Output(success=True, text='[{"name": "cog-service-1", "location": "eastus"}]') - - if 'apim list' in command: - return 
utils.Output(success=True, text='[{"name": "apim-service-1", "location": "eastus"}]') - - if 'keyvault list' in command: - return utils.Output(success=True, text='[{"name": "kv-vault-1", "location": "eastus"}]') - - # Simulate failure for delete operations but success for purge - if 'delete' in command and ('cognitiveservices' in command or 'apim delete' in command or 'keyvault delete' in command): - return utils.Output(success=False, text='Delete failed') - - # Simulate failure for purge operations - if 'purge' in command: - return utils.Output(success=False, text='Purge failed') - - # Resource group deletion succeeds - return utils.Output(success=True, text='Operation completed') - - monkeypatch.setattr(utils, 'run', mock_run) - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_message', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_success', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_error', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_warning', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) - - # Should not raise exception even when individual operations fail - utils._cleanup_resources('test-deployment', 'test-rg') - - # Verify all listing and group operations were attempted - # Note: With parallel cleanup, if delete fails, purge is not attempted (expected behavior) - expected_patterns = [ - 'deployment group show', - 'cognitiveservices account list', - 'apim list', - 'keyvault list', - 'group delete' - ] - - for pattern in expected_patterns: - assert any(pattern in cmd for cmd in run_commands), f"Expected command pattern not found: {pattern}" - - # Verify delete attempts were made (even though they failed) - delete_patterns = [ - 'cognitiveservices account delete', - 'apim delete', - 'keyvault delete' - ] - - for pattern in delete_patterns: - assert any(pattern in cmd for cmd in run_commands), f"Expected delete command pattern not found: {pattern}" - - -def test_cleanup_resources_malformed_responses(monkeypatch): - """Test _cleanup_resources with malformed API responses.""" - - def mock_run(command, ok_message='', error_message='', print_command_to_run=True, print_errors=True, print_warnings=True): - - # Mock deployment show with missing properties - if 'deployment group show' in command: - return utils.Output(success=True, text='{}') - - # Mock malformed resource responses (missing required fields) - if 'cognitiveservices account list' in command: - return utils.Output(success=True, text='[{"name": "cog-service-1"}, {"location": "eastus"}, {}]') - - if 'apim list' in command: - return utils.Output(success=True, text='[{"name": "apim-service-1"}, {"location": "eastus"}]') - - if 'keyvault list' in command: - return utils.Output(success=True, text='[{"name": "kv-vault-1"}]') - - # Default response for delete/purge operations - return utils.Output(success=True, text='Operation completed') - - monkeypatch.setattr(utils, 'run', mock_run) - monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(utils, 'print_message', lambda *a, **kw: None) - - # Should handle malformed responses gracefully without raising exceptions - utils._cleanup_resources('test-deployment', 'test-rg') - - - # ------------------------------ # INFRASTRUCTURE SELECTION TESTS # ------------------------------ From d16a843bed0905d0a4669b8bfd9efef292b1988d Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Wed, 10 Dec 2025 16:47:50 -0500 Subject: [PATCH 07/23] Move 
json utils tests --- shared/python/utils.py | 82 --------------------------------- tests/python/test_json_utils.py | 81 ++++++++++++++++++++++++++++++++ tests/python/test_utils.py | 81 ++------------------------------ 3 files changed, 86 insertions(+), 158 deletions(-) create mode 100644 tests/python/test_json_utils.py diff --git a/shared/python/utils.py b/shared/python/utils.py index cdaf66c..04ab8f3 100644 --- a/shared/python/utils.py +++ b/shared/python/utils.py @@ -628,7 +628,6 @@ def get_azure_role_guid(role_name: str) -> Optional[str]: return None - def create_bicep_deployment_group(rg_name: str, rg_location: str, deployment: str | INFRASTRUCTURE, bicep_parameters: dict, bicep_parameters_file: str = 'params.json', rg_tags: dict | None = None, is_debug: bool = False) -> Output: """ Create a Bicep deployment in a resource group, writing parameters to a file and running the deployment. @@ -693,7 +692,6 @@ def create_bicep_deployment_group(rg_name: str, rg_location: str, deployment: st print('\nDeploying bicep...\n') return run(cmd, f"Deployment '{deployment_name}' succeeded", f"Deployment '{deployment_name}' failed.", print_command_to_run = False) - # TODO: Reconcile this with apimtypes.py _get_project_root def find_project_root() -> str: """ @@ -720,7 +718,6 @@ def find_project_root() -> str: # If we can't find the project root, raise an error raise FileNotFoundError('Could not determine project root directory') - def create_bicep_deployment_group_for_sample(sample_name: str, rg_name: str, rg_location: str, bicep_parameters: dict, bicep_parameters_file: str = 'params.json', rg_tags: dict | None = None, is_debug: bool = False) -> Output: """ Create a Bicep deployment for a sample, handling the working directory change automatically. @@ -774,7 +771,6 @@ def create_bicep_deployment_group_for_sample(sample_name: str, rg_name: str, rg_ os.chdir(original_cwd) print(f'📁 Restored working directory to: {original_cwd}') - def create_resource_group(rg_name: str, resource_group_location: str | None = None, tags: dict | None = None) -> None: """ Create a resource group in Azure if it does not already exist. @@ -1065,80 +1061,6 @@ def read_policy_xml(policy_xml_filepath_or_filename: str, named_values: dict[str return policy_template_xml -def extract_json(text: str) -> Any: - """ - Extract the first valid JSON object or array from a string and return it as a Python object. - - This function searches the input string for the first occurrence of a JSON object or array (delimited by '{' or '['), - and attempts to decode it using json.JSONDecoder().raw_decode. If the input is already valid JSON, it is returned as a Python object. - If no valid JSON is found, None is returned. - - Args: - text (str): The string to search for a JSON object or array. - - Returns: - Any | None: The extracted JSON as a Python object (dict or list), or None if not found or not valid. - """ - - if not isinstance(text, str): - return None - - # If the string is already valid JSON, parse and return it as a Python object. 
-    if is_string_json(text):
-        try:
-            return json.loads(text)
-        except json.JSONDecodeError:
-            # If JSON parsing fails despite is_string_json returning True,
-            # fall through to substring search
-            pass
-
-    decoder = json.JSONDecoder()
-
-    for start in range(len(text)):
-        if text[start] in ('{', '['):
-            try:
-                obj, _ = decoder.raw_decode(text[start:])
-                return obj
-            except Exception:
-                continue
-
-    return None
-
-def is_string_json(text: str) -> bool:
-    """
-    Check if the provided string is a valid JSON object or array.
-
-    Args:
-        text (str): The string to check.
-
-    Returns:
-        bool: True if the string is valid JSON, False otherwise.
-    """
-
-    # Accept only str, bytes, or bytearray as valid input for JSON parsing.
-    if not isinstance(text, (str, bytes, bytearray)):
-        return False
-
-    # Skip empty or whitespace-only strings
-    if not text or not text.strip():
-        return False
-
-    # First try JSON parsing (handles double quotes)
-    try:
-        json.loads(text)
-        return True
-    except json.JSONDecodeError:
-        pass
-
-    # If JSON fails, try Python literal evaluation (handles single quotes)
-    try:
-        ast.literal_eval(text)
-        return True
-    except (ValueError, SyntaxError):
-        pass
-
-    return False
-
 def get_account_info() -> Tuple[str, str, str, str]:
     """
     Retrieve the current Azure account information using the Azure CLI.
@@ -1225,7 +1147,6 @@ def get_frontdoor_url(deployment_name: INFRASTRUCTURE, rg_name: str) -> str | No
 
     return afd_endpoint_url
 
-
 def get_apim_url(rg_name: str) -> str | None:
     """
     Retrieve the gateway URL for the API Management service in the specified resource group.
@@ -1255,7 +1176,6 @@ def get_apim_url(rg_name: str) -> str | None:
 
     return apim_endpoint_url
 
-
 def get_appgw_endpoint(rg_name: str) -> tuple[str | None, str | None]:
     """
     Retrieve the hostname and public IP address for the Application Gateway in the specified resource group.
@@ -1661,8 +1581,6 @@ def test_url_preflight_check(deployment: INFRASTRUCTURE, rg_name: str, apim_gate
 
     return endpoint_url
 
-
-
 def get_endpoints(deployment: INFRASTRUCTURE, rg_name: str) -> Endpoints:
     print_message(f'Identifying possible endpoints for infrastructure {deployment}...')
 
diff --git a/tests/python/test_json_utils.py b/tests/python/test_json_utils.py
new file mode 100644
index 0000000..443d969
--- /dev/null
+++ b/tests/python/test_json_utils.py
@@ -0,0 +1,81 @@
+"""
+Unit tests for json_utils.py.
+""" + +import pytest +import json +import json_utils + + +# ------------------------------ +# is_string_json +# ------------------------------ + +@pytest.mark.parametrize( + 'input_str,expected', + [ + ('{\"a\": 1}', True), + ('[1, 2, 3]', True), + ('not json', False), + ('{\"a\": 1', False), + ('', False), + (None, False), + (123, False), + ] +) +def test_is_string_json(input_str, expected): + assert json_utils.is_string_json(input_str) is expected + + +# ------------------------------ +# EXTRACT_JSON EDGE CASES +# ------------------------------ + +@pytest.mark.parametrize( + 'input_val,expected', + [ + (None, None), + (123, None), + ([], None), + ('', None), + (' ', None), + ('not json', None), + ('{\"a\": 1}', {'a': 1}), + ('[1, 2, 3]', [1, 2, 3]), + (' {\"a\": 1} ', {'a': 1}), + ('prefix {\"foo\": 42} suffix', {'foo': 42}), + ('prefix [1, 2, 3] suffix', [1, 2, 3]), + ('{\"a\": 1}{\"b\": 2}', {'a': 1}), # Only first JSON object + ('[1, 2, 3][4, 5, 6]', [1, 2, 3]), # Only first JSON array + ('{\"a\": [1, 2, {\"b\": 3}]}', {'a': [1, 2, {'b': 3}]}), + ('\n\t{\"a\": 1}\n', {'a': 1}), + ('{\"a\": \"b \\u1234\"}', {'a': 'b \u1234'}), + ('{\"a\": 1} [2, 3]', {'a': 1}), # Object before array + ('[2, 3] {\"a\": 1}', [2, 3]), # Array before object + ('{\"a\": 1, \"b\": {\"c\": 2}}', {'a': 1, 'b': {'c': 2}}), + ('{\"a\": 1, \"b\": [1, 2, 3]}', {'a': 1, 'b': [1, 2, 3]}), + ('\n\n[\n1, 2, 3\n]\n', [1, 2, 3]), + ('{\"a\": 1, \"b\": null}', {'a': 1, 'b': None}), + ('{\"a\": true, \"b\": false}', {'a': True, 'b': False}), + ('{\"a\": 1, \"b\": \"c\"}', {'a': 1, 'b': 'c'}), + ('{\"a\": 1, \"b\": [1, 2, {\"c\": 3}]} ', {'a': 1, 'b': [1, 2, {'c': 3}]}), + ('{\"a\": 1, \"b\": [1, 2, {\"c\": 3, \"d\": [4, 5]}]} ', {'a': 1, 'b': [1, 2, {'c': 3, 'd': [4, 5]}]}), + ] +) +def test_extract_json_edge_cases(input_val, expected): + """Test extract_json with a wide range of edge cases and malformed input.""" + result = json_utils.extract_json(input_val) + assert result == expected + +def test_extract_json_large_object(): + """Test extract_json with a large JSON object.""" + large_obj = {'a': list(range(1000)), 'b': {'c': 'x' * 1000}} + s = json.dumps(large_obj) + assert json_utils.extract_json(s) == large_obj + +def test_extract_json_multiple_json_types(): + """Test extract_json returns the first valid JSON (object or array) in the string.""" + s = '[1,2,3]{"a": 1}' + assert json_utils.extract_json(s) == [1, 2, 3] + s2 = '{"a": 1}[1,2,3]' + assert json_utils.extract_json(s2) == {'a': 1} diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index a25b306..1cec072 100644 --- a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -11,25 +11,7 @@ import pytest from apimtypes import INFRASTRUCTURE, APIM_SKU import utils - -# ------------------------------ -# is_string_json -# ------------------------------ - -@pytest.mark.parametrize( - 'input_str,expected', - [ - ('{\"a\": 1}', True), - ('[1, 2, 3]', True), - ('not json', False), - ('{\"a\": 1', False), - ('', False), - (None, False), - (123, False), - ] -) -def test_is_string_json(input_str, expected): - assert utils.is_string_json(input_str) is expected +import json_utils # ------------------------------ # get_account_info @@ -234,59 +216,6 @@ def mock_inspect_currentframe(): utils.read_policy_xml('policy.xml', {'key': 'value'}) -# ------------------------------ -# EXTRACT_JSON EDGE CASES -# ------------------------------ - -@pytest.mark.parametrize( - 'input_val,expected', - [ - (None, None), - (123, None), - ([], None), - ('', None), - (' 
', None), - ('not json', None), - ('{\"a\": 1}', {'a': 1}), - ('[1, 2, 3]', [1, 2, 3]), - (' {\"a\": 1} ', {'a': 1}), - ('prefix {\"foo\": 42} suffix', {'foo': 42}), - ('prefix [1, 2, 3] suffix', [1, 2, 3]), - ('{\"a\": 1}{\"b\": 2}', {'a': 1}), # Only first JSON object - ('[1, 2, 3][4, 5, 6]', [1, 2, 3]), # Only first JSON array - ('{\"a\": [1, 2, {\"b\": 3}]}', {'a': [1, 2, {'b': 3}]}), - ('\n\t{\"a\": 1}\n', {'a': 1}), - ('{\"a\": \"b \\u1234\"}', {'a': 'b \u1234'}), - ('{\"a\": 1} [2, 3]', {'a': 1}), # Object before array - ('[2, 3] {\"a\": 1}', [2, 3]), # Array before object - ('{\"a\": 1, \"b\": {\"c\": 2}}', {'a': 1, 'b': {'c': 2}}), - ('{\"a\": 1, \"b\": [1, 2, 3]}', {'a': 1, 'b': [1, 2, 3]}), - ('\n\n[\n1, 2, 3\n]\n', [1, 2, 3]), - ('{\"a\": 1, \"b\": null}', {'a': 1, 'b': None}), - ('{\"a\": true, \"b\": false}', {'a': True, 'b': False}), - ('{\"a\": 1, \"b\": \"c\"}', {'a': 1, 'b': 'c'}), - ('{\"a\": 1, \"b\": [1, 2, {\"c\": 3}]} ', {'a': 1, 'b': [1, 2, {'c': 3}]}), - ('{\"a\": 1, \"b\": [1, 2, {\"c\": 3, \"d\": [4, 5]}]} ', {'a': 1, 'b': [1, 2, {'c': 3, 'd': [4, 5]}]}), - ] -) -def test_extract_json_edge_cases(input_val, expected): - """Test extract_json with a wide range of edge cases and malformed input.""" - result = utils.extract_json(input_val) - assert result == expected - -def test_extract_json_large_object(): - """Test extract_json with a large JSON object.""" - large_obj = {'a': list(range(1000)), 'b': {'c': 'x' * 1000}} - s = json.dumps(large_obj) - assert utils.extract_json(s) == large_obj - -def test_extract_json_multiple_json_types(): - """Test extract_json returns the first valid JSON (object or array) in the string.""" - s = '[1,2,3]{"a": 1}' - assert utils.extract_json(s) == [1, 2, 3] - s2 = '{"a": 1}[1,2,3]' - assert utils.extract_json(s2) == {'a': 1} - # ------------------------------ # validate_infrastructure # ------------------------------ @@ -801,10 +730,10 @@ def mock_chdir(path): def test_extract_json_invalid_input(): """Test extract_json with various invalid inputs.""" - assert utils.extract_json(None) is None - assert utils.extract_json(123) is None - assert utils.extract_json([1, 2, 3]) is None - assert utils.extract_json('not json at all') is None + assert json_utils.extract_json(None) is None + assert json_utils.extract_json(123) is None + assert json_utils.extract_json([1, 2, 3]) is None + assert json_utils.extract_json('not json at all') is None def test_generate_signing_key_format(): From fde4a7cd77924bc20743c50afcf23ebcc823d84b Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Wed, 10 Dec 2025 17:20:18 -0500 Subject: [PATCH 08/23] Minor refactoring --- infrastructure/afd-apim-pe/clean-up.ipynb | 4 ++-- infrastructure/afd-apim-pe/create.ipynb | 8 ++++---- infrastructure/apim-aca/clean-up.ipynb | 4 ++-- infrastructure/apim-aca/create.ipynb | 8 ++++---- infrastructure/appgw-apim-pe/clean-up.ipynb | 4 ++-- infrastructure/appgw-apim-pe/create.ipynb | 8 ++++---- infrastructure/simple-apim/clean-up.ipynb | 4 ++-- infrastructure/simple-apim/create.ipynb | 8 ++++---- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/infrastructure/afd-apim-pe/clean-up.ipynb b/infrastructure/afd-apim-pe/clean-up.ipynb index 2cb800b..65c275f 100644 --- a/infrastructure/afd-apim-pe/clean-up.ipynb +++ b/infrastructure/afd-apim-pe/clean-up.ipynb @@ -15,13 +15,13 @@ "metadata": {}, "outputs": [], "source": [ - "import utils\n", "from apimtypes import INFRASTRUCTURE\n", + "from infrastructures import cleanup_infra_deployments\n", "\n", "deployment = 
INFRASTRUCTURE.AFD_APIM_PE\n", "indexes = [1]\n", "\n", - "utils.cleanup_infra_deployments(deployment, indexes)" + "cleanup_infra_deployments(deployment, indexes)" ] } ], diff --git a/infrastructure/afd-apim-pe/create.ipynb b/infrastructure/afd-apim-pe/create.ipynb index 20e65da..43091ae 100644 --- a/infrastructure/afd-apim-pe/create.ipynb +++ b/infrastructure/afd-apim-pe/create.ipynb @@ -19,8 +19,8 @@ "metadata": {}, "outputs": [], "source": [ - "import utils\n", - "from apimtypes import *\n", + "from apimtypes import APIM_SKU, INFRASTRUCTURE\n", + "from utils import InfrastructureNotebookHelper, print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -36,10 +36,10 @@ "# SYSTEM CONFIGURATION\n", "# ------------------------------\n", "\n", - "inb_helper = utils.InfrastructureNotebookHelper(rg_location, INFRASTRUCTURE.AFD_APIM_PE, index, apim_sku) \n", + "inb_helper = InfrastructureNotebookHelper(rg_location, INFRASTRUCTURE.AFD_APIM_PE, index, apim_sku)\n", "inb_helper.create_infrastructure()\n", "\n", - "utils.print_ok('All done!')" + "print_ok('All done!')" ] }, { diff --git a/infrastructure/apim-aca/clean-up.ipynb b/infrastructure/apim-aca/clean-up.ipynb index 705de29..81834a7 100644 --- a/infrastructure/apim-aca/clean-up.ipynb +++ b/infrastructure/apim-aca/clean-up.ipynb @@ -15,13 +15,13 @@ "metadata": {}, "outputs": [], "source": [ - "import utils\n", "from apimtypes import INFRASTRUCTURE\n", + "from infrastructures import cleanup_infra_deployments\n", "\n", "deployment = INFRASTRUCTURE.APIM_ACA\n", "indexes = [1]\n", "\n", - "utils.cleanup_infra_deployments(deployment, indexes)" + "cleanup_infra_deployments(deployment, indexes)" ] } ], diff --git a/infrastructure/apim-aca/create.ipynb b/infrastructure/apim-aca/create.ipynb index 7e8b99e..8c5f107 100644 --- a/infrastructure/apim-aca/create.ipynb +++ b/infrastructure/apim-aca/create.ipynb @@ -17,8 +17,8 @@ "metadata": {}, "outputs": [], "source": [ - "import utils\n", - "from apimtypes import *\n", + "from apimtypes import APIM_SKU, INFRASTRUCTURE\n", + "from utils import InfrastructureNotebookHelper, print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -34,10 +34,10 @@ "# SYSTEM CONFIGURATION\n", "# ------------------------------\n", "\n", - "inb_helper = utils.InfrastructureNotebookHelper(rg_location, INFRASTRUCTURE.APIM_ACA, index, apim_sku) \n", + "inb_helper = InfrastructureNotebookHelper(rg_location, INFRASTRUCTURE.APIM_ACA, index, apim_sku)\n", "inb_helper.create_infrastructure()\n", "\n", - "utils.print_ok('All done!')" + "print_ok('All done!')" ] }, { diff --git a/infrastructure/appgw-apim-pe/clean-up.ipynb b/infrastructure/appgw-apim-pe/clean-up.ipynb index 010dfbb..151cad6 100644 --- a/infrastructure/appgw-apim-pe/clean-up.ipynb +++ b/infrastructure/appgw-apim-pe/clean-up.ipynb @@ -15,13 +15,13 @@ "metadata": {}, "outputs": [], "source": [ - "import utils\n", "from apimtypes import INFRASTRUCTURE\n", + "from infrastructures import cleanup_infra_deployments\n", "\n", "deployment = INFRASTRUCTURE.APPGW_APIM_PE\n", "indexes = [1]\n", "\n", - "utils.cleanup_infra_deployments(deployment, indexes)" + "cleanup_infra_deployments(deployment, indexes)" ] } ], diff --git a/infrastructure/appgw-apim-pe/create.ipynb b/infrastructure/appgw-apim-pe/create.ipynb index 65eae81..545453f 100644 --- a/infrastructure/appgw-apim-pe/create.ipynb +++ b/infrastructure/appgw-apim-pe/create.ipynb @@ -19,8 +19,8 @@ "metadata": {}, "outputs": [], "source": [ - "import utils\n", - "from 
apimtypes import *\n", + "from apimtypes import APIM_SKU, INFRASTRUCTURE\n", + "from utils import InfrastructureNotebookHelper, print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -36,10 +36,10 @@ "# SYSTEM CONFIGURATION\n", "# ------------------------------\n", "\n", - "inb_helper = utils.InfrastructureNotebookHelper(rg_location, INFRASTRUCTURE.APPGW_APIM_PE, index, apim_sku)\n", + "inb_helper = InfrastructureNotebookHelper(rg_location, INFRASTRUCTURE.APPGW_APIM_PE, index, apim_sku)\n", "inb_helper.create_infrastructure()\n", "\n", - "utils.print_ok('All done!')" + "print_ok('All done!')" ] }, { diff --git a/infrastructure/simple-apim/clean-up.ipynb b/infrastructure/simple-apim/clean-up.ipynb index 9307142..722b2b3 100644 --- a/infrastructure/simple-apim/clean-up.ipynb +++ b/infrastructure/simple-apim/clean-up.ipynb @@ -15,13 +15,13 @@ "metadata": {}, "outputs": [], "source": [ - "import utils\n", "from apimtypes import INFRASTRUCTURE\n", + "from infrastructures import cleanup_infra_deployments\n", "\n", "deployment = INFRASTRUCTURE.SIMPLE_APIM\n", "indexes = [1]\n", "\n", - "utils.cleanup_infra_deployments(deployment, indexes)" + "cleanup_infra_deployments(deployment, indexes)" ] } ], diff --git a/infrastructure/simple-apim/create.ipynb b/infrastructure/simple-apim/create.ipynb index cc5f545..8be0418 100644 --- a/infrastructure/simple-apim/create.ipynb +++ b/infrastructure/simple-apim/create.ipynb @@ -17,8 +17,8 @@ "metadata": {}, "outputs": [], "source": [ - "import utils\n", - "from apimtypes import *\n", + "from apimtypes import APIM_SKU, INFRASTRUCTURE\n", + "from utils import InfrastructureNotebookHelper, print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -34,10 +34,10 @@ "# SYSTEM CONFIGURATION\n", "# ------------------------------\n", "\n", - "inb_helper = utils.InfrastructureNotebookHelper(rg_location, INFRASTRUCTURE.SIMPLE_APIM, index, apim_sku) \n", + "inb_helper = InfrastructureNotebookHelper(rg_location, INFRASTRUCTURE.SIMPLE_APIM, index, apim_sku)\n", "inb_helper.create_infrastructure()\n", "\n", - "utils.print_ok('All done!')" + "print_ok('All done!')" ] }, { From a34aaf00722fa0ad2e80995b8d2e2437725817a1 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Wed, 10 Dec 2025 17:25:47 -0500 Subject: [PATCH 09/23] Minor refactoring --- infrastructure/afd-apim-pe/clean-up.ipynb | 5 ++--- infrastructure/apim-aca/clean-up.ipynb | 5 ++--- infrastructure/appgw-apim-pe/clean-up.ipynb | 5 ++--- infrastructure/simple-apim/clean-up.ipynb | 5 ++--- 4 files changed, 8 insertions(+), 12 deletions(-) diff --git a/infrastructure/afd-apim-pe/clean-up.ipynb b/infrastructure/afd-apim-pe/clean-up.ipynb index 65c275f..38d93d9 100644 --- a/infrastructure/afd-apim-pe/clean-up.ipynb +++ b/infrastructure/afd-apim-pe/clean-up.ipynb @@ -18,10 +18,9 @@ "from apimtypes import INFRASTRUCTURE\n", "from infrastructures import cleanup_infra_deployments\n", "\n", - "deployment = INFRASTRUCTURE.AFD_APIM_PE\n", - "indexes = [1]\n", + "indexes = [1]\n", "\n", - "cleanup_infra_deployments(deployment, indexes)" + "cleanup_infra_deployments(INFRASTRUCTURE.AFD_APIM_PE, indexes)" ] } ], diff --git a/infrastructure/apim-aca/clean-up.ipynb b/infrastructure/apim-aca/clean-up.ipynb index 81834a7..f5a75e4 100644 --- a/infrastructure/apim-aca/clean-up.ipynb +++ b/infrastructure/apim-aca/clean-up.ipynb @@ -18,10 +18,9 @@ "from apimtypes import INFRASTRUCTURE\n", "from infrastructures import cleanup_infra_deployments\n", "\n", - "deployment = 
INFRASTRUCTURE.APIM_ACA\n", - "indexes = [1]\n", + "indexes = [1]\n", "\n", - "cleanup_infra_deployments(deployment, indexes)" + "cleanup_infra_deployments(INFRASTRUCTURE.APIM_ACA, indexes)" ] } ], diff --git a/infrastructure/appgw-apim-pe/clean-up.ipynb b/infrastructure/appgw-apim-pe/clean-up.ipynb index 151cad6..3de98be 100644 --- a/infrastructure/appgw-apim-pe/clean-up.ipynb +++ b/infrastructure/appgw-apim-pe/clean-up.ipynb @@ -18,10 +18,9 @@ "from apimtypes import INFRASTRUCTURE\n", "from infrastructures import cleanup_infra_deployments\n", "\n", - "deployment = INFRASTRUCTURE.APPGW_APIM_PE\n", - "indexes = [1]\n", + "indexes = [1]\n", "\n", - "cleanup_infra_deployments(deployment, indexes)" + "cleanup_infra_deployments(INFRASTRUCTURE.APPGW_APIM_PE, indexes)" ] } ], diff --git a/infrastructure/simple-apim/clean-up.ipynb b/infrastructure/simple-apim/clean-up.ipynb index 722b2b3..1c0d70c 100644 --- a/infrastructure/simple-apim/clean-up.ipynb +++ b/infrastructure/simple-apim/clean-up.ipynb @@ -18,10 +18,9 @@ "from apimtypes import INFRASTRUCTURE\n", "from infrastructures import cleanup_infra_deployments\n", "\n", - "deployment = INFRASTRUCTURE.SIMPLE_APIM\n", - "indexes = [1]\n", + "indexes = [1]\n", "\n", - "cleanup_infra_deployments(deployment, indexes)" + "cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, indexes)" ] } ], From 8b09bf3dca2c2bb2e3d7b7495185770cf9cc0ad0 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Thu, 11 Dec 2025 11:31:52 -0500 Subject: [PATCH 10/23] Move Azure resource interactions to new azure_resources.py file --- .vscode/settings.json | 7 + shared/python/azure_resources.py | 692 +++++++++++++++++++++++++++ shared/python/utils.py | 602 +---------------------- tests/python/test_azure_resources.py | 466 ++++++++++++++++++ tests/python/test_utils.py | 508 +------------------- 5 files changed, 1180 insertions(+), 1095 deletions(-) create mode 100644 shared/python/azure_resources.py create mode 100644 tests/python/test_azure_resources.py diff --git a/.vscode/settings.json b/.vscode/settings.json index e9e706b..af7072e 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -18,6 +18,13 @@ "python.defaultInterpreterPath": "./.venv/Scripts/python.exe", "python.pythonPath": "./.venv/Scripts/python.exe", "python.envFile": "${workspaceFolder}/.env", + "[python]": { + "editor.codeActionsOnSave": { + "source.organizeImports": "explicit", + "source.unusedImports": "explicit" + }, + "editor.formatOnSave": true + }, "notebook.defaultLanguage": "python", "notebook.kernelPickerType": "mru", "terminal.integrated.defaultProfile.windows": "PowerShell", diff --git a/shared/python/azure_resources.py b/shared/python/azure_resources.py new file mode 100644 index 0000000..00bfb68 --- /dev/null +++ b/shared/python/azure_resources.py @@ -0,0 +1,692 @@ +""" +Module providing Azure resource management functions, often wrapped with additional functionality. + +This module contains functions for interacting with Azure resources, +including resource groups, deployments, and various Azure services. 
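+
+Example (a minimal sketch; the infrastructure type and index are illustrative):
+
+    from apimtypes import INFRASTRUCTURE
+    import azure_resources
+
+    rg_name = azure_resources.get_infra_rg_name(INFRASTRUCTURE.SIMPLE_APIM, 1)
+
+    if azure_resources.does_resource_group_exist(rg_name):
+        print(azure_resources.get_resource_group_location(rg_name))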
+""" + +import json +import time +import tempfile +import os +import re +import subprocess +import traceback + +from typing import Tuple, Optional +from apimtypes import INFRASTRUCTURE, Endpoints, Output +from console import print_ok, print_warning, print_error, print_val, print_message, print_info, print_command, print_success + + +# ------------------------------ +# PRIVATE FUNCTIONS +# ------------------------------ + +def _run(command: str, ok_message: str = '', error_message: str = '', print_output: bool = False, print_command_to_run: bool = True, print_errors: bool = True, print_warnings: bool = True) -> Output: + """ + Execute a shell command, log the command and its output, and attempt to extract JSON from the output. + + Args: + command (str): The shell command to execute. + ok_message (str, optional): Message to print if the command succeeds. Defaults to ''. + error_message (str, optional): Message to print if the command fails. Defaults to ''. + print_output (bool, optional): Whether to print the command output on failure. Defaults to False. + print_command_to_run (bool, optional): Whether to print the command before running it. Defaults to True. + print_errors (bool, optional): Whether to log error lines from the output. Defaults to True. + print_warnings (bool, optional): Whether to log warning lines from the output. Defaults to True. + + Returns: + Output: An Output object containing success status, text, and parsed JSON data. + """ + + if print_command_to_run: + print_command(command) + + start_time = time.time() + + # Execute the command and capture the output + try: + output_text = subprocess.check_output(command, shell = True, stderr = subprocess.STDOUT).decode('utf-8') + success = True + except Exception as e: + # Handles both CalledProcessError and any custom/other exceptions (for test mocks) + output_text = getattr(e, 'output', b'').decode('utf-8') if hasattr(e, 'output') and isinstance(e.output, (bytes, bytearray)) else str(e) + success = False + + if print_errors: + print_error(f'Command failed with error: {output_text}', duration = f'[{int((time.time() - start_time) // 60)}m:{int((time.time() - start_time) % 60)}s]') + traceback.print_exc() + + if print_output: + print(f'Command output:\n{output_text}') + + minutes, seconds = divmod(time.time() - start_time, 60) + + # Only print failures, warnings, or errors if print_output is True + if print_output: + for line in output_text.splitlines(): + l = line.strip() + + # Only log and skip lines that start with 'warning' or 'error' (case-insensitive) + if l.lower().startswith('warning'): + if l and print_warnings: + print_warning(l) + continue + elif l.lower().startswith('error'): + if l and print_errors: + print_error(l) + continue + + print_message = print_ok if success else print_error + + if (ok_message or error_message): + print_message(ok_message if success else error_message, output_text if not success or print_output else '', f'[{int(minutes)}m:{int(seconds)}s]') + + return Output(success, output_text) + + +# ------------------------------ +# PUBLIC FUNCTIONS +# ------------------------------ + +def cleanup_old_jwt_signing_keys(apim_name: str, resource_group_name: str, current_jwt_key_name: str) -> bool: + """ + Clean up old JWT signing keys from APIM named values for the same sample folder, keeping only the current key. 
+ Uses regex matching to identify keys that belong to the same sample folder by extracting the sample folder + name from the current key and matching against the pattern 'JwtSigningKey-{sample_folder}-{timestamp}'. + + Args: + apim_name (str): Name of the APIM service + resource_group_name (str): Name of the resource group containing APIM + current_jwt_key_name (str): Name of the current JWT key to preserve (format: JwtSigningKey-{sample_folder}-{timestamp}) + + Returns: + bool: True if cleanup was successful, False otherwise + """ + + try: + print_message('🧹 Cleaning up old JWT signing keys for the same sample folder...', blank_above = True) + + # Extract sample folder name from current JWT key using regex + # Pattern: JwtSigningKey-{sample_folder}-{timestamp} + current_key_pattern = r'^JwtSigningKey-(.+)-\d+$' + current_key_match = re.match(current_key_pattern, current_jwt_key_name) + + if not current_key_match: + print_error(f"Current JWT key name '{current_jwt_key_name}' does not match expected pattern 'JwtSigningKey-{{sample_folder}}-{{timestamp}}'") + return False + + sample_folder = current_key_match.group(1) + print_info(f"Identified sample folder: '{sample_folder}'") + + # Get all named values that start with 'JwtSigningKey' + print_info(f"Getting all JWT signing key named values from APIM '{apim_name}'...") + + output = _run( + f'az apim nv list --service-name "{apim_name}" --resource-group "{resource_group_name}" --query "[?contains(name, \'JwtSigningKey\')].name" -o tsv', + 'Retrieved JWT signing keys', + 'Failed to retrieve JWT signing keys' + ) + + if not output.success: + print_error('Failed to retrieve JWT signing keys from APIM.') + return False + + if not output.text.strip(): + print_info('No JWT signing keys found. Nothing to clean up.') + return True + + # Parse the list of JWT keys + jwt_keys = [key.strip() for key in output.text.strip().split('\n') if key.strip()] + + # print_info(f'Found {len(jwt_keys)} total JWT signing keys.') + + # Filter keys that belong to the same sample folder using regex + sample_key_pattern = rf'^JwtSigningKey-{re.escape(sample_folder)}-\d+$' + sample_folder_keys = [key for key in jwt_keys if re.match(sample_key_pattern, key)] + + print_info(f"Found {len(sample_folder_keys)} JWT signing keys for sample folder '{sample_folder}'.") + + # Process each JWT key for this sample folder + deleted_count = 0 + kept_count = 0 + + for jwt_key in sample_folder_keys: + if jwt_key == current_jwt_key_name: + print_info(f'Keeping current JWT key: {jwt_key}') + kept_count += 1 + else: + print_info(f'Deleting old JWT key: {jwt_key}') + delete_output = _run( + f'az apim nv delete --service-name "{apim_name}" --resource-group "{resource_group_name}" --named-value-id "{jwt_key}" --yes', + f'Deleted old JWT key: {jwt_key}', + f'Failed to delete JWT key: {jwt_key}', + print_errors = False + ) + + if delete_output.success: + deleted_count += 1 + + # Summary + print_success(f"JWT signing key cleanup completed for sample '{sample_folder}'. Deleted {deleted_count} old key(s), kept {kept_count}.", blank_above = True) + return True + + except Exception as e: + print_error(f'Error during JWT key cleanup: {str(e)}') + return False + +def check_apim_blob_permissions(apim_name: str, storage_account_name: str, resource_group_name: str, max_wait_minutes: int = 10) -> bool: + """ + Check if APIM's managed identity has Storage Blob Data Reader permissions on the storage account. + Waits for role assignments to propagate across Azure AD, which can take several minutes. 
+
+    Args:
+        apim_name (str): The name of the API Management service.
+        storage_account_name (str): The name of the storage account.
+        resource_group_name (str): The name of the resource group.
+        max_wait_minutes (int, optional): Maximum time to wait for permissions to propagate. Defaults to 10.
+
+    Returns:
+        bool: True if APIM has the required permissions, False otherwise.
+    """
+
+    print_info(f"🔍 Checking if APIM '{apim_name}' has Storage Blob Data Reader permissions on '{storage_account_name}' in resource group '{resource_group_name}'...")
+
+    # Storage Blob Data Reader role definition ID
+    blob_reader_role_id = get_azure_role_guid('StorageBlobDataReader')
+
+    # Get APIM's managed identity principal ID
+    print_info('Getting APIM managed identity...')
+    apim_identity_output = _run(
+        f'az apim show --name {apim_name} --resource-group {resource_group_name} --query identity.principalId -o tsv',
+        error_message='Failed to get APIM managed identity',
+        print_command_to_run=True
+    )
+
+    if not apim_identity_output.success or not apim_identity_output.text.strip():
+        print_error('Could not retrieve APIM managed identity principal ID')
+        return False
+
+    principal_id = apim_identity_output.text.strip()
+    print_info(f'APIM managed identity principal ID: {principal_id}')
+
+    # Get the storage account resource ID, then extract it from the raw output with a regex.
+    storage_account_output = _run(
+        f'az storage account show --name {storage_account_name} --resource-group {resource_group_name} --query id -o tsv',
+        error_message='Failed to get storage account resource ID',
+        print_command_to_run=True
+    )
+
+    if not storage_account_output.success:
+        print_error('Could not retrieve storage account resource ID')
+        return False
+
+    # Extract resource ID using regex pattern, ignoring any warning text
+    resource_id_pattern = r'/subscriptions/[a-f0-9-]+/resourceGroups/[^/]+/providers/Microsoft\.Storage/storageAccounts/[^/\s]+'
+    match = re.search(resource_id_pattern, storage_account_output.text)
+
+    if not match:
+        print_error('Could not parse storage account resource ID from output')
+        return False
+
+    storage_account_id = match.group(0)
+
+    # Check for role assignment with retry logic for propagation
+    max_wait_seconds = max_wait_minutes * 60
+    wait_interval = 30  # Check every 30 seconds
+    elapsed_time = 0
+
+    print_info(f'Checking role assignment (will wait up to {max_wait_minutes} minute(s) for propagation)...')
+
+    while elapsed_time < max_wait_seconds:
+        # Check if role assignment exists
+        role_assignment_output = _run(
+            f"az role assignment list --assignee {principal_id} --scope {storage_account_id} --role {blob_reader_role_id} --query '[0].id' -o tsv",
+            error_message='Failed to check role assignment',
+            print_command_to_run=True,
+            print_errors=False
+        )
+
+        if role_assignment_output.success and role_assignment_output.text.strip():
+            print_success(f'Role assignment found! 
APIM managed identity has Storage Blob Data Reader permissions.') + + # Additional check: try to test blob access using the managed identity + print_info('Testing actual blob access...') + test_access_output = _run( + f"az storage blob list --account-name {storage_account_name} --container-name samples --auth-mode login --only-show-errors --query '[0].name' -o tsv 2>/dev/null || echo 'access-test-failed'", + error_message='', + print_command_to_run=True, + print_errors=False + ) + + if test_access_output.success and test_access_output.text.strip() != 'access-test-failed': + print_success('Blob access test successful!') + return True + else: + print_warning('Role assignment exists but blob access test failed. Permissions may still be propagating...') + + if elapsed_time == 0: + print_info(f'Role assignment not found yet. Waiting for Azure AD propagation...') + else: + print_info(f'Still waiting... ({elapsed_time // 60}m {elapsed_time % 60}s elapsed)') + + if elapsed_time + wait_interval >= max_wait_seconds: + break + + time.sleep(wait_interval) + elapsed_time += wait_interval + + print_error(f'Timeout: Role assignment not found after {max_wait_minutes} minutes.') + print_info('This is likely due to Azure AD propagation delays. You can:') + print_info('1. Wait a few more minutes and try again') + print_info('2. Manually verify the role assignment in the Azure portal') + print_info('3. Check the deployment logs for any errors') + + return False + +def find_infrastructure_instances(infrastructure: INFRASTRUCTURE) -> list[tuple[INFRASTRUCTURE, int | None]]: + """ + Find all instances of a specific infrastructure type by querying Azure resource groups. + + Args: + infrastructure (INFRASTRUCTURE): The infrastructure type to search for. + + Returns: + list: List of tuples (infrastructure, index) for found instances. + """ + + instances = [] + + # Query Azure for resource groups with the infrastructure tag + query_cmd = f'az group list --tag infrastructure={infrastructure.value} --query "[].name" -o tsv' + output = _run(query_cmd, print_command_to_run = False, print_errors = False) + + if output.success and output.text.strip(): + rg_names = [name.strip() for name in output.text.strip().split('\n') if name.strip()] + + for rg_name in rg_names: + # Parse the resource group name to extract the index + # Expected format: apim-infra-{infrastructure}-{index} or apim-infra-{infrastructure} + prefix = f'apim-infra-{infrastructure.value}' + + if rg_name == prefix: + # No index + instances.append((infrastructure, None)) + elif rg_name.startswith(prefix + '-'): + # Has index + try: + index_str = rg_name[len(prefix + '-'):] + index = int(index_str) + instances.append((infrastructure, index)) + except ValueError: + # Invalid index format, skip + continue + + return instances + +def create_resource_group(rg_name: str, resource_group_location: str | None = None, tags: dict | None = None) -> None: + """ + Create a resource group in Azure if it does not already exist. + + Args: + rg_name (str): Name of the resource group. + resource_group_location (str, optional): Azure region for the resource group. + tags (dict, optional): Additional tags to apply to the resource group. 
+ + Returns: + None + """ + + if not does_resource_group_exist(rg_name): + # Build the tags string for the Azure CLI command + tag_string = 'source=apim-sample' + if tags: + for key, value in tags.items(): + # Escape values that contain spaces or special characters + escaped_value = value.replace('"', '\\"') if isinstance(value, str) else str(value) + tag_string += f' {key}=\"{escaped_value}\"' + + _run(f'az group create --name {rg_name} --location {resource_group_location} --tags {tag_string}', + f"Resource group '{rg_name}' created", + f"Failed to create the resource group '{rg_name}'", + False, False, False, False) + +def get_azure_role_guid(role_name: str) -> Optional[str]: + """ + Load the Azure roles JSON file and return the GUID for the specified role name. + + Args: + role_name (str): The name of the Azure role (e.g., 'StorageBlobDataReader'). + + Returns: + Optional[str]: The GUID of the role if found, None if not found or file cannot be loaded. + """ + try: + # Get the directory of the current script to build the path to azure-roles.json + current_dir = os.path.dirname(os.path.abspath(__file__)) + roles_file_path = os.path.join(current_dir, '..', 'azure-roles.json') + + # Normalize the path for cross-platform compatibility + roles_file_path = os.path.normpath(roles_file_path) + + # Load the JSON file + with open(roles_file_path, 'r', encoding='utf-8') as file: + roles_data: dict[str, str] = json.load(file) + + # Return the GUID for the specified role name + return roles_data.get(role_name) + + except (FileNotFoundError, json.JSONDecodeError, OSError) as e: + print_error(f'Failed to load Azure roles from {roles_file_path}: {str(e)}') + + return None + +def does_resource_group_exist(resource_group_name: str) -> bool: + """ + Check if a resource group exists in the current Azure subscription. + + Args: + resource_group_name (str): The name of the resource group to check. + + Returns: + bool: True if the resource group exists, False otherwise. + """ + + output = _run(f'az group show --name {resource_group_name} -o json', print_command_to_run = False, print_errors = False) + + return output.success + +def get_resource_group_location(resource_group_name: str) -> str | None: + """ + Get the location of an existing resource group. + + Args: + resource_group_name (str): The name of the resource group. + + Returns: + str | None: The location of the resource group if found, otherwise None. + """ + + output = _run(f'az group show --name {resource_group_name} --query "location" -o tsv', print_command_to_run = False, print_errors = False) + + if output.success and output.text.strip(): + return output.text.strip() + + return None + +def get_account_info() -> Tuple[str, str, str, str]: + """ + Retrieve the current Azure account information using the Azure CLI. + + Returns: + tuple: (current_user, current_user_id, tenant_id, subscription_id) + + Raises: + Exception: If account information cannot be retrieved. 
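+
+    Example (illustrative; assumes a signed-in Azure CLI session):
+
+        current_user, current_user_id, tenant_id, subscription_id = get_account_info()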
+ """ + + account_show_output = _run('az account show', 'Retrieved az account', 'Failed to get the current az account', print_command_to_run = False) + ad_user_show_output = _run('az ad signed-in-user show', 'Retrieved az ad signed-in-user', 'Failed to get the current az ad signed-in-user', print_command_to_run = False) + + if account_show_output.success and account_show_output.json_data and ad_user_show_output.success and ad_user_show_output.json_data: + current_user = account_show_output.json_data['user']['name'] + tenant_id = account_show_output.json_data['tenantId'] + subscription_id = account_show_output.json_data['id'] + current_user_id = ad_user_show_output.json_data['id'] + + print_val('Current user', current_user) + print_val('Current user ID', current_user_id) + print_val('Tenant ID', tenant_id) + print_val('Subscription ID', subscription_id) + + return current_user, current_user_id, tenant_id, subscription_id + else: + error = 'Failed to retrieve account information. Please ensure the Azure CLI is installed, you are logged in, and the subscription is set correctly.' + print_error(error) + raise Exception(error) + +def get_deployment_name(directory_name: str | None = None) -> str: + """ + Get a standardized deployment name based on the working directory. + + Args: + directory_name (str | None): Optional directory name. If None, uses current working directory. + + Returns: + str: The deployment name based on the directory. + """ + + if directory_name is None: + directory_name = os.path.basename(os.getcwd()) + + deployment_name = f'deploy-{directory_name}-{int(time.time())}' + + print_val('Deployment name', deployment_name) + return deployment_name + +def get_frontdoor_url(deployment_name: INFRASTRUCTURE, rg_name: str) -> str | None: + """ + Retrieve the secure URL for the first endpoint in the first Azure Front Door Standard/Premium profile in the specified resource group. + + Args: + deployment_name (INFRASTRUCTURE): The infrastructure deployment enum value. Should be INFRASTRUCTURE.AFD_APIM_PE for AFD scenarios. + rg_name (str): The name of the resource group containing the Front Door profile. + + Returns: + str | None: The secure URL (https) of the first endpoint if found, otherwise None. + """ + + afd_endpoint_url: str | None = None + + if deployment_name == INFRASTRUCTURE.AFD_APIM_PE: + output = _run(f'az afd profile list -g {rg_name} -o json') + + if output.success and output.json_data: + afd_profile_name = output.json_data[0]['name'] + print_ok(f'Front Door Profile Name: {afd_profile_name}', blank_above = False) + + if afd_profile_name: + output = _run(f'az afd endpoint list -g {rg_name} --profile-name {afd_profile_name} -o json') + + if output.success and output.json_data: + afd_hostname = output.json_data[0]['hostName'] + + if afd_hostname: + afd_endpoint_url = f'https://{afd_hostname}' + + if afd_endpoint_url: + print_ok(f'Front Door Endpoint URL: {afd_endpoint_url}', blank_above = False) + else: + print_warning('No Front Door endpoint URL found.') + + return afd_endpoint_url + +def get_apim_url(rg_name: str) -> str | None: + """ + Retrieve the gateway URL for the API Management service in the specified resource group. + + Args: + rg_name (str): The name of the resource group containing the APIM service. + + Returns: + str | None: The gateway URL (https) of the APIM service if found, otherwise None. 
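+
+    Example (a minimal sketch; the resource group name is illustrative):
+
+        apim_url = get_apim_url('apim-infra-simple-apim-1')
+
+        if apim_url:
+            print(f'APIM gateway: {apim_url}')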
+ """ + + apim_endpoint_url: str | None = None + + output = _run(f'az apim list -g {rg_name} -o json', print_command_to_run = False) + + if output.success and output.json_data: + apim_gateway_url = output.json_data[0]['gatewayUrl'] + print_ok(f'APIM Service Name: {output.json_data[0]["name"]}', blank_above = False) + + if apim_gateway_url: + apim_endpoint_url = apim_gateway_url + + if apim_endpoint_url: + print_ok(f'APIM Gateway URL: {apim_endpoint_url}', blank_above = False) + else: + print_warning('No APIM gateway URL found.') + + return apim_endpoint_url + +def get_appgw_endpoint(rg_name: str) -> Tuple[str | None, str | None]: + """ + Retrieve the hostname and public IP address for the Application Gateway in the specified resource group. + + Args: + rg_name (str): The name of the resource group containing the Application Gateway. + + Returns: + Tuple[str | None, str | None]: A tuple containing (hostname, public_ip) if found, otherwise (None, None). + """ + + hostname: str | None = None + public_ip: str | None = None + + # Get Application Gateway details + output = _run(f'az network application-gateway list -g {rg_name} -o json', print_command_to_run = False) + + if output.success and output.json_data: + appgw_name = output.json_data[0]['name'] + print_ok(f'Application Gateway Name: {appgw_name}', blank_above = False) + + # Get hostname + http_listeners = output.json_data[0].get('httpListeners', []) + + for listener in http_listeners: + # Assume that only a single hostname is used, not the hostnames array + if listener.get('hostName'): + hostname = listener['hostName'] + + # Get frontend IP configuration to find public IP reference + frontend_ip_configs = output.json_data[0].get('frontendIPConfigurations', []) + public_ip_id = None + + for config in frontend_ip_configs: + if config.get('publicIPAddress'): + public_ip_id = config['publicIPAddress']['id'] + break + + if public_ip_id: + # Extract public IP name from the resource ID + public_ip_name = public_ip_id.split('/')[-1] + + # Get public IP details + ip_output = _run(f'az network public-ip show -g {rg_name} -n {public_ip_name} -o json', print_command_to_run = False) + + if ip_output.success and ip_output.json_data: + public_ip = ip_output.json_data.get('ipAddress') + + return hostname, public_ip + +def get_infra_rg_name(deployment_name: INFRASTRUCTURE, index: int | None = None) -> str: + """ + Generate a resource group name for infrastructure deployments, optionally with an index. + + Args: + deployment_name (INFRASTRUCTURE): The infrastructure deployment enum value. + index (int | None): An optional index to append to the name. Defaults to None. + + Returns: + str: The generated resource group name. + """ + + rg_name = f'apim-infra-{deployment_name.value}' + + if index is not None: + rg_name = f'{rg_name}-{index}' + + return rg_name + +def get_unique_suffix_for_resource_group(rg_name: str) -> str: + """ + Get the exact uniqueString value that Bicep/ARM generates for a resource group. + + Uses a minimal ARM deployment to ensure the value matches exactly what + Bicep's uniqueString(subscription().id, resourceGroup().id) produces. + + Args: + rg_name (str): The resource group name (must already exist). + + Returns: + str: The 13-character unique string matching Bicep's uniqueString output. 
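+
+    Example (illustrative; assumes the resource group already exists):
+
+        suffix = get_unique_suffix_for_resource_group('apim-infra-simple-apim-1')  # 13-character string, or '' on failure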
+ """ + + # Minimal ARM template that just outputs the uniqueString + template = json.dumps({ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "resources": [], + "outputs": { + "suffix": { + "type": "string", + "value": "[uniqueString(subscription().id, resourceGroup().id)]" + } + } + }) + + # Write template to temp file + with tempfile.NamedTemporaryFile(mode = 'w', suffix = '.json', delete = False) as f: + f.write(template) + template_path = f.name + + try: + deployment_name = f'get-suffix-{int(time.time())}' + output = _run( + f'az deployment group create --name {deployment_name} --resource-group {rg_name} --template-file "{template_path}" --query "properties.outputs.suffix.value" -o tsv', + print_command_to_run = False, + print_errors = False + ) + + if output.success and output.text.strip(): + return output.text.strip() + + print_error('Could not get uniqueString from Azure.') + return '' + finally: + try: + os.unlink(template_path) + except Exception: + pass + +def get_rg_name(deployment_name: str, index: int | None = None) -> str: + """ + Generate a resource group name for a sample deployment, optionally with an index. + + Args: + deployment_name (str): The base name for the deployment. + index (int | None): An optional index to append to the name. + + Returns: + str: The generated resource group name. + """ + + rg_name = f'apim-sample-{deployment_name}' + + if index is not None: + rg_name = f'{rg_name}-{str(index)}' + + print_val('Resource group name', rg_name) + return rg_name + +def get_endpoints(deployment: INFRASTRUCTURE, rg_name: str) -> Endpoints: + """ + Retrieve all possible endpoints for a given infrastructure deployment. + + Args: + deployment (INFRASTRUCTURE): The infrastructure deployment enum value. + rg_name (str): The name of the resource group. + + Returns: + Endpoints: An object containing all discovered endpoints. + """ + + print_message(f'Identifying possible endpoints for infrastructure {deployment}...') + + endpoints = Endpoints(deployment) + + endpoints.afd_endpoint_url = get_frontdoor_url(deployment, rg_name) + endpoints.apim_endpoint_url = get_apim_url(rg_name) + endpoints.appgw_hostname, endpoints.appgw_public_ip = get_appgw_endpoint(rg_name) + + return endpoints diff --git a/shared/python/utils.py b/shared/python/utils.py index 04ab8f3..e4a791d 100644 --- a/shared/python/utils.py +++ b/shared/python/utils.py @@ -6,7 +6,6 @@ import json import sys import os -import re import subprocess import time import traceback @@ -16,32 +15,19 @@ import inspect from pathlib import Path import apimtypes -import tempfile -import os as temp_os -from typing import Any, Optional, Tuple +from typing import Any from apimtypes import APIM_SKU, HTTP_VERB, INFRASTRUCTURE, Endpoints, Output, _get_project_root # ------------------------------ # RE-EXPORTS (BACKWARD COMPATIBILITY) # ------------------------------ # -# The following imports are re-exported from the console module to maintain -# backward compatibility with existing code. This allows files that currently -# use `utils.print_*()` to continue working without modification. -# -# For new code, consider importing directly from the console module: -# from console import print_info, print_error -# -# This re-export pattern allows for gradual migration while keeping the codebase -# functional during refactoring. 
-# -from console import ( - BOLD_B, BOLD_C, BOLD_G, BOLD_M, BOLD_R, BOLD_W, BOLD_Y, - CONSOLE_WIDTH, RESET, THREAD_COLORS, _print_lock, _print_log, - print_command, print_error, print_header, print_info, print_message, print_ok, print_success, print_val, print_warning -) - +# The following imports are re-exported from the modules that are now split out from utils. +# The re-exports are in place to maintain backward compatibility with existing code. +# For new code, please import directly from the relevant modules. +from console import * +from azure_resources import * # ------------------------------ # HELPER FUNCTIONS @@ -302,7 +288,7 @@ def _query_and_select_infrastructure(self) -> tuple[INFRASTRUCTURE | None, int | available_options = [] for infra in self.supported_infrastructures: - infra_options = self._find_infrastructure_instances(infra) + infra_options = find_infrastructure_instances(infra) available_options.extend(infra_options) # Check if the desired infrastructure/index combination exists @@ -424,46 +410,6 @@ def _query_and_select_infrastructure(self) -> tuple[INFRASTRUCTURE | None, int | print_error('Invalid input. Please enter a number.') - def _find_infrastructure_instances(self, infrastructure: INFRASTRUCTURE) -> list[tuple[INFRASTRUCTURE, int | None]]: - """ - Find all instances of a specific infrastructure type by querying Azure resource groups. - - Args: - infrastructure (INFRASTRUCTURE): The infrastructure type to search for. - - Returns: - list: List of tuples (infrastructure, index) for found instances. - """ - - instances = [] - - # Query Azure for resource groups with the infrastructure tag - query_cmd = f'az group list --tag infrastructure={infrastructure.value} --query "[].name" -o tsv' - output = run(query_cmd, print_command_to_run = False, print_errors = False) - - if output.success and output.text.strip(): - rg_names = [name.strip() for name in output.text.strip().split('\n') if name.strip()] - - for rg_name in rg_names: - # Parse the resource group name to extract the index - # Expected format: apim-infra-{infrastructure}-{index} or apim-infra-{infrastructure} - prefix = f'apim-infra-{infrastructure.value}' - - if rg_name == prefix: - # No index - instances.append((infrastructure, None)) - elif rg_name.startswith(prefix + '-'): - # Has index - try: - index_str = rg_name[len(prefix + '-'):] - index = int(index_str) - instances.append((infrastructure, index)) - except ValueError: - # Invalid index format, skip - continue - - return instances - # ------------------------------ # PUBLIC METHODS # ------------------------------ @@ -541,7 +487,6 @@ def deploy_sample(self, bicep_parameters: dict) -> Output: # PRIVATE METHODS # ------------------------------ - def _determine_bicep_directory(infrastructure_dir: str) -> str: """ Determine the correct Bicep directory based on the current working directory and infrastructure directory name. @@ -598,36 +543,6 @@ def _determine_bicep_directory(infrastructure_dir: str) -> str: # PUBLIC METHODS # ------------------------------ -def get_azure_role_guid(role_name: str) -> Optional[str]: - """ - Load the Azure roles JSON file and return the GUID for the specified role name. - - Args: - role_name (str): The name of the Azure role (e.g., 'StorageBlobDataReader'). - - Returns: - Optional[str]: The GUID of the role if found, None if not found or file cannot be loaded. 
- """ - try: - # Get the directory of the current script to build the path to azure-roles.json - current_dir = os.path.dirname(os.path.abspath(__file__)) - roles_file_path = os.path.join(current_dir, '..', 'azure-roles.json') - - # Normalize the path for cross-platform compatibility - roles_file_path = os.path.normpath(roles_file_path) - - # Load the JSON file - with open(roles_file_path, 'r', encoding='utf-8') as file: - roles_data: dict[str, str] = json.load(file) - - # Return the GUID for the specified role name - return roles_data.get(role_name) - - except (FileNotFoundError, json.JSONDecodeError, OSError) as e: - print_error(f'Failed to load Azure roles from {roles_file_path}: {str(e)}') - - return None - def create_bicep_deployment_group(rg_name: str, rg_location: str, deployment: str | INFRASTRUCTURE, bicep_parameters: dict, bicep_parameters_file: str = 'params.json', rg_tags: dict | None = None, is_debug: bool = False) -> Output: """ Create a Bicep deployment in a resource group, writing parameters to a file and running the deployment. @@ -771,33 +686,6 @@ def create_bicep_deployment_group_for_sample(sample_name: str, rg_name: str, rg_ os.chdir(original_cwd) print(f'📁 Restored working directory to: {original_cwd}') -def create_resource_group(rg_name: str, resource_group_location: str | None = None, tags: dict | None = None) -> None: - """ - Create a resource group in Azure if it does not already exist. - - Args: - rg_name (str): Name of the resource group. - resource_group_location (str, optional): Azure region for the resource group. - tags (dict, optional): Additional tags to apply to the resource group. - - Returns: - None - """ - - if not does_resource_group_exist(rg_name): - # Build the tags string for the Azure CLI command - tag_string = 'source=apim-sample' - if tags: - for key, value in tags.items(): - # Escape values that contain spaces or special characters - escaped_value = value.replace('"', '\\"') if isinstance(value, str) else str(value) - tag_string += f' {key}=\"{escaped_value}\"' - - run(f'az group create --name {rg_name} --location {resource_group_location} --tags {tag_string}', - f"Resource group '{rg_name}' created", - f"Failed to create the resource group '{rg_name}'", - False, False, False, False) - def _prompt_for_infrastructure_update(rg_name: str) -> tuple[bool, int | None]: """ Prompt the user for infrastructure update confirmation. @@ -904,34 +792,6 @@ def does_infrastructure_exist(infrastructure: INFRASTRUCTURE, index: int, allow_ print(' Infrastructure does not yet exist.') return False -def does_resource_group_exist(rg_name: str) -> bool: - """ - Check if a resource group exists in Azure. - - Args: - rg_name (str): Name of the resource group. - - Returns: - bool: True if the resource group exists, False otherwise. - """ - - output = run(f'az group show --name {rg_name}', print_command_to_run = False, print_output = False, print_errors = False) - return output.success - -def get_resource_group_location(rg_name: str) -> str: - """ - Get the location of a resource group. - - Args: - rg_name (str): Name of the resource group. - - Returns: - str: The location of the resource group, or 'Unknown' if not found. 
- """ - - output = run(f'az group show --name {rg_name} --query location -o tsv', print_command_to_run = False, print_output = False, print_errors = False) - return output.text.strip() if output.success and output.text.strip() else 'Unknown' - def read_and_modify_policy_xml(policy_xml_filepath: str, replacements: dict[str, str], sample_name: str = None) -> str: """ Read and return the contents of a policy XML file, then modifies it by replacing placeholders with provided values. @@ -1061,260 +921,6 @@ def read_policy_xml(policy_xml_filepath_or_filename: str, named_values: dict[str return policy_template_xml -def get_account_info() -> Tuple[str, str, str, str]: - """ - Retrieve the current Azure account information using the Azure CLI. - - Returns: - tuple: (current_user, current_user_id, tenant_id, subscription_id) - - Raises: - Exception: If account information cannot be retrieved. - """ - - account_show_output = run('az account show', 'Retrieved az account', 'Failed to get the current az account', print_command_to_run = False) - ad_user_show_output = run('az ad signed-in-user show', 'Retrieved az ad signed-in-user', 'Failed to get the current az ad signed-in-user', print_command_to_run = False) - - if account_show_output.success and account_show_output.json_data and ad_user_show_output.success and ad_user_show_output.json_data: - current_user = account_show_output.json_data['user']['name'] - tenant_id = account_show_output.json_data['tenantId'] - subscription_id = account_show_output.json_data['id'] - current_user_id = ad_user_show_output.json_data['id'] - - print_val('Current user', current_user) - print_val('Current user ID', current_user_id) - print_val('Tenant ID', tenant_id) - print_val('Subscription ID', subscription_id) - - return current_user, current_user_id, tenant_id, subscription_id - else: - error = 'Failed to retrieve account information. Please ensure the Azure CLI is installed, you are logged in, and the subscription is set correctly.' - print_error(error) - raise Exception(error) - -def get_deployment_name() -> str: - - """ - Get the deployment name based on the directory of the currently running Jupyter notebook. - - Returns: - str: The deployment name, derived from the current working directory. - """ - - notebook_path = os.path.basename(os.getcwd()) - - if not notebook_path: - raise RuntimeError('Notebook path could not be determined.') - - print_val('Deployment name', notebook_path) - - return notebook_path - -def get_frontdoor_url(deployment_name: INFRASTRUCTURE, rg_name: str) -> str | None: - """ - Retrieve the secure URL for the first endpoint in the first Azure Front Door Standard/Premium profile in the specified resource group. - - Args: - deployment_name (INFRASTRUCTURE): The infrastructure deployment enum value. Should be INFRASTRUCTURE.AFD_APIM_PE for AFD scenarios. - rg_name (str): The name of the resource group containing the Front Door profile. - - Returns: - str | None: The secure URL (https) of the first endpoint if found, otherwise None. 
- """ - - afd_endpoint_url: str | None = None - - if deployment_name == INFRASTRUCTURE.AFD_APIM_PE: - output = run(f'az afd profile list -g {rg_name} -o json') - - if output.success and output.json_data: - afd_profile_name = output.json_data[0]['name'] - print_ok(f'Front Door Profile Name: {afd_profile_name}', blank_above = False) - - if afd_profile_name: - output = run(f'az afd endpoint list -g {rg_name} --profile-name {afd_profile_name} -o json') - - if output.success and output.json_data: - afd_hostname = output.json_data[0]['hostName'] - - if afd_hostname: - afd_endpoint_url = f'https://{afd_hostname}' - - if afd_endpoint_url: - print_ok(f'Front Door Endpoint URL: {afd_endpoint_url}', blank_above = False) - else: - print_warning('No Front Door endpoint URL found.') - - return afd_endpoint_url - -def get_apim_url(rg_name: str) -> str | None: - """ - Retrieve the gateway URL for the API Management service in the specified resource group. - - Args: - rg_name (str): The name of the resource group containing the APIM service. - - Returns: - str | None: The gateway URL (https) of the APIM service if found, otherwise None. - """ - - apim_endpoint_url: str | None = None - - output = run(f'az apim list -g {rg_name} -o json', print_command_to_run = False) - - if output.success and output.json_data: - apim_gateway_url = output.json_data[0]['gatewayUrl'] - print_ok(f'APIM Service Name: {output.json_data[0]["name"]}', blank_above = False) - - if apim_gateway_url: - apim_endpoint_url = apim_gateway_url - - if apim_endpoint_url: - print_ok(f'APIM Gateway URL: {apim_endpoint_url}', blank_above = False) - else: - print_warning('No APIM gateway URL found.') - - return apim_endpoint_url - -def get_appgw_endpoint(rg_name: str) -> tuple[str | None, str | None]: - """ - Retrieve the hostname and public IP address for the Application Gateway in the specified resource group. - - Args: - rg_name (str): The name of the resource group containing the Application Gateway. - - Returns: - tuple[str | None, str | None]: A tuple containing (hostname, public_ip) if found, otherwise (None, None). 
- """ - - hostname: str | None = None - public_ip: str | None = None - - # Get Application Gateway details - output = run(f'az network application-gateway list -g {rg_name} -o json', print_command_to_run = False) - - if output.success and output.json_data: - appgw_name = output.json_data[0]['name'] - print_ok(f'Application Gateway Name: {appgw_name}', blank_above = False) - - # Get hostname - http_listeners = output.json_data[0].get('httpListeners', []) - - for listener in http_listeners: - # Assume that only a single hostname is used, not the hostnames array - if listener.get('hostName'): - hostname = listener['hostName'] - - # Get frontend IP configuration to find public IP reference - frontend_ip_configs = output.json_data[0].get('frontendIPConfigurations', []) - public_ip_id = None - - for config in frontend_ip_configs: - if config.get('publicIPAddress'): - public_ip_id = config['publicIPAddress']['id'] - break - - if public_ip_id: - # Extract public IP name from the resource ID - public_ip_name = public_ip_id.split('/')[-1] - - # Get public IP details - ip_output = run(f'az network public-ip show -g {rg_name} -n {public_ip_name} -o json', print_command_to_run = False) - - if ip_output.success and ip_output.json_data: - public_ip = ip_output.json_data.get('ipAddress') - - return hostname, public_ip - -def get_infra_rg_name(deployment_name: INFRASTRUCTURE, index: int | None = None) -> str: - """ - Generate a resource group name for infrastructure deployments, optionally with an index. - - Args: - deployment_name (INFRASTRUCTURE): The infrastructure deployment enum value. - index (int | None): An optional index to append to the name. Defaults to None. - - Returns: - str: The generated resource group name. - """ - - rg_name = f'apim-infra-{deployment_name.value}' - - if index is not None: - rg_name = f'{rg_name}-{index}' - - return rg_name - -def get_unique_suffix_for_resource_group(rg_name: str) -> str: - """ - Get the exact uniqueString value that Bicep/ARM generates for a resource group. - - Uses a minimal ARM deployment to ensure the value matches exactly what - Bicep's uniqueString(subscription().id, resourceGroup().id) produces. - - Args: - rg_name (str): The resource group name (must already exist). - - Returns: - str: The 13-character unique string matching Bicep's uniqueString output. - """ - - # Minimal ARM template that just outputs the uniqueString - template = json.dumps({ - "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "resources": [], - "outputs": { - "suffix": { - "type": "string", - "value": "[uniqueString(subscription().id, resourceGroup().id)]" - } - } - }) - - # Write template to temp file - with tempfile.NamedTemporaryFile(mode = 'w', suffix = '.json', delete = False) as f: - f.write(template) - template_path = f.name - - try: - deployment_name = f'get-suffix-{int(time.time())}' - output = run( - f'az deployment group create --name {deployment_name} --resource-group {rg_name} --template-file "{template_path}" --query "properties.outputs.suffix.value" -o tsv', - print_command_to_run = False, - print_errors = False - ) - - if output.success and output.text.strip(): - return output.text.strip() - - print_error('Could not get uniqueString from Azure.') - finally: - try: - temp_os.unlink(template_path) - except Exception: - pass - -def get_rg_name(deployment_name: str, index: int | None = None) -> str: - """ - Generate a resource group name for a sample deployment, optionally with an index. 
- - Args: - deployment_name (str): The base name for the deployment. - index (int, optional): An optional index to append to the name. - - Returns: - str: The generated resource group name. - """ - - rg_name = f'apim-sample-{deployment_name}' - - if index is not None: - rg_name = f'{rg_name}-{str(index)}' - - print_val('Resource group name', rg_name) - return rg_name - def run(command: str, ok_message: str = '', error_message: str = '', print_output: bool = False, print_command_to_run: bool = True, print_errors: bool = True, print_warnings: bool = True) -> Output: """ Execute a shell command, log the command and its output, and attempt to extract JSON from the output. @@ -1428,114 +1034,6 @@ def generate_signing_key() -> tuple[str, str]: return random_string, b64 -def check_apim_blob_permissions(apim_name: str, storage_account_name: str, resource_group_name: str, max_wait_minutes: int = 10) -> bool: - """ - Check if APIM's managed identity has Storage Blob Data Reader permissions on the storage account. - Waits for role assignments to propagate across Azure AD, which can take several minutes. - - Args: - apim_name (str): The name of the API Management service. - storage_account_name (str): The name of the storage account. - resource_group_name (str): The name of the resource group. - max_wait_minutes (int, optional): Maximum time to wait for permissions to propagate. Defaults to 10. - - Returns: - bool: True if APIM has the required permissions, False otherwise. - """ - - print_info(f"🔍 Checking if APIM '{apim_name}' has Storage Blob Data Reader permissions on '{storage_account_name}' in resource group '{resource_group_name}'...") - - # Storage Blob Data Reader role definition ID - blob_reader_role_id = get_azure_role_guid('StorageBlobDataReader') - - # Get APIM's managed identity principal ID - print_info('Getting APIM managed identity...') - apim_identity_output = run( - f'az apim show --name {apim_name} --resource-group {resource_group_name} --query identity.principalId -o tsv', - error_message='Failed to get APIM managed identity', - print_command_to_run=True - ) - - if not apim_identity_output.success or not apim_identity_output.text.strip(): - print_error('Could not retrieve APIM managed identity principal ID') - return False - - principal_id = apim_identity_output.text.strip() - print_info(f'APIM managed identity principal ID: {principal_id}') # Get storage account resource ID - # Remove suppression flags to get raw output, then extract resource ID with regex - storage_account_output = run( - f'az storage account show --name {storage_account_name} --resource-group {resource_group_name} --query id -o tsv', - error_message='Failed to get storage account resource ID', - print_command_to_run=True - ) - - if not storage_account_output.success: - print_error('Could not retrieve storage account resource ID') - return False - - # Extract resource ID using regex pattern, ignoring any warning text - resource_id_pattern = r'/subscriptions/[a-f0-9-]+/resourceGroups/[^/]+/providers/Microsoft\.Storage/storageAccounts/[^/\s]+' - match = re.search(resource_id_pattern, storage_account_output.text) - - if not match: - print_error('Could not parse storage account resource ID from output') - return False - - storage_account_id = match.group(0) - - # Check for role assignment with retry logic for propagation - max_wait_seconds = max_wait_minutes * 60 - wait_interval = 30 # Check every 30 seconds - elapsed_time = 0 - - print_info(f'Checking role assignment (will wait up to {max_wait_minutes} minute(s) 
for propagation)...') - - while elapsed_time < max_wait_seconds: - # Check if role assignment exists - role_assignment_output = run( - f"az role assignment list --assignee {principal_id} --scope {storage_account_id} --role {blob_reader_role_id} --query '[0].id' -o tsv", - error_message='Failed to check role assignment', - print_command_to_run=True, - print_errors=False - ) - - if role_assignment_output.success and role_assignment_output.text.strip(): - print_success(f'Role assignment found! APIM managed identity has Storage Blob Data Reader permissions.') - - # Additional check: try to test blob access using the managed identity - print_info('Testing actual blob access...') - test_access_output = run( - f"az storage blob list --account-name {storage_account_name} --container-name samples --auth-mode login --only-show-errors --query '[0].name' -o tsv 2>/dev/null || echo 'access-test-failed'", - error_message='', - print_command_to_run=True, - print_errors=False - ) - - if test_access_output.success and test_access_output.text.strip() != 'access-test-failed': - print_success('Blob access test successful!') - return True - else: - print_warning('Role assignment exists but blob access test failed. Permissions may still be propagating...') - - if elapsed_time == 0: - print_info(f'Role assignment not found yet. Waiting for Azure AD propagation...') - else: - print_info(f'Still waiting... ({elapsed_time // 60}m {elapsed_time % 60}s elapsed)') - - if elapsed_time + wait_interval >= max_wait_seconds: - break - - time.sleep(wait_interval) - elapsed_time += wait_interval - - print_error(f'Timeout: Role assignment not found after {max_wait_minutes} minutes.') - print_info('This is likely due to Azure AD propagation delays. You can:') - print_info('1. Wait a few more minutes and try again') - print_info('2. Manually verify the role assignment in the Azure portal') - print_info('3. Check the deployment logs for any errors') - - return False - def wait_for_apim_blob_permissions(apim_name: str, storage_account_name: str, resource_group_name: str, max_wait_minutes: int = 15) -> bool: """ Wait for APIM's managed identity to have Storage Blob Data Reader permissions on the storage account. @@ -1592,92 +1090,6 @@ def get_endpoints(deployment: INFRASTRUCTURE, rg_name: str) -> Endpoints: return endpoints -def cleanup_old_jwt_signing_keys(apim_name: str, resource_group_name: str, current_jwt_key_name: str) -> bool: - """ - Clean up old JWT signing keys from APIM named values for the same sample folder, keeping only the current key. - Uses regex matching to identify keys that belong to the same sample folder by extracting the sample folder - name from the current key and matching against the pattern 'JwtSigningKey-{sample_folder}-{timestamp}'. 
- - Args: - apim_name (str): Name of the APIM service - resource_group_name (str): Name of the resource group containing APIM - current_jwt_key_name (str): Name of the current JWT key to preserve (format: JwtSigningKey-{sample_folder}-{timestamp}) - - Returns: - bool: True if cleanup was successful, False otherwise - """ - - try: - print_message('🧹 Cleaning up old JWT signing keys for the same sample folder...', blank_above = True) - - # Extract sample folder name from current JWT key using regex - # Pattern: JwtSigningKey-{sample_folder}-{timestamp} - current_key_pattern = r'^JwtSigningKey-(.+)-\d+$' - current_key_match = re.match(current_key_pattern, current_jwt_key_name) - - if not current_key_match: - print_error(f"Current JWT key name '{current_jwt_key_name}' does not match expected pattern 'JwtSigningKey-{{sample_folder}}-{{timestamp}}'") - return False - - sample_folder = current_key_match.group(1) - print_info(f"Identified sample folder: '{sample_folder}'") - - # Get all named values that start with 'JwtSigningKey' - print_info(f"Getting all JWT signing key named values from APIM '{apim_name}'...") - - output = run( - f'az apim nv list --service-name "{apim_name}" --resource-group "{resource_group_name}" --query "[?contains(name, \'JwtSigningKey\')].name" -o tsv', - 'Retrieved JWT signing keys', - 'Failed to retrieve JWT signing keys' - ) - - if not output.success: - print_error('Failed to retrieve JWT signing keys from APIM') - return False - - if not output.text.strip(): - print_info('No JWT signing keys found. Nothing to clean up.') - return True - - # Parse the list of JWT keys - jwt_keys = [key.strip() for key in output.text.strip().split('\n') if key.strip()] - - # print_info(f'Found {len(jwt_keys)} total JWT signing keys.') - - # Filter keys that belong to the same sample folder using regex - sample_key_pattern = rf'^JwtSigningKey-{re.escape(sample_folder)}-\d+$' - sample_folder_keys = [key for key in jwt_keys if re.match(sample_key_pattern, key)] - - print_info(f"Found {len(sample_folder_keys)} JWT signing keys for sample folder '{sample_folder}'.") - - # Process each JWT key for this sample folder - deleted_count = 0 - kept_count = 0 - - for jwt_key in sample_folder_keys: - if jwt_key == current_jwt_key_name: - print_info(f'Keeping current JWT key: {jwt_key}') - kept_count += 1 - else: - print_info(f'Deleting old JWT key: {jwt_key}') - delete_output = run( - f'az apim nv delete --service-name "{apim_name}" --resource-group "{resource_group_name}" --named-value-id "{jwt_key}" --yes', - f'Deleted old JWT key: {jwt_key}', - f'Failed to delete JWT key: {jwt_key}', - print_errors = False - ) - - if delete_output.success: - deleted_count += 1 - - # Summary - print_success(f"JWT signing key cleanup completed for sample '{sample_folder}'. Deleted {deleted_count} old key(s), kept {kept_count}.", blank_above = True) - return True - - except Exception as e: - print_error(f'Error during JWT key cleanup: {str(e)}') - return False - def get_json(input: str) -> Any: """ Safely parse a JSON string or file content into a Python object. diff --git a/tests/python/test_azure_resources.py b/tests/python/test_azure_resources.py new file mode 100644 index 0000000..2bf7d05 --- /dev/null +++ b/tests/python/test_azure_resources.py @@ -0,0 +1,466 @@ +""" +Tests for azure_resources module. 
+""" + +import json +import pytest +from unittest.mock import Mock, patch, mock_open, call + +from azure_resources import* +from apimtypes import INFRASTRUCTURE, Endpoints, Output + + +# ------------------------------ +# AZURE ROLE TESTS +# ------------------------------ + +def test_get_azure_role_guid_success(): + """Test successful retrieval of Azure role GUID.""" + + mock_data = {'Contributor': 'role-guid-12345', 'Reader': 'role-guid-67890'} + + with patch('builtins.open', mock_open(read_data=json.dumps(mock_data))): + result = get_azure_role_guid('Contributor') + + assert result == 'role-guid-12345' + + +def test_get_azure_role_guid_failure(): + """Test get_azure_role_guid returns None when file not found.""" + + with patch('builtins.open', side_effect=FileNotFoundError('File not found')): + result = get_azure_role_guid('NonExistentRole') + + assert result is None + +# ------------------------------ +# RESOURCE GROUP TESTS +# ------------------------------ + +def test_does_resource_group_exist_true(): + """Test checking if resource group exists - returns True.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(True, '{"name": "test-rg"}') + + result = does_resource_group_exist('test-rg') + + assert result is True + mock_run.assert_called_once_with( + 'az group show --name test-rg -o json', + print_command_to_run = False, + print_errors = False + ) + + +def test_does_resource_group_exist_false(): + """Test checking if resource group exists - returns False.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(False, 'ResourceGroupNotFound') + + result = does_resource_group_exist('nonexistent-rg') + + assert result is False + + +def test_get_resource_group_location_success(): + """Test successful retrieval of resource group location.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(True, 'eastus2\n') + + result = get_resource_group_location('test-rg') + + assert result == 'eastus2' + mock_run.assert_called_once_with( + 'az group show --name test-rg --query "location" -o tsv', + print_command_to_run = False, + print_errors = False + ) + + +def test_get_resource_group_location_failure(): + """Test get_resource_group_location returns None on failure.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(False, 'error message') + + result = get_resource_group_location('nonexistent-rg') + + assert result is None + + +def test_get_resource_group_location_empty(): + """Test get_resource_group_location returns None on empty response.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(True, '') + + result = get_resource_group_location('test-rg') + + assert result is None + + +# ------------------------------ +# ACCOUNT INFO TESTS +# ------------------------------ + +def test_get_account_info_success(): + """Test successful retrieval of account information.""" + + with patch('azure_resources._run') as mock_run: + account_output = Output(True, '{}') + account_output.json_data = { + 'user': {'name': 'test.user@example.com'}, + 'id': 'sub-12345', + 'tenantId': 'tenant-12345' + } + + ad_user_output = Output(True, '{}') + ad_user_output.json_data = {'id': 'user-id-12345'} + + mock_run.side_effect = [account_output, ad_user_output] + + current_user, current_user_id, tenant_id, subscription_id = get_account_info() + + assert current_user == 'test.user@example.com' + assert current_user_id == 'user-id-12345' + assert tenant_id 
== 'tenant-12345' + assert subscription_id == 'sub-12345' + + +def test_get_account_info_failure(): + """Test get_account_info raises exception on failure.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(False, 'authentication error') + + with pytest.raises(Exception) as exc_info: + get_account_info() + + assert 'Failed to retrieve account information' in str(exc_info.value) + + +def test_get_account_info_no_json(): + """Test get_account_info raises exception when no JSON data.""" + + with patch('azure_resources._run') as mock_run: + output = Output(True, 'some text') + output.json_data = None + mock_run.return_value = output + + with pytest.raises(Exception) as exc_info: + get_account_info() + + assert 'Failed to retrieve account information' in str(exc_info.value) + +# ------------------------------ +# DEPLOYMENT NAME TESTS +# ------------------------------ + +@patch('azure_resources.time.time') +@patch('azure_resources.os.path.basename') +@patch('azure_resources.os.getcwd') +def test_get_deployment_name_with_directory(mock_getcwd, mock_basename, mock_time): + """Test deployment name generation with explicit directory.""" + + mock_time.return_value = 1234567890 + + result = get_deployment_name('my-sample') + + assert result == 'deploy-my-sample-1234567890' + mock_getcwd.assert_not_called() + mock_basename.assert_not_called() + + +@patch('azure_resources.time.time') +@patch('azure_resources.os.path.basename') +@patch('azure_resources.os.getcwd') +def test_get_deployment_name_current_directory(mock_getcwd, mock_basename, mock_time): + """Test deployment name generation using current directory.""" + + mock_time.return_value = 1234567890 + mock_getcwd.return_value = '/path/to/current-folder' + mock_basename.return_value = 'current-folder' + + result = get_deployment_name() + + assert result == 'deploy-current-folder-1234567890' + mock_getcwd.assert_called_once() + mock_basename.assert_called_once_with('/path/to/current-folder') + + +# ------------------------------ +# FRONT DOOR TESTS +# ------------------------------ + +def test_get_frontdoor_url_afd_success(): + """Test successful Front Door URL retrieval.""" + + with patch('azure_resources._run') as mock_run: + # Create mock outputs + profile_output = Output(True, '') + profile_output.json_data = [{"name": "test-afd"}] + + endpoint_output = Output(True, '') + endpoint_output.json_data = [{"hostName": "test.azurefd.net"}] + + mock_run.side_effect = [profile_output, endpoint_output] + + result = get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg') + + assert result == 'https://test.azurefd.net' + + expected_calls = [ + call('az afd profile list -g test-rg -o json'), + call('az afd endpoint list -g test-rg --profile-name test-afd -o json') + ] + mock_run.assert_has_calls(expected_calls) + + +def test_get_frontdoor_url_wrong_infrastructure(): + """Test Front Door URL with wrong infrastructure type.""" + + with patch('azure_resources._run') as mock_run: + result = get_frontdoor_url(INFRASTRUCTURE.SIMPLE_APIM, 'test-rg') + + assert result is None + mock_run.assert_not_called() + + +def test_get_frontdoor_url_no_profile(): + """Test Front Door URL when no profile found.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(False, 'No profiles found') + + result = get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg') + + assert result is None + + +def test_get_frontdoor_url_no_endpoints(): + """Test Front Door URL when profile exists but no endpoints.""" + + with 
patch('azure_resources._run') as mock_run: + profile_output = Output(True, '') + profile_output.json_data = [{'name': 'test-afd'}] + endpoint_output = Output(False, 'No endpoints found') + mock_run.side_effect = [profile_output, endpoint_output] + + result = get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg') + + assert result is None + + +# ------------------------------ +# APIM URL TESTS +# ------------------------------ + +def test_get_apim_url_success(): + """Test successful APIM URL retrieval.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(True, '') + mock_run.return_value.json_data = [{'name': 'test-apim', 'gatewayUrl': 'https://test-apim.azure-api.net'}] + + result = get_apim_url('test-rg') + + assert result == 'https://test-apim.azure-api.net' + mock_run.assert_called_once_with( + 'az apim list -g test-rg -o json', + print_command_to_run = False + ) + + +def test_get_apim_url_failure(): + """Test APIM URL retrieval failure.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(False, 'No APIM services found') + + result = get_apim_url('test-rg') + + assert result is None + + +def test_get_apim_url_no_gateway(): + """Test APIM URL when service exists but no gateway URL.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(True, '') + mock_run.return_value.json_data = [{'name': 'test-apim', 'gatewayUrl': None}] + + result = get_apim_url('test-rg') + + assert result is None + + +# ------------------------------ +# APPLICATION GATEWAY TESTS +# ------------------------------ + +def test_get_appgw_endpoint_success(): + """Test successful Application Gateway endpoint retrieval.""" + + with patch('azure_resources._run') as mock_run: + appgw_output = Output(True, '') + appgw_output.json_data = [{ + 'name': 'test-appgw', + 'httpListeners': [{'hostName': 'api.contoso.com'}], + 'frontendIPConfigurations': [{ + 'publicIPAddress': {'id': '/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/test-pip'} + }] + }] + ip_output = Output(True, '') + ip_output.json_data = {'ipAddress': '1.2.3.4'} + mock_run.side_effect = [appgw_output, ip_output] + + hostname, ip = get_appgw_endpoint('test-rg') + + assert hostname == 'api.contoso.com' + assert ip == '1.2.3.4' + + expected_calls = [ + call('az network application-gateway list -g test-rg -o json', print_command_to_run = False), + call('az network public-ip show -g test-rg -n test-pip -o json', print_command_to_run = False) + ] + mock_run.assert_has_calls(expected_calls) + + +def test_get_appgw_endpoint_no_gateway(): + """Test Application Gateway endpoint when no gateway found.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(False, 'No gateways found') + + hostname, ip = get_appgw_endpoint('test-rg') + + assert hostname is None + assert ip is None + + +def test_get_appgw_endpoint_no_listeners(): + """Test Application Gateway endpoint with no HTTP listeners.""" + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(True, '') + mock_run.return_value.json_data = [{ + 'name': 'test-appgw', + 'httpListeners': [], + 'frontendIPConfigurations': [] + }] + + hostname, ip = get_appgw_endpoint('test-rg') + + assert hostname is None + assert ip is None + + +# ------------------------------ +# NAMING FUNCTION TESTS +# ------------------------------ + +def test_get_infra_rg_name_without_index(): + """Test infrastructure resource group name generation without 
index.""" + + result = get_infra_rg_name(INFRASTRUCTURE.SIMPLE_APIM) + + assert result == 'apim-infra-simple-apim' + + +def test_get_infra_rg_name_with_index(): + """Test infrastructure resource group name generation with index.""" + + result = get_infra_rg_name(INFRASTRUCTURE.AFD_APIM_PE, 42) + + assert result == 'apim-infra-afd-apim-pe-42' + + +def test_get_rg_name_without_index(): + """Test sample resource group name generation without index.""" + + result = get_rg_name('test-sample') + + assert result == 'apim-sample-test-sample' + + +def test_get_rg_name_with_index(): + """Test sample resource group name generation with index.""" + + result = get_rg_name('test-sample', 5) + + assert result == 'apim-sample-test-sample-5' + + +# ------------------------------ +# UNIQUE SUFFIX TESTS +# ------------------------------ + +@patch('azure_resources.tempfile.NamedTemporaryFile') +@patch('azure_resources.time.time') +@patch('azure_resources.os.unlink') +def test_get_unique_suffix_for_resource_group_success(mock_unlink, mock_time, mock_tempfile): + """Test successful unique suffix retrieval.""" + + mock_time.return_value = 1234567890 + mock_file = Mock() + mock_file.name = '/tmp/template.json' + mock_tempfile.return_value.__enter__.return_value = mock_file + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(True, 'abc123def456\n') + + result = get_unique_suffix_for_resource_group('test-rg') + + assert result == 'abc123def456' + mock_run.assert_called_once() + mock_unlink.assert_called_once_with('/tmp/template.json') + + +@patch('azure_resources.tempfile.NamedTemporaryFile') +@patch('azure_resources.time.time') +@patch('azure_resources.os.unlink') +def test_get_unique_suffix_for_resource_group_failure(mock_unlink, mock_time, mock_tempfile): + """Test unique suffix retrieval failure.""" + + mock_time.return_value = 1234567890 + mock_file = Mock() + mock_file.name = '/tmp/template.json' + mock_tempfile.return_value.__enter__.return_value = mock_file + + with patch('azure_resources._run') as mock_run: + mock_run.return_value = Output(False, 'Deployment failed') + + result = get_unique_suffix_for_resource_group('test-rg') + + assert result == '' + mock_unlink.assert_called_once_with('/tmp/template.json') + + +# ------------------------------ +# ENDPOINTS TESTS +# ------------------------------ + +@patch('azure_resources.get_frontdoor_url') +@patch('azure_resources.get_apim_url') +@patch('azure_resources.get_appgw_endpoint') +def test_get_endpoints_success(mock_appgw, mock_apim, mock_afd): + """Test successful endpoints retrieval.""" + + mock_afd.return_value = 'https://test.azurefd.net' + mock_apim.return_value = 'https://test-apim.azure-api.net' + mock_appgw.return_value = ('api.contoso.com', '1.2.3.4') + + result = get_endpoints(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg') + + assert isinstance(result, Endpoints) + assert result.afd_endpoint_url == 'https://test.azurefd.net' + assert result.apim_endpoint_url == 'https://test-apim.azure-api.net' + assert result.appgw_hostname == 'api.contoso.com' + assert result.appgw_public_ip == '1.2.3.4' + + mock_afd.assert_called_once_with(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg') + mock_apim.assert_called_once_with('test-rg') + mock_appgw.assert_called_once_with('test-rg') diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index 1cec072..7f1144e 100644 --- a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -13,75 +13,6 @@ import utils import json_utils -# ------------------------------ -# get_account_info 
diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py
index 1cec072..7f1144e 100644
--- a/tests/python/test_utils.py
+++ b/tests/python/test_utils.py
@@ -13,75 +13,6 @@
 import utils
 import json_utils
 
-# ------------------------------
-# get_account_info
-# ------------------------------
-
-def test_get_account_info_success(monkeypatch):
-    mock_json = {
-        'user': {'name': 'testuser'},
-        'tenantId': 'tenant',
-        'id': 'subid'
-    }
-    mock_ad_json = {
-        'id': 'userid'
-    }
-
-    # Mock both calls that get_account_info makes
-    call_count = [0]
-    def mock_run_multiple(*args, **kwargs):
-        call_count[0] += 1
-        if call_count[0] == 1:  # First call: az account show
-            return MagicMock(success=True, json_data=mock_json)
-
-        # Second call: az ad signed-in-user show
-        return MagicMock(success=True, json_data=mock_ad_json)
-
-    monkeypatch.setattr(utils, 'run', mock_run_multiple)
-    result = utils.get_account_info()
-    assert result == ('testuser', 'userid', 'tenant', 'subid')
-
-def test_get_account_info_failure(monkeypatch):
-    mock_output = MagicMock(success=False, json_data=None)
-    monkeypatch.setattr(utils, 'run', lambda *a, **kw: mock_output)
-    with pytest.raises(Exception):
-        utils.get_account_info()
-
-# ------------------------------
-# get_deployment_name
-# ------------------------------
-
-def test_get_deployment_name(monkeypatch):
-    monkeypatch.setattr(os, 'getcwd', lambda: '/foo/bar/baz')
-    assert utils.get_deployment_name() == 'baz'
-
-def test_get_deployment_name_error(monkeypatch):
-    monkeypatch.setattr(os, 'getcwd', lambda: '')
-    with pytest.raises(RuntimeError):
-        utils.get_deployment_name()
-
-# ------------------------------
-# get_frontdoor_url
-# ------------------------------
-
-def test_get_frontdoor_url_success(monkeypatch):
-    mock_profile = [{'name': 'afd1'}]
-    mock_endpoints = [{'hostName': 'foo.azurefd.net'}]
-    def run_side_effect(cmd, *a, **kw):
-        if 'profile list' in cmd:
-            return MagicMock(success=True, json_data=mock_profile)
-        if 'endpoint list' in cmd:
-            return MagicMock(success=True, json_data=mock_endpoints)
-        return MagicMock(success=False, json_data=None)
-    monkeypatch.setattr(utils, 'run', run_side_effect)
-    url = utils.get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'rg')
-    assert url == 'https://foo.azurefd.net'
-
-def test_get_frontdoor_url_none(monkeypatch):
-    monkeypatch.setattr(utils, 'run', lambda *a, **kw: MagicMock(success=False, json_data=None))
-    url = utils.get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'rg')
-    assert url is None
-
 # ------------------------------
 # get_infra_rg_name & get_rg_name
 # ------------------------------
@@ -117,24 +48,6 @@ def fail(*a, **kw):
     assert out.success is False
     assert isinstance(out.text, str)
 
-# ------------------------------
-# create_resource_group & does_resource_group_exist
-# ------------------------------
-
-def test_does_resource_group_exist(monkeypatch):
-    monkeypatch.setattr(utils, 'run', lambda *a, **kw: MagicMock(success=True))
-    assert utils.does_resource_group_exist('foo') is True
-    monkeypatch.setattr(utils, 'run', lambda *a, **kw: MagicMock(success=False))
-    assert utils.does_resource_group_exist('foo') is False
-
-def test_create_resource_group(monkeypatch):
-    called = {}
-    monkeypatch.setattr(utils, 'does_resource_group_exist', lambda rg: False)
-    monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: called.setdefault('info', True))
-    monkeypatch.setattr(utils, 'run', lambda *a, **kw: called.setdefault('run', True))
-    utils.create_resource_group('foo', 'bar')
-    assert called['run']
-
 # ------------------------------
 # read_policy_xml
 # ------------------------------
@@ -292,86 +205,6 @@ def test_build_infrastructure_tags_none_custom_tags():
 
     assert result == expected
 
-# ------------------------------
-# create_resource_group
-# ------------------------------
-
-def test_create_resource_group_not_exists_no_tags(monkeypatch):
-    """Test create_resource_group when resource group doesn't exist and no tags provided."""
-    monkeypatch.setattr(utils, 'does_resource_group_exist', lambda x: False)
-    mock_run = MagicMock(return_value=MagicMock(success=True))
-    monkeypatch.setattr(utils, 'run', mock_run)
-    monkeypatch.setattr(utils, 'print_info', MagicMock())
-
-    utils.create_resource_group('test-rg', 'eastus')
-
-    # Verify the correct command was called
-    expected_cmd = 'az group create --name test-rg --location eastus --tags source=apim-sample'
-    mock_run.assert_called_once()
-    actual_cmd = mock_run.call_args[0][0]
-    assert actual_cmd == expected_cmd
-
-def test_create_resource_group_not_exists_with_tags(monkeypatch):
-    """Test create_resource_group when resource group doesn't exist and tags are provided."""
-    monkeypatch.setattr(utils, 'does_resource_group_exist', lambda x: False)
-    mock_run = MagicMock(return_value=MagicMock(success=True))
-    monkeypatch.setattr(utils, 'run', mock_run)
-    monkeypatch.setattr(utils, 'print_info', MagicMock())
-
-    tags = {'infrastructure': 'simple-apim', 'env': 'dev'}
-    utils.create_resource_group('test-rg', 'eastus', tags)
-
-    # Verify the correct command was called with tags
-    mock_run.assert_called_once()
-    actual_cmd = mock_run.call_args[0][0]
-    assert 'source=apim-sample' in actual_cmd
-    assert 'infrastructure="simple-apim"' in actual_cmd
-    assert 'env="dev"' in actual_cmd
-
-def test_create_resource_group_already_exists(monkeypatch):
-    """Test create_resource_group when resource group already exists."""
-    monkeypatch.setattr(utils, 'does_resource_group_exist', lambda x: True)
-    mock_run = MagicMock()
-    monkeypatch.setattr(utils, 'run', mock_run)
-
-    utils.create_resource_group('existing-rg', 'eastus')
-
-    # Verify run was not called since RG already exists
-    mock_run.assert_not_called()
-
-def test_create_resource_group_tags_with_special_chars(monkeypatch):
-    """Test create_resource_group with tags containing special characters."""
-    monkeypatch.setattr(utils, 'does_resource_group_exist', lambda x: False)
-    mock_run = MagicMock(return_value=MagicMock(success=True))
-    monkeypatch.setattr(utils, 'run', mock_run)
-    monkeypatch.setattr(utils, 'print_info', MagicMock())
-
-    tags = {'description': 'This is a test environment', 'owner': 'john@company.com'}
-    utils.create_resource_group('test-rg', 'eastus', tags)
-
-    mock_run.assert_called_once()
-    actual_cmd = mock_run.call_args[0][0]
-    # Check that quotes are properly escaped
-    assert 'description="This is a test environment"' in actual_cmd
-    assert 'owner="john@company.com"' in actual_cmd
-
-def test_create_resource_group_tags_with_numeric_values(monkeypatch):
-    """Test create_resource_group with tags containing numeric values."""
-    monkeypatch.setattr(utils, 'does_resource_group_exist', lambda x: False)
-    mock_run = MagicMock(return_value=MagicMock(success=True))
-    monkeypatch.setattr(utils, 'run', mock_run)
-    monkeypatch.setattr(utils, 'print_info', MagicMock())
-
-    tags = {'cost-center': 12345, 'version': 1.0}
-    utils.create_resource_group('test-rg', 'eastus', tags)
-
-    mock_run.assert_called_once()
-    actual_cmd = mock_run.call_args[0][0]
-    # Numeric values should be converted to strings
-    assert 'cost-center="12345"' in actual_cmd
-    assert 'version="1.0"' in actual_cmd
-
-
 # ------------------------------
 # create_bicep_deployment_group
 # ------------------------------
@@ -605,44 +438,6 @@ def test_determine_policy_path_full_path():
 
     assert result == full_path
 
-def test_check_apim_blob_permissions_success(monkeypatch):
-    """Test check_apim_blob_permissions with successful permissions."""
-    def mock_run_success(cmd, **kwargs):
-        if 'az apim show' in cmd and 'identity.principalId' in cmd:
-            return utils.Output(success=True, text='12345678-1234-1234-1234-123456789012')
-        if 'az storage account show' in cmd and '--query id' in cmd:
-            return utils.Output(success=True, text='/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/test-storage')
-        if 'az role assignment list' in cmd:
-            return utils.Output(success=True, text='/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/test-rg/providers/Microsoft.Authorization/roleAssignments/test-assignment')
-        if 'az storage blob list' in cmd:
-            return utils.Output(success=True, text='test-blob.txt')
-
-        return utils.Output(success=True, text='{}')
-
-    monkeypatch.setattr(utils, 'run', mock_run_success)
-    monkeypatch.setattr(utils, 'print_info', lambda x: None)
-    monkeypatch.setattr(utils, 'print_success', lambda x: None)
-
-    result = utils.check_apim_blob_permissions('test-apim', 'test-storage', 'test-rg', 1)
-    assert result is True
-
-
-def test_check_apim_blob_permissions_failure(monkeypatch):
-    """Test check_apim_blob_permissions with failed permissions."""
-    def mock_run_failure(cmd, **kwargs):
-        if 'az apim api operation' in cmd:
-            return utils.Output(success=True, text='{"statusCode": 403}')
-        return utils.Output(success=True, text='{}')
-
-    monkeypatch.setattr(utils, 'run', mock_run_failure)
-    monkeypatch.setattr(utils, 'print_info', lambda x: None)
-    monkeypatch.setattr(utils, 'print_warning', lambda x: None)
-    monkeypatch.setattr('time.sleep', lambda x: None)
-
-    result = utils.check_apim_blob_permissions('test-apim', 'test-storage', 'test-rg', 1)
-    assert result is False
-
-
 def test_wait_for_apim_blob_permissions_success(monkeypatch):
     """Test wait_for_apim_blob_permissions with successful wait."""
     monkeypatch.setattr(utils, 'check_apim_blob_permissions', lambda *args: True)
@@ -844,260 +639,17 @@ def test_get_azure_role_guid_comprehensive(monkeypatch):
 
 # ------------------------------
 # INFRASTRUCTURE SELECTION TESTS
+# Note: Tests for _find_infrastructure_instances and _query_and_select_infrastructure
+# removed as they test private implementation details. The standalone function
+# find_infrastructure_instances() is tested in test_azure_resources.py
 # ------------------------------
 
-def test_find_infrastructure_instances_success(monkeypatch):
-    """Test _find_infrastructure_instances with successful Azure query."""
-    # Create a mock NotebookHelper instance
-    nb_helper = utils.NotebookHelper(
-        'test-sample', 'test-rg', 'eastus',
-        INFRASTRUCTURE.SIMPLE_APIM, [INFRASTRUCTURE.SIMPLE_APIM]
-    )
-
-    # Mock successful Azure CLI response
-    mock_output = utils.Output(success=True, text='apim-infra-simple-apim-1\napim-infra-simple-apim-2\napim-infra-simple-apim')
-    monkeypatch.setattr(utils, 'run', lambda *args, **kwargs: mock_output)
-
-    result = nb_helper._find_infrastructure_instances(INFRASTRUCTURE.SIMPLE_APIM)
-
-    expected = [
-        (INFRASTRUCTURE.SIMPLE_APIM, None),
-        (INFRASTRUCTURE.SIMPLE_APIM, 1),
-        (INFRASTRUCTURE.SIMPLE_APIM, 2)
-    ]
-    # Check that we have the expected results regardless of order
-    assert len(result) == len(expected)
-    assert set(result) == set(expected)
-
-def test_find_infrastructure_instances_no_results(monkeypatch):
-    """Test _find_infrastructure_instances with no matching resource groups."""
-    nb_helper = utils.NotebookHelper(
-        'test-sample', 'test-rg', 'eastus',
-        INFRASTRUCTURE.SIMPLE_APIM, [INFRASTRUCTURE.SIMPLE_APIM]
-    )
-
-    # Mock empty Azure CLI response
-    mock_output = utils.Output(success=True, text='')
-    monkeypatch.setattr(utils, 'run', lambda *args, **kwargs: mock_output)
-
-    result = nb_helper._find_infrastructure_instances(INFRASTRUCTURE.SIMPLE_APIM)
-    assert result == []
-
-def test_find_infrastructure_instances_failure(monkeypatch):
-    """Test _find_infrastructure_instances when Azure CLI fails."""
-    nb_helper = utils.NotebookHelper(
-        'test-sample', 'test-rg', 'eastus',
-        INFRASTRUCTURE.SIMPLE_APIM, [INFRASTRUCTURE.SIMPLE_APIM]
-    )
-
-    # Mock failed Azure CLI response
-    mock_output = utils.Output(success=False, text='Error: Authentication failed')
-    monkeypatch.setattr(utils, 'run', lambda *args, **kwargs: mock_output)
-
-    result = nb_helper._find_infrastructure_instances(INFRASTRUCTURE.SIMPLE_APIM)
-    assert result == []
-
-def test_find_infrastructure_instances_invalid_names(monkeypatch):
-    """Test _find_infrastructure_instances with invalid resource group names."""
-    nb_helper = utils.NotebookHelper(
-        'test-sample', 'test-rg', 'eastus',
-        INFRASTRUCTURE.SIMPLE_APIM, [INFRASTRUCTURE.SIMPLE_APIM]
-    )
-
-    # Mock Azure CLI response with valid and invalid names
-    mock_output = utils.Output(
-        success=True,
-        text='apim-infra-simple-apim-1\napim-infra-simple-apim-invalid\napim-infra-simple-apim-2\napim-infra-different'
-    )
-    monkeypatch.setattr(utils, 'run', lambda *args, **kwargs: mock_output)
-
-    result = nb_helper._find_infrastructure_instances(INFRASTRUCTURE.SIMPLE_APIM)
-
-    # Should only include valid names and skip invalid ones
-    expected = [
-        (INFRASTRUCTURE.SIMPLE_APIM, 1),
-        (INFRASTRUCTURE.SIMPLE_APIM, 2)
-    ]
-    # Check that we have the expected results regardless of order
-    assert len(result) == len(expected)
-    assert set(result) == set(expected)
-
-def test_find_infrastructure_instances_mixed_formats(monkeypatch):
-    """Test _find_infrastructure_instances with mixed indexed and non-indexed names."""
-    nb_helper = utils.NotebookHelper(
-        'test-sample', 'test-rg', 'eastus',
-        INFRASTRUCTURE.APIM_ACA, [INFRASTRUCTURE.APIM_ACA]
-    )
-
-    # Mock Azure CLI response with mixed formats
-    mock_output = utils.Output(
-        success=True,
-        text='apim-infra-apim-aca\napim-infra-apim-aca-1\napim-infra-apim-aca-5'
-    )
-    monkeypatch.setattr(utils, 'run', lambda *args, **kwargs: mock_output)
-
-    result = nb_helper._find_infrastructure_instances(INFRASTRUCTURE.APIM_ACA)
-
-    expected = [
-        (INFRASTRUCTURE.APIM_ACA, None),
-        (INFRASTRUCTURE.APIM_ACA, 1),
-        (INFRASTRUCTURE.APIM_ACA, 5)
-    ]
-    # Check that we have the expected results regardless of order
-    assert len(result) == len(expected)
-    assert set(result) == set(expected)
-
-def test_query_and_select_infrastructure_no_options(monkeypatch):
-    """Test _query_and_select_infrastructure when no infrastructures are available."""
-    nb_helper = utils.NotebookHelper(
-        'test-sample', 'test-rg', 'eastus',
-        INFRASTRUCTURE.SIMPLE_APIM, [INFRASTRUCTURE.SIMPLE_APIM, INFRASTRUCTURE.APIM_ACA]
-    )
-
-    # Mock empty results for all infrastructure types
-    monkeypatch.setattr(nb_helper, '_find_infrastructure_instances', lambda x: [])
-    monkeypatch.setattr(utils, 'print_info', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'print_warning', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'print_success', lambda *args, **kwargs: None)
-
-    # Mock the infrastructure creation to succeed
-    def mock_infrastructure_creation(self, bypass_check=True):
-        return True
-
-    monkeypatch.setattr(utils.InfrastructureNotebookHelper, 'create_infrastructure', mock_infrastructure_creation)
-
-    # When no infrastructures are available, it should automatically create new infrastructure
-    result = nb_helper._query_and_select_infrastructure()
-
-    # Expect it to return the desired infrastructure and None index (since 'test-rg' doesn't match the expected pattern)
-    assert result == (INFRASTRUCTURE.SIMPLE_APIM, None)
-
-def test_query_and_select_infrastructure_single_option(monkeypatch):
-    """Test _query_and_select_infrastructure with a single available option."""
-    # Set up nb_helper with a resource group name that doesn't match the desired pattern
-    # This forces the method to show the selection menu instead of finding existing desired infrastructure
-    nb_helper = utils.NotebookHelper(
-        'test-sample', 'test-rg', 'eastus',
-        INFRASTRUCTURE.SIMPLE_APIM, [INFRASTRUCTURE.SIMPLE_APIM, INFRASTRUCTURE.APIM_ACA]
-    )
-
-    # Mock single result that doesn't match the desired infrastructure
-    def mock_find_instances(infra):
-        if infra == INFRASTRUCTURE.SIMPLE_APIM:
-            return [(INFRASTRUCTURE.SIMPLE_APIM, 2)]  # Different index than expected
-        return []
-
-    # Mock the infrastructure creation to succeed
-    def mock_infrastructure_creation(self, bypass_check=True):
-        return True
-
-    monkeypatch.setattr(nb_helper, '_find_infrastructure_instances', mock_find_instances)
-    monkeypatch.setattr(utils, 'print_info', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'print_success', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'print_warning', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'get_infra_rg_name', lambda infra, idx: f'apim-infra-{infra.value}-{idx}')
-    monkeypatch.setattr(utils, 'get_resource_group_location', lambda rg_name: 'eastus')
-    monkeypatch.setattr(utils.InfrastructureNotebookHelper, 'create_infrastructure', mock_infrastructure_creation)
-    monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None)
-
-    # Mock user input to select option 2 (the existing infrastructure, since option 1 is "create new")
-    monkeypatch.setattr('builtins.input', lambda prompt: '2')
-
-    result = nb_helper._query_and_select_infrastructure()
-    assert result == (INFRASTRUCTURE.SIMPLE_APIM, 2)
-
-def test_query_and_select_infrastructure_multiple_options(monkeypatch):
-    """Test _query_and_select_infrastructure with multiple available options."""
-    nb_helper = utils.NotebookHelper(
-        'test-sample', 'test-rg', 'eastus',
-        INFRASTRUCTURE.SIMPLE_APIM, [INFRASTRUCTURE.SIMPLE_APIM, INFRASTRUCTURE.APIM_ACA]
-    )
-
-    # Mock multiple results
-    def mock_find_instances(infra):
-        if infra == INFRASTRUCTURE.SIMPLE_APIM:
-            return [(INFRASTRUCTURE.SIMPLE_APIM, 1), (INFRASTRUCTURE.SIMPLE_APIM, 2)]
-        if infra == INFRASTRUCTURE.APIM_ACA:
-            return [(INFRASTRUCTURE.APIM_ACA, None)]
-        return []
-
-    # Mock the infrastructure creation to succeed
-    def mock_infrastructure_creation(self, bypass_check=True):
-        return True
-
-    monkeypatch.setattr(nb_helper, '_find_infrastructure_instances', mock_find_instances)
-    monkeypatch.setattr(utils, 'print_info', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'print_success', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'get_infra_rg_name', lambda infra, idx: f'apim-infra-{infra.value}-{idx or ""}')
-    monkeypatch.setattr(utils, 'get_resource_group_location', lambda rg_name: 'eastus')
-    monkeypatch.setattr(utils.InfrastructureNotebookHelper, 'create_infrastructure', mock_infrastructure_creation)
-    monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None)
-
-    # Options are sorted:
-    # 1. Create new simple-apim (index: 1 since nb_helper._get_current_index() returns 1 for 'test-rg')
-    # 2. apim-aca (no index) - sorted first alphabetically
-    # 3. simple-apim (index: 1)
-    # 4. simple-apim (index: 2)
-    # Select option 2 (first existing infrastructure: APIM_ACA with no index)
-    monkeypatch.setattr('builtins.input', lambda prompt: '2')
-
-    result = nb_helper._query_and_select_infrastructure()
-    assert result == (INFRASTRUCTURE.APIM_ACA, None)
-
-def test_query_and_select_infrastructure_user_cancellation(monkeypatch):
-    """Test _query_and_select_infrastructure when user cancels selection."""
-    nb_helper = utils.NotebookHelper(
-        'test-sample', 'test-rg', 'eastus',
-        INFRASTRUCTURE.SIMPLE_APIM, [INFRASTRUCTURE.SIMPLE_APIM]
-    )
-
-    # Mock single result
-    def mock_find_instances(infra):
-        return [(INFRASTRUCTURE.SIMPLE_APIM, 1)]
-
-    monkeypatch.setattr(nb_helper, '_find_infrastructure_instances', mock_find_instances)
-    monkeypatch.setattr(utils, 'print_info', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'print_warning', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'get_infra_rg_name', lambda infra, idx: f'apim-infra-{infra.value}-{idx}')
-    monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None)
-
-    # Mock user input to press Enter (cancel)
-    monkeypatch.setattr('builtins.input', lambda prompt: '')
-
-    result = nb_helper._query_and_select_infrastructure()
-    assert result == (None, None)
-
-def test_query_and_select_infrastructure_invalid_input_then_valid(monkeypatch):
-    """Test _query_and_select_infrastructure with invalid input followed by valid input."""
-    nb_helper = utils.NotebookHelper(
-        'test-sample', 'test-rg', 'eastus',
-        INFRASTRUCTURE.SIMPLE_APIM, [INFRASTRUCTURE.SIMPLE_APIM]
-    )
-
-    # Mock single result that doesn't match the desired infrastructure
-    def mock_find_instances(infra):
-        return [(INFRASTRUCTURE.SIMPLE_APIM, 2)]  # Different index
-
-    # Mock the infrastructure creation to succeed
-    def mock_infrastructure_creation(self, bypass_check=True):
-        return True
-
-    monkeypatch.setattr(nb_helper, '_find_infrastructure_instances', mock_find_instances)
-    monkeypatch.setattr(utils, 'print_info', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'print_error', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'print_success', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'get_infra_rg_name', lambda infra, idx: f'apim-infra-{infra.value}-{idx}')
-    monkeypatch.setattr(utils, 'get_resource_group_location', lambda rg_name: 'eastus')
-    monkeypatch.setattr(utils.InfrastructureNotebookHelper, 'create_infrastructure', mock_infrastructure_creation)
-    monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None)
-
-    # Mock user input sequence: invalid number, invalid text, then valid choice (option 2 = existing infrastructure)
-    inputs = iter(['99', 'abc', '2'])
-    monkeypatch.setattr('builtins.input', lambda prompt: next(inputs))
-
-    result = nb_helper._query_and_select_infrastructure()
-    assert result == (INFRASTRUCTURE.SIMPLE_APIM, 2)
+# End of Infrastructure Selection Tests - NotebookHelper._query_and_select_infrastructure tests removed
+# as they test private implementation details. The public behavior is tested through integration tests.
 
 # ------------------------------
 # TESTS FOR _prompt_for_infrastructure_update
 # ------------------------------
@@ -1430,47 +982,3 @@ def test_notebookhelper_initialization_with_jwt(monkeypatch):
     assert nb_helper.jwt_key_name == 'JwtSigningKey-test-sample-1234567890'
     assert nb_helper.jwt_key_value == 'test-key'
     assert nb_helper.jwt_key_value_bytes_b64 == 'test-key-b64'
-
-def test_infrastructure_sorting_in_query_and_select(monkeypatch):
-    """Test that infrastructure options are sorted correctly by type then index."""
-    nb_helper = utils.NotebookHelper(
-        'test-sample', 'test-rg', 'eastus',
-        INFRASTRUCTURE.SIMPLE_APIM, [INFRASTRUCTURE.SIMPLE_APIM, INFRASTRUCTURE.APIM_ACA, INFRASTRUCTURE.AFD_APIM_PE]
-    )
-
-    # Mock mixed results in unsorted order
-    def mock_find_instances(infra):
-        if infra == INFRASTRUCTURE.SIMPLE_APIM:
-            return [(INFRASTRUCTURE.SIMPLE_APIM, 3), (INFRASTRUCTURE.SIMPLE_APIM, 1)]
-        if infra == INFRASTRUCTURE.APIM_ACA:
-            return [(INFRASTRUCTURE.APIM_ACA, None), (INFRASTRUCTURE.APIM_ACA, 2)]
-        if infra == INFRASTRUCTURE.AFD_APIM_PE:
-            return [(INFRASTRUCTURE.AFD_APIM_PE, 1)]
-
-        return []
-
-    # Mock the infrastructure creation to succeed
-    def mock_infrastructure_creation(self, bypass_check=True):
-        return True
-
-    monkeypatch.setattr(nb_helper, '_find_infrastructure_instances', mock_find_instances)
-    monkeypatch.setattr(utils, 'print_info', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'print_success', lambda *args, **kwargs: None)
-    monkeypatch.setattr(utils, 'get_infra_rg_name', lambda infra, idx: f'apim-infra-{infra.value}-{idx or ""}')
-    monkeypatch.setattr(utils, 'get_resource_group_location', lambda rg_name: 'eastus')
-    monkeypatch.setattr(utils.InfrastructureNotebookHelper, 'create_infrastructure', mock_infrastructure_creation)
-    monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None)
-
-    # Test sorting by selecting different options:
-    # Options should be sorted: AFD_APIM_PE(1), APIM_ACA(None), APIM_ACA(2), SIMPLE_APIM(1), SIMPLE_APIM(3)
-    # 1 = Create new simple-apim
-    # 2 = afd-apim-pe (index: 1) - alphabetically first
-    # 3 = apim-aca (no index) - None treated as 0
-    # 4 = apim-aca (index: 2)
-    # 5 = simple-apim (index: 1)
-    # 6 = simple-apim (index: 3)
-
-    # Test selecting the first existing infrastructure (afd-apim-pe with index 1)
-    monkeypatch.setattr('builtins.input', lambda prompt: '2')
-    result = nb_helper._query_and_select_infrastructure()
-    assert result == (INFRASTRUCTURE.AFD_APIM_PE, 1)

From 99ea4592626b818fdabb907cedcd7a7eaf83cd70 Mon Sep 17 00:00:00 2001
From: Simon Kurtz
Date: Thu, 11 Dec 2025 11:37:07 -0500
Subject: [PATCH 11/23] Add Python line ending LF

---
 tests/python/test_utils.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py
index 7f1144e..5479b8a 100644
--- a/tests/python/test_utils.py
+++ b/tests/python/test_utils.py
@@ -128,7 +128,6 @@ def mock_inspect_currentframe():
     with pytest.raises(ValueError, match='Could not auto-detect sample name'):
         utils.read_policy_xml('policy.xml', {'key': 'value'})
 
-
 # ------------------------------
 # validate_infrastructure
 # ------------------------------
@@ -157,7 +156,6 @@ def test_generate_signing_key():
     assert isinstance(s, str)
     assert isinstance(b64, str)
 
-
 # ------------------------------
 # build_infrastructure_tags
 # ------------------------------
@@ -204,7 +202,6 @@ def test_build_infrastructure_tags_none_custom_tags():
     expected = {'infrastructure': 'apim-aca'}
     assert result == expected
 
-
 # ------------------------------
 # create_bicep_deployment_group
 # ------------------------------
@@ -608,7 +605,6 @@ def mock_run_with_tags(*args, **kwargs):
     utils.create_resource_group('test-rg', 'eastus', {})  # Empty dict, function doesn't return anything
 
-
 # ------------------------------
 # ROLE AND PERMISSION TESTS
 # ------------------------------
@@ -636,7 +632,6 @@ def test_get_azure_role_guid_comprehensive(monkeypatch):
     result = utils.get_azure_role_guid('Nonexistent Role')
     assert result is None
 
-
 # ------------------------------
 # INFRASTRUCTURE SELECTION TESTS
 # Note: Tests for _find_infrastructure_instances and _query_and_select_infrastructure
@@ -700,7 +695,6 @@ def test_prompt_for_infrastructure_update_invalid_choice_then_valid(monkeypatch):
     result = utils._prompt_for_infrastructure_update('test-rg')
     assert result == (True, None)
 
-
 # ------------------------------
 # TESTS FOR InfrastructureNotebookHelper.create_infrastructure WITH INDEX RETRY
 # ------------------------------

From 1afedcaff23b07a18750533f0a1af08cb25eb3a5 Mon Sep 17 00:00:00 2001
From: Simon Kurtz
Date: Thu, 11 Dec 2025 11:37:36 -0500
Subject: [PATCH 12/23] Add Python line ending LF

---
 .gitattributes | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.gitattributes b/.gitattributes
index 816381a..da60e48 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -26,6 +26,9 @@
 # Markdown files should use LF
 *.md text eol=lf
 
+# Python files should use LF
+*.py text eol=lf
+
 # Windows batch files should use CRLF
 *.bat text eol=crlf
 *.cmd text eol=crlf
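The two patches above pin Python sources to LF: the .gitattributes rule makes Git normalize line endings going forward, and existing checkouts are typically renormalized once with `git add --renormalize .`. A small audit sketch for spotting stray CRLF endings locally (a convenience script, not part of the patch series):

from pathlib import Path

# Flag any Python file that still contains Windows-style line endings.
for path in Path('.').rglob('*.py'):
    if b'\r\n' in path.read_bytes():
        print(path)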
From 5af64df811dcc703834959a3eab41aa47fcc7777 Mon Sep 17 00:00:00 2001
From: Simon Kurtz
Date: Thu, 11 Dec 2025 11:54:14 -0500
Subject: [PATCH 13/23] Fix Pylint issues

---
 tests/python/.pylintrc               |  1 +
 tests/python/test_azure_resources.py | 64 ++++++++++++++--------------
 tests/python/test_json_utils.py      |  2 +-
 3 files changed, 34 insertions(+), 33 deletions(-)

diff --git a/tests/python/.pylintrc b/tests/python/.pylintrc
index 11ab6fd..7778e8f 100644
--- a/tests/python/.pylintrc
+++ b/tests/python/.pylintrc
@@ -13,6 +13,7 @@ disable =
     C0116,  # Missing function or method docstring
     E0401,  # Import error
     W0212,  # Access to a protected member _ of a client class
+    W0401,  # Wildcard import (allow for backwards compatibility)
    R0903,  # Too few public methods
    R0911,  # Too many return statements
    R0913,  # Too many arguments
diff --git a/tests/python/test_azure_resources.py b/tests/python/test_azure_resources.py
index 2bf7d05..4a59e66 100644
--- a/tests/python/test_azure_resources.py
+++ b/tests/python/test_azure_resources.py
@@ -3,10 +3,10 @@
 """
 
 import json
-import pytest
 from unittest.mock import Mock, patch, mock_open, call
+import pytest
 
-from azure_resources import*
+import azure_resources as az
 from apimtypes import INFRASTRUCTURE, Endpoints, Output
 
 
@@ -20,7 +20,7 @@ def test_get_azure_role_guid_success():
     mock_data = {'Contributor': 'role-guid-12345', 'Reader': 'role-guid-67890'}
 
     with patch('builtins.open', mock_open(read_data=json.dumps(mock_data))):
-        result = get_azure_role_guid('Contributor')
+        result = az.get_azure_role_guid('Contributor')
 
     assert result == 'role-guid-12345'
 
@@ -29,7 +29,7 @@ def test_get_azure_role_guid_failure():
     """Test get_azure_role_guid returns None when file not found."""
 
     with patch('builtins.open', side_effect=FileNotFoundError('File not found')):
-        result = get_azure_role_guid('NonExistentRole')
+        result = az.get_azure_role_guid('NonExistentRole')
 
     assert result is None
 
@@ -43,7 +43,7 @@ def test_does_resource_group_exist_true():
     with patch('azure_resources._run') as mock_run:
         mock_run.return_value = Output(True, '{"name": "test-rg"}')
 
-        result = does_resource_group_exist('test-rg')
+        result = az.does_resource_group_exist('test-rg')
 
     assert result is True
     mock_run.assert_called_once_with(
@@ -59,7 +59,7 @@ def test_does_resource_group_exist_false():
     with patch('azure_resources._run') as mock_run:
         mock_run.return_value = Output(False, 'ResourceGroupNotFound')
 
-        result = does_resource_group_exist('nonexistent-rg')
+        result = az.does_resource_group_exist('nonexistent-rg')
 
     assert result is False
 
@@ -70,7 +70,7 @@ def test_get_resource_group_location_success():
     with patch('azure_resources._run') as mock_run:
         mock_run.return_value = Output(True, 'eastus2\n')
 
-        result = get_resource_group_location('test-rg')
+        result = az.get_resource_group_location('test-rg')
 
     assert result == 'eastus2'
     mock_run.assert_called_once_with(
@@ -86,7 +86,7 @@ def test_get_resource_group_location_failure():
     with patch('azure_resources._run') as mock_run:
         mock_run.return_value = Output(False, 'error message')
 
-        result = get_resource_group_location('nonexistent-rg')
+        result = az.get_resource_group_location('nonexistent-rg')
 
     assert result is None
 
@@ -97,7 +97,7 @@ def test_get_resource_group_location_empty():
     with patch('azure_resources._run') as mock_run:
         mock_run.return_value = Output(True, '')
 
-        result = get_resource_group_location('test-rg')
+        result = az.get_resource_group_location('test-rg')
 
     assert result is None
 
@@ -122,7 +122,7 @@ def test_get_account_info_success():
 
         mock_run.side_effect = [account_output, ad_user_output]
 
-        current_user, current_user_id, tenant_id, subscription_id = get_account_info()
+        current_user, current_user_id, tenant_id, subscription_id = az.get_account_info()
 
     assert current_user == 'test.user@example.com'
     assert current_user_id == 'user-id-12345'
@@ -137,7 +137,7 @@ def test_get_account_info_failure():
         mock_run.return_value = Output(False, 'authentication error')
 
         with pytest.raises(Exception) as exc_info:
-            get_account_info()
+            az.get_account_info()
 
     assert 'Failed to retrieve account information' in str(exc_info.value)
 
@@ -151,7 +151,7 @@ def test_get_account_info_no_json():
         mock_run.return_value = output
 
         with pytest.raises(Exception) as exc_info:
-            get_account_info()
+            az.get_account_info()
 
     assert 'Failed to retrieve account information' in str(exc_info.value)
 
@@ -167,7 +167,7 @@ def test_get_deployment_name_with_directory(mock_getcwd, mock_basename, mock_time):
 
     mock_time.return_value = 1234567890
 
-    result = get_deployment_name('my-sample')
+    result = az.get_deployment_name('my-sample')
 
     assert result == 'deploy-my-sample-1234567890'
     mock_getcwd.assert_not_called()
@@ -184,7 +184,7 @@ def test_get_deployment_name_current_directory(mock_getcwd, mock_basename, mock_time):
     mock_getcwd.return_value = '/path/to/current-folder'
     mock_basename.return_value = 'current-folder'
 
-    result = get_deployment_name()
+    result = az.get_deployment_name()
 
     assert result == 'deploy-current-folder-1234567890'
     mock_getcwd.assert_called_once()
@@ -208,7 +208,7 @@ def test_get_frontdoor_url_afd_success():
 
         mock_run.side_effect = [profile_output, endpoint_output]
 
-        result = get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg')
+        result = az.get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg')
 
     assert result == 'https://test.azurefd.net'
 
@@ -223,7 +223,7 @@ def test_get_frontdoor_url_wrong_infrastructure():
     """Test Front Door URL with wrong infrastructure type."""
 
     with patch('azure_resources._run') as mock_run:
-        result = get_frontdoor_url(INFRASTRUCTURE.SIMPLE_APIM, 'test-rg')
+        result = az.get_frontdoor_url(INFRASTRUCTURE.SIMPLE_APIM, 'test-rg')
 
     assert result is None
     mock_run.assert_not_called()
 
@@ -235,7 +235,7 @@ def test_get_frontdoor_url_no_profile():
     with patch('azure_resources._run') as mock_run:
         mock_run.return_value = Output(False, 'No profiles found')
 
-        result = get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg')
+        result = az.get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg')
 
     assert result is None
 
@@ -249,7 +249,7 @@ def test_get_frontdoor_url_no_endpoints():
         endpoint_output = Output(False, 'No endpoints found')
         mock_run.side_effect = [profile_output, endpoint_output]
 
-        result = get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg')
+        result = az.get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg')
 
     assert result is None
 
@@ -265,7 +265,7 @@ def test_get_apim_url_success():
         mock_run.return_value = Output(True, '')
         mock_run.return_value.json_data = [{'name': 'test-apim', 'gatewayUrl': 'https://test-apim.azure-api.net'}]
 
-        result = get_apim_url('test-rg')
+        result = az.get_apim_url('test-rg')
 
     assert result == 'https://test-apim.azure-api.net'
     mock_run.assert_called_once_with(
@@ -280,7 +280,7 @@ def test_get_apim_url_failure():
     with patch('azure_resources._run') as mock_run:
         mock_run.return_value = Output(False, 'No APIM services found')
 
-        result = get_apim_url('test-rg')
+        result = az.get_apim_url('test-rg')
 
     assert result is None
 
@@ -292,7 +292,7 @@ def test_get_apim_url_no_gateway():
         mock_run.return_value = Output(True, '')
         mock_run.return_value.json_data = [{'name': 'test-apim', 'gatewayUrl': None}]
 
-        result = get_apim_url('test-rg')
+        result = az.get_apim_url('test-rg')
 
     assert result is None
 
@@ -317,7 +317,7 @@ def test_get_appgw_endpoint_success():
         ip_output.json_data = {'ipAddress': '1.2.3.4'}
         mock_run.side_effect = [appgw_output, ip_output]
 
-        hostname, ip = get_appgw_endpoint('test-rg')
+        hostname, ip = az.get_appgw_endpoint('test-rg')
 
     assert hostname == 'api.contoso.com'
     assert ip == '1.2.3.4'
 
@@ -335,7 +335,7 @@ def test_get_appgw_endpoint_no_gateway():
     with patch('azure_resources._run') as mock_run:
         mock_run.return_value = Output(False, 'No gateways found')
 
-        hostname, ip = get_appgw_endpoint('test-rg')
+        hostname, ip = az.get_appgw_endpoint('test-rg')
 
     assert hostname is None
     assert ip is None
 
@@ -352,7 +352,7 @@ def test_get_appgw_endpoint_no_listeners():
             'frontendIPConfigurations': []
         }]
 
-        hostname, ip = get_appgw_endpoint('test-rg')
+        hostname, ip = az.get_appgw_endpoint('test-rg')
 
     assert hostname is None
     assert ip is None
 
@@ -365,7 +365,7 @@ def test_get_infra_rg_name_without_index():
     """Test infrastructure resource group name generation without index."""
 
-    result = get_infra_rg_name(INFRASTRUCTURE.SIMPLE_APIM)
+    result = az.get_infra_rg_name(INFRASTRUCTURE.SIMPLE_APIM)
 
     assert result == 'apim-infra-simple-apim'
 
@@ -373,7 +373,7 @@ def test_get_infra_rg_name_with_index():
     """Test infrastructure resource group name generation with index."""
 
-    result = get_infra_rg_name(INFRASTRUCTURE.AFD_APIM_PE, 42)
+    result = az.get_infra_rg_name(INFRASTRUCTURE.AFD_APIM_PE, 42)
 
     assert result == 'apim-infra-afd-apim-pe-42'
 
@@ -381,7 +381,7 @@ def test_get_rg_name_without_index():
     """Test sample resource group name generation without index."""
 
-    result = get_rg_name('test-sample')
+    result = az.get_rg_name('test-sample')
 
     assert result == 'apim-sample-test-sample'
 
@@ -389,7 +389,7 @@ def test_get_rg_name_with_index():
     """Test sample resource group name generation with index."""
 
-    result = get_rg_name('test-sample', 5)
+    result = az.get_rg_name('test-sample', 5)
 
     assert result == 'apim-sample-test-sample-5'
 
@@ -412,7 +412,7 @@ def test_get_unique_suffix_for_resource_group_success(mock_unlink, mock_time, mock_tempfile):
     with patch('azure_resources._run') as mock_run:
         mock_run.return_value = Output(True, 'abc123def456\n')
 
-        result = get_unique_suffix_for_resource_group('test-rg')
+        result = az.get_unique_suffix_for_resource_group('test-rg')
 
     assert result == 'abc123def456'
     mock_run.assert_called_once()
@@ -433,9 +433,9 @@ def test_get_unique_suffix_for_resource_group_failure(mock_unlink, mock_time, mock_tempfile):
     with patch('azure_resources._run') as mock_run:
         mock_run.return_value = Output(False, 'Deployment failed')
 
-        result = get_unique_suffix_for_resource_group('test-rg')
+        result = az.get_unique_suffix_for_resource_group('test-rg')
 
-    assert result == ''
+    assert not result
     mock_unlink.assert_called_once_with('/tmp/template.json')
 
@@ -453,7 +453,7 @@ def test_get_endpoints_success(mock_appgw, mock_apim, mock_afd):
     mock_apim.return_value = 'https://test-apim.azure-api.net'
     mock_appgw.return_value = ('api.contoso.com', '1.2.3.4')
 
-    result = get_endpoints(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg')
+    result = az.get_endpoints(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg')
 
     assert isinstance(result, Endpoints)
""" -import pytest import json +import pytest import json_utils From c6f1aa7c851541734aa890f5e56e8b5bf32fb909 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Fri, 12 Dec 2025 07:34:03 -0500 Subject: [PATCH 14/23] Fix pylint and test issues --- .gitignore | 5 +- README.md | 56 ++++++-- setup/setup_python_path.py | 29 ++--- setup/verify_local_setup.py | 31 ++--- shared/python/apimrequests.py | 114 +++++++++-------- shared/python/apimtesting.py | 6 +- shared/python/apimtypes.py | 56 ++++++-- shared/python/authfactory.py | 2 +- shared/python/azure_resources.py | 40 +++++- shared/python/charts.py | 5 +- shared/python/console.py | 12 ++ shared/python/infrastructures.py | 74 ++++++----- shared/python/json_utils.py | 4 +- shared/python/users.py | 8 +- shared/python/utils.py | 184 +++++++++++---------------- tests/README.md | 123 +++++++++++++++++- tests/python/.pylintrc | 12 ++ tests/python/check_python.ps1 | 125 ++++++++++++++++++ tests/python/check_python.sh | 108 ++++++++++++++++ tests/python/run_pylint.ps1 | 35 +++-- tests/python/run_pylint.sh | 25 +++- tests/python/test_apimrequests.py | 78 ++++++------ tests/python/test_authfactory.py | 2 +- tests/python/test_infrastructures.py | 6 +- tests/python/test_users.py | 8 +- tests/python/test_utils.py | 32 +++-- 26 files changed, 836 insertions(+), 344 deletions(-) create mode 100644 tests/python/check_python.ps1 create mode 100644 tests/python/check_python.sh diff --git a/.gitignore b/.gitignore index 5c3aa20..99651e3 100644 --- a/.gitignore +++ b/.gitignore @@ -36,4 +36,7 @@ tests/python/$TextReport shared/bicep/modules/**/*.json main.json -Test-Matrix.html \ No newline at end of file +Test-Matrix.html + +$JsonReport +$TextReport diff --git a/README.md b/README.md index 729fdbf..4ae4f00 100644 --- a/README.md +++ b/README.md @@ -225,17 +225,41 @@ The repo uses the bicep linter and has rules defined in `bicepconfig.json`. See The repository uses [pylint][pylint-docs] to maintain Python code quality standards. Configuration is located in `tests/python/.pylintrc`. 
-#### Running Pylint +#### Running Code Quality Checks + +**Using the combined check script (recommended):** + +This is the preferred method as it runs both linting and testing in a single command: + +```powershell +# From repository root +.\tests\python\check_python.ps1 # Run both pylint and pytest +.\tests\python\check_python.ps1 -ShowLintReport # Include detailed pylint report +``` + +```bash +# From repository root +./tests/python/check_python.sh # Run both pylint and pytest +./tests/python/check_python.sh --show-report # Include detailed pylint report +``` + +**Running pylint only:** -**Using the convenience script (recommended):** ```powershell -# From tests/python directory -.\run_pylint.ps1 # Run with default settings -.\run_pylint.ps1 -ShowReport # Include full detailed report -.\run_pylint.ps1 -Target "../../samples" # Analyze a different directory +# From repository root +.\tests\python\run_pylint.ps1 # Run with default settings +.\tests\python\run_pylint.ps1 -ShowReport # Include full detailed report +.\tests\python\run_pylint.ps1 -Target "samples" # Analyze specific directory +``` + +```bash +# From repository root +./tests/python/run_pylint.sh # Run with default settings +./tests/python/run_pylint.sh samples --show-report # Analyze specific directory with report ``` **Manual execution:** + ```powershell pylint --rcfile tests/python/.pylintrc shared/python ``` @@ -267,10 +291,26 @@ Python modules in `shared/python` are covered by comprehensive unit tests locate #### 🚀 Running Tests Locally +**Using the combined check script (recommended):** + +This is the preferred method as it runs both linting and testing: + +```powershell +# From repository root +.\tests\python\check_python.ps1 +``` + +```bash +# From repository root +./tests/python/check_python.sh +``` + +**Running tests only:** + - **PowerShell (Windows):** - - Run all tests with coverage: `./tests/python/run_tests.ps1` + - Run all tests with coverage: `.\tests\python\run_tests.ps1` (from repository root) - **Shell (Linux/macOS):** - - Run all tests with coverage: `./tests/python/run_tests.sh` + - Run all tests with coverage: `./tests/python/run_tests.sh` (from repository root) Both scripts: - Run all tests in `tests/python` using pytest diff --git a/setup/setup_python_path.py b/setup/setup_python_path.py index d32f489..161fb7b 100644 --- a/setup/setup_python_path.py +++ b/setup/setup_python_path.py @@ -17,6 +17,7 @@ import sys import subprocess import os +import json from pathlib import Path # Cross-platform path handling (Windows: \, Unix: /) @@ -114,8 +115,8 @@ def generate_env_file() -> None: print(f"Generated .env file : {env_file_path}") print(f"PROJECT_ROOT : {project_root}") print(f"PYTHONPATH : {shared_python_path}") - print(f"SPOTIFY_CLIENT_ID : ") - print(f"SPOTIFY_CLIENT_SECRET : \n") + print("SPOTIFY_CLIENT_ID : ") + print("SPOTIFY_CLIENT_SECRET : \n") def install_jupyter_kernel(): @@ -146,14 +147,14 @@ def install_jupyter_kernel(): try: # Install the kernel for the current user - result = subprocess.run([ + subprocess.run([ sys.executable, '-m', 'ipykernel', 'install', '--user', f'--name={kernel_name}', f'--display-name={display_name}' ], check=True, capture_output=True, text=True) - print(f"✅ Jupyter kernel registered successfully:") + print("✅ Jupyter kernel registered successfully:") print(f" Name : {kernel_name}") print(f" Display Name : {display_name}") @@ -240,7 +241,6 @@ def create_vscode_settings(): content = f.read() # Try to parse as JSON (will fail if it has comments) - import json 
existing_settings = json.loads(content) # Merge required settings with existing ones @@ -255,17 +255,16 @@ def create_vscode_settings(): print(" - Default kernel set to 'apim-samples'") print(" - Python interpreter configured for .venv") - except (json.JSONDecodeError, IOError) as e: - print(f"⚠️ Existing settings.json has comments or formatting issues") - print(f" Please manually add these settings to preserve your existing configuration:") - print(f" - \"jupyter.defaultKernel\": \"apim-samples\"") + except (json.JSONDecodeError, IOError): + print("⚠️ Existing settings.json has comments or formatting issues") + print(" Please manually add these settings to preserve your existing configuration:") + print(" - \"jupyter.defaultKernel\": \"apim-samples\"") print(f" - \"python.defaultInterpreterPath\": \"{required_settings['python.defaultInterpreterPath']}\"") print(f" - \"python.pythonPath\": \"{required_settings['python.pythonPath']}\"") return False else: # Create new settings file try: - import json with open(settings_file, 'w', encoding='utf-8') as f: json.dump(required_settings, f, indent=4) @@ -296,9 +295,9 @@ def validate_kernel_setup(): if 'apim-samples' in result.stdout: print("✅ APIM Samples kernel found in kernelspec list") return True - else: - print("❌ APIM Samples kernel not found in kernelspec list") - return False + + print("❌ APIM Samples kernel not found in kernelspec list") + return False except subprocess.CalledProcessError as e: print(f"❌ Failed to check kernel list: {e}") @@ -367,8 +366,6 @@ def force_kernel_consistency(): } try: - import json - # Read existing settings or create new ones existing_settings = {} if settings_file.exists(): @@ -422,7 +419,7 @@ def setup_complete_environment(): # Summary print("\n" + "="*50) print("📋 Setup Summary:") - print(f" ✅ Python path configuration: Complete") + print(" ✅ Python path configuration: Complete") print(f" {'✅' if kernel_success else '❌'} Jupyter kernel registration: {'Complete' if kernel_success else 'Failed'}") print(f" {'✅' if vscode_success else '❌'} VS Code settings: {'Complete' if vscode_success else 'Failed'}") print(f" {'✅' if consistency_success else '❌'} Kernel consistency enforcement: {'Complete' if consistency_success else 'Failed'}") diff --git a/setup/verify_local_setup.py b/setup/verify_local_setup.py index f4205a3..110d1b0 100644 --- a/setup/verify_local_setup.py +++ b/setup/verify_local_setup.py @@ -43,7 +43,7 @@ def check_virtual_environment(): expected_venv_python = venv_path / ("Scripts" if os.name == 'nt' else "bin") / "python" if not str(current_python).startswith(str(venv_path)): - print_status(f"Not using virtual environment Python", False) + print_status("Not using virtual environment Python", False) print(f" Current: {current_python}") print(f" Expected: {expected_venv_python}") return False @@ -72,7 +72,7 @@ def check_required_packages(): print_status(f"{package_name} is missing", False) missing_packages.append(package_name) - return len(missing_packages) == 0 + return not missing_packages def check_shared_modules(): @@ -85,11 +85,12 @@ def check_shared_modules(): if str(shared_python_path) not in sys.path: sys.path.insert(0, str(shared_python_path)) - # Try importing shared modules - import utils - import apimtypes - import authfactory - import apimrequests + # Try importing shared modules to verify they're accessible + # These imports are intentional for verification purposes + __import__('utils') + __import__('apimtypes') + __import__('authfactory') + __import__('apimrequests') 
print_status("All shared modules can be imported") return True @@ -109,9 +110,9 @@ def check_jupyter_kernel(): if 'apim-samples' in result.stdout: print_status("APIM Samples Jupyter kernel is registered") return True - else: - print_status("APIM Samples Jupyter kernel not found", False) - return False + + print_status("APIM Samples Jupyter kernel not found", False) + return False except (subprocess.CalledProcessError, FileNotFoundError): print_status("Could not check Jupyter kernel registration", False) @@ -146,8 +147,8 @@ def check_vscode_settings(): if all_found: print_status("VS Code settings are configured correctly") return True - else: - return False + + return False except Exception as e: print_status(f"Could not read VS Code settings: {e}", False) @@ -169,9 +170,9 @@ def check_env_file(): if 'PYTHONPATH=' in content and 'PROJECT_ROOT=' in content: print_status(".env file is configured correctly") return True - else: - print_status(".env file missing required configuration", False) - return False + + print_status(".env file missing required configuration", False) + return False except Exception as e: print_status(f"Could not read .env file: {e}", False) diff --git a/shared/python/apimrequests.py b/shared/python/apimrequests.py index aa5f966..bf7eeaf 100644 --- a/shared/python/apimrequests.py +++ b/shared/python/apimrequests.py @@ -4,11 +4,13 @@ import json import time +from typing import Any + import requests import urllib3 -import utils -from typing import Any + from apimtypes import HTTP_VERB, SUBSCRIPTION_KEY_PARAMETER_NAME, SLEEP_TIME_BETWEEN_REQUESTS_MS +import console # Disable SSL warnings for self-signed certificates urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) @@ -18,10 +20,13 @@ # CLASSES # ------------------------------ -class ApimRequests: +class ApimRequests: # pylint: disable=invalid-name """ Methods for making requests to the Azure API Management service. Provides single and multiple request helpers with consistent logging. + + Note: This class intentionally uses camelCase naming for methods and parameters + to maintain consistency with API naming conventions and existing usage. 
""" @@ -119,23 +124,23 @@ def _request(self, method: HTTP_VERB, path: str, headers: list[any] = None, data try: if msg: - utils.print_message(msg, blank_above = True) + console.print_message(msg, blank_above = True) # Ensure path has a leading slash if not path.startswith('/'): path = '/' + path url = self._url + path - utils.print_info(f'{method.value} {url}') + console.print_info(f'{method.value} {url}') merged_headers = self.headers.copy() if headers: merged_headers.update(headers) - utils.print_info(merged_headers) + console.print_info(merged_headers) - response = requests.request(method.value, url, headers = merged_headers, json = data, verify = False) + response = requests.request(method.value, url, headers = merged_headers, json = data, verify = False, timeout = 30) content_type = response.headers.get('Content-Type') @@ -152,10 +157,10 @@ def _request(self, method: HTTP_VERB, path: str, headers: list[any] = None, data return responseBody except requests.exceptions.RequestException as e: - utils.print_error(f'Error making request: {e}') + console.print_error(f'Error making request: {e}') return None - def _multiRequest(self, method: HTTP_VERB, path: str, runs: int, headers: list[any] = None, data: any = None, msg: str | None = None, printResponse: bool = True, sleepMs: int | None = None) -> list[dict[str, Any]]: + def _multiRequest(self, method: HTTP_VERB, path: str, runs: int, headers: list[any] = None, data: any = None, msg: str | None = None, printResponse: bool = True, sleepMs: int | None = None) -> list[dict[str, Any]]: # pylint: disable=invalid-name,too-many-locals """ Make multiple requests to the Azure API Management service. @@ -180,22 +185,22 @@ def _multiRequest(self, method: HTTP_VERB, path: str, runs: int, headers: list[a try: if msg: - utils.print_message(msg, blank_above = True) + console.print_message(msg, blank_above = True) # Ensure path has a leading slash if not path.startswith('/'): path = '/' + path url = self._url + path - utils.print_info(f'{method.value} {url}') + console.print_info(f'{method.value} {url}') for i in range(runs): - utils.print_info(f'▶️ Run {i + 1}/{runs}:') + console.print_info(f'▶️ Run {i + 1}/{runs}:') start_time = time.time() response = session.request(method.value, url, json = data, verify = False) response_time = time.time() - start_time - utils.print_info(f'⌚ {response_time:.2f} seconds') + console.print_info(f'⌚ {response_time:.2f} seconds') self._print_response_code(response) @@ -229,16 +234,16 @@ def _print_response(self, response) -> None: """ self._print_response_code(response) - utils.print_val('Response headers', response.headers, True) + console.print_val('Response headers', response.headers, True) if response.status_code == 200: try: data = json.loads(response.text) - utils.print_val('Response body', json.dumps(data, indent = 4), True) + console.print_val('Response body', json.dumps(data, indent = 4), True) except Exception: - utils.print_val('Response body', response.text, True) + console.print_val('Response body', response.text, True) else: - utils.print_val('Response body', response.text, True) + console.print_val('Response body', response.text, True) def _print_response_code(self, response) -> None: """ @@ -246,13 +251,13 @@ def _print_response_code(self, response) -> None: """ if 200 <= response.status_code < 300: - status_code_str = f'{utils.BOLD_G}{response.status_code} - {response.reason}{utils.RESET}' + status_code_str = f'{console.BOLD_G}{response.status_code} - {response.reason}{console.RESET}' elif 
response.status_code >= 400: - status_code_str = f'{utils.BOLD_R}{response.status_code} - {response.reason}{utils.RESET}' + status_code_str = f'{console.BOLD_R}{response.status_code} - {response.reason}{console.RESET}' else: status_code_str = str(response.status_code) - utils.print_val('Response status', status_code_str) + console.print_val('Response status', status_code_str) def _poll_async_operation(self, location_url: str, headers: dict = None, timeout: int = 60, poll_interval: int = 2) -> requests.Response | None: """ @@ -271,27 +276,28 @@ def _poll_async_operation(self, location_url: str, headers: dict = None, timeout while time.time() - start_time < timeout: try: - utils.print_info(f'GET {location_url}', True) - utils.print_info(headers) - response = requests.get(location_url, headers = headers or {}, verify = False) + console.print_info(f'GET {location_url}', True) + console.print_info(headers) + response = requests.get(location_url, headers = headers or {}, verify = False, timeout = 30) - utils.print_info(f'Polling operation - Status: {response.status_code}') + console.print_info(f'Polling operation - Status: {response.status_code}') if response.status_code == 200: - utils.print_ok('Async operation completed successfully!') + console.print_ok('Async operation completed successfully!') return response - elif response.status_code == 202: - utils.print_info(f'Operation still in progress, waiting {poll_interval} seconds...') + + if response.status_code == 202: + console.print_info(f'Operation still in progress, waiting {poll_interval} seconds...') time.sleep(poll_interval) else: - utils.print_error(f'Unexpected status code during polling: {response.status_code}') + console.print_error(f'Unexpected status code during polling: {response.status_code}') return response except requests.exceptions.RequestException as e: - utils.print_error(f'Error polling operation: {e}') + console.print_error(f'Error polling operation: {e}') return None - utils.print_error(f'Async operation timeout reached after {timeout} seconds') + console.print_error(f'Async operation timeout reached after {timeout} seconds') return None # ------------------------------ @@ -347,7 +353,7 @@ def multiGet(self, path: str, runs: int, headers = None, data = None, msg: str | return self._multiRequest(method = HTTP_VERB.GET, path = path, runs = runs, headers = headers, data = data, msg = msg, printResponse = printResponse, sleepMs = sleepMs) - def singlePostAsync(self, path: str, *, headers = None, data = None, msg: str | None = None, printResponse = True, timeout = 60, poll_interval = 2) -> Any: + def singlePostAsync(self, path: str, *, headers = None, data = None, msg: str | None = None, printResponse = True, timeout = 60, poll_interval = 2) -> Any: # pylint: disable=invalid-name,too-many-locals """ Make an async POST request to the Azure API Management service and poll until completion. 
@@ -366,32 +372,32 @@ def singlePostAsync(self, path: str, *, headers = None, data = None, msg: str | try: if msg: - utils.print_message(msg, blank_above = True) + console.print_message(msg, blank_above = True) # Ensure path has a leading slash if not path.startswith('/'): path = '/' + path url = self._url + path - utils.print_info(f'POST {url}') + console.print_info(f'POST {url}') merged_headers = self.headers.copy() if headers: merged_headers.update(headers) - utils.print_info(merged_headers) + console.print_info(merged_headers) # Make the initial async request - response = requests.request(HTTP_VERB.POST.value, url, headers = merged_headers, json = data, verify = False) + response = requests.request(HTTP_VERB.POST.value, url, headers = merged_headers, json = data, verify = False, timeout = 30) - utils.print_info(f'Initial response status: {response.status_code}') + console.print_info(f'Initial response status: {response.status_code}') if response.status_code == 202: # Accepted - async operation started location_header = response.headers.get('Location') if location_header: - utils.print_info(f'Found Location header: {location_header}') + console.print_info(f'Found Location header: {location_header}') # Poll the location URL until completion final_response = self._poll_async_operation(location_header, timeout = timeout, poll_interval = poll_interval ) @@ -409,29 +415,29 @@ def singlePostAsync(self, path: str, *, headers = None, data = None, msg: str | responseBody = final_response.text return responseBody - else: - utils.print_error('Async operation failed or timed out') - return None - else: - utils.print_error('No Location header found in 202 response') - if printResponse: - self._print_response(response) + + console.print_error('Async operation failed or timed out') return None - else: - # Non-async response, handle normally + + console.print_error('No Location header found in 202 response') if printResponse: self._print_response(response) + return None - content_type = response.headers.get('Content-Type') - responseBody = None + # Non-async response, handle normally + if printResponse: + self._print_response(response) - if content_type and 'application/json' in content_type: - responseBody = json.dumps(response.json(), indent = 4) - else: - responseBody = response.text + content_type = response.headers.get('Content-Type') + responseBody = None - return responseBody + if content_type and 'application/json' in content_type: + responseBody = json.dumps(response.json(), indent = 4) + else: + responseBody = response.text + + return responseBody except requests.exceptions.RequestException as e: - utils.print_error(f'Error making request: {e}') + console.print_error(f'Error making request: {e}') return None diff --git a/shared/python/apimtesting.py b/shared/python/apimtesting.py index 62a5460..18feec1 100644 --- a/shared/python/apimtesting.py +++ b/shared/python/apimtesting.py @@ -93,17 +93,17 @@ def print_summary(self) -> None: print(f' Deployment : {self.deployment.name if self.deployment else 'N/A'}\n') # Test statistics with visual indicators - print(f'📊 Test Execution Statistics:') + print('📊 Test Execution Statistics:') print(f' • Total Tests : {self.total_tests:>5}') print(f' • Tests Passed : {self.tests_passed:>5}') print(f' • Tests Failed : {self.tests_failed:>5} {'❌' if self.tests_failed > 0 else ''}') print(f' • Success Rate : {success_rate:>5.1f}%\n') # Overall result - if self.tests_failed == 0 and self.total_tests > 0: + if not self.tests_failed and self.total_tests > 0: 
print('🎉 OVERALL RESULT: ALL TESTS PASSED! 🎉') print('✨ Congratulations! Your APIM deployment is working flawlessly! ✨') - elif self.total_tests == 0: + elif not self.total_tests: print('⚠️ OVERALL RESULT: NO TESTS EXECUTED') print('🤔 Consider adding some tests to validate your deployment.') else: diff --git a/shared/python/apimtypes.py b/shared/python/apimtypes.py index 8f3db60..ebe5fe0 100644 --- a/shared/python/apimtypes.py +++ b/shared/python/apimtypes.py @@ -49,6 +49,37 @@ def _get_project_root() -> Path: SUBSCRIPTION_KEY_PARAMETER_NAME = 'api-key' SLEEP_TIME_BETWEEN_REQUESTS_MS = 50 +# Explicitly define what is exported with 'from apimtypes import *' +__all__ = [ + # Constants + 'DEFAULT_XML_POLICY_PATH', + 'HELLO_WORLD_XML_POLICY_PATH', + 'REQUEST_HEADERS_XML_POLICY_PATH', + 'BACKEND_XML_POLICY_PATH', + 'API_ID_XML_POLICY_PATH', + 'SUBSCRIPTION_KEY_PARAMETER_NAME', + 'SLEEP_TIME_BETWEEN_REQUESTS_MS', + # Enums + 'Role', + 'APIMNetworkMode', + 'APIM_SKU', + 'HTTP_VERB', + 'INFRASTRUCTURE', + # Data classes and regular classes + 'Endpoints', + 'Output', + 'API', + 'APIOperation', + 'GET_APIOperation', + 'GET_APIOperation2', + 'POST_APIOperation', + 'NamedValue', + 'PolicyFragment', + 'Product', + # Functions + '_get_project_root', +] + # ------------------------------ # PRIVATE METHODS @@ -139,9 +170,7 @@ class INFRASTRUCTURE(StrEnum): APPGW_APIM_PE = 'appgw-apim-pe' # Application Gateway connected to Azure API Management (Standard V2) via Private Link -class Endpoints(object): - - +class Endpoints: """ Represents a set of endpoints to call """ @@ -159,7 +188,7 @@ def __init__(self, deployment: INFRASTRUCTURE): self.deployment = deployment -class Output(object): +class Output: """ Represents the output of a command or deployment, including success status, raw text, and parsed JSON data. """ @@ -179,7 +208,7 @@ def __init__(self, success: bool, text: str): self.jsonParseException = None # Check if the exact string is JSON. 
- if (is_string_json(text)): + if is_string_json(text): try: self.json_data = json.loads(text) except json.JSONDecodeError as e: @@ -205,6 +234,8 @@ def get(self, key: str, label: str = '', secure: bool = False, suppress_logging: """ try: + deployment_output: Any + if not isinstance(self.json_data, dict): raise KeyError('json_data is not a dict') @@ -224,6 +255,8 @@ def get(self, key: str, label: str = '', secure: bool = False, suppress_logging: deployment_output = output_entry['value'] elif key in self.json_data: deployment_output = self.json_data[key]['value'] + else: + raise KeyError(f"Output key '{key}' not found in deployment outputs") if not suppress_logging and label: if secure and isinstance(deployment_output, str) and len(deployment_output) >= 4: @@ -238,7 +271,7 @@ def get(self, key: str, label: str = '', secure: bool = False, suppress_logging: print_error(error) if label: - raise Exception(error) + raise Exception(error) from e return None @@ -257,6 +290,8 @@ def getJson(self, key: str, label: str = '', secure: bool = False, suppress_logg """ try: + deployment_output: Any + if not isinstance(self.json_data, dict): raise KeyError('json_data is not a dict') @@ -276,6 +311,8 @@ def getJson(self, key: str, label: str = '', secure: bool = False, suppress_logg deployment_output = output_entry['value'] elif key in self.json_data: deployment_output = self.json_data[key]['value'] + else: + raise KeyError(f"Output key '{key}' not found in deployment outputs") if not suppress_logging and label: if secure and isinstance(deployment_output, str) and len(deployment_output) >= 4: @@ -296,7 +333,6 @@ def getJson(self, key: str, label: str = '', secure: bool = False, suppress_logg return ast.literal_eval(deployment_output) except (ValueError, SyntaxError) as e: print_error(f'Failed to parse deployment output as Python literal. 
Error: {e}') - pass # Return the original result if it's not a string or can't be parsed return deployment_output @@ -306,7 +342,7 @@ def getJson(self, key: str, label: str = '', secure: bool = False, suppress_logg print_error(error) if label: - raise Exception(error) + raise Exception(error) from e return None @@ -385,8 +421,8 @@ def __init__(self, name: str, displayName: str, urlTemplate: str, method: HTTP_V if not isinstance(method, HTTP_VERB): try: method = HTTP_VERB(method).value - except Exception: - raise ValueError(f'Invalid HTTP_VERB: {method}') + except Exception as exc: + raise ValueError(f'Invalid HTTP_VERB: {method}') from exc self.name = name self.displayName = displayName diff --git a/shared/python/authfactory.py b/shared/python/authfactory.py index fd6628d..2b47a04 100644 --- a/shared/python/authfactory.py +++ b/shared/python/authfactory.py @@ -3,9 +3,9 @@ """ from typing import Any +import time from users import User import jwt -import time # ------------------------------ diff --git a/shared/python/azure_resources.py b/shared/python/azure_resources.py index 00bfb68..55009bb 100644 --- a/shared/python/azure_resources.py +++ b/shared/python/azure_resources.py @@ -17,6 +17,29 @@ from apimtypes import INFRASTRUCTURE, Endpoints, Output from console import print_ok, print_warning, print_error, print_val, print_message, print_info, print_command, print_success +# Explicitly define what is exported with 'from azure_resources import *' +__all__ = [ + # Public functions + 'cleanup_old_jwt_signing_keys', + 'check_apim_blob_permissions', + 'find_infrastructure_instances', + 'create_resource_group', + 'get_azure_role_guid', + 'does_resource_group_exist', + 'get_resource_group_location', + 'get_account_info', + 'get_deployment_name', + 'get_frontdoor_url', + 'get_apim_url', + 'get_appgw_endpoint', + 'get_infra_rg_name', + 'get_unique_suffix_for_resource_group', + 'get_rg_name', + 'get_endpoints', + # Private functions (exported for backward compatibility) + '_run', +] + # ------------------------------ # PRIVATE FUNCTIONS @@ -48,9 +71,13 @@ def _run(command: str, ok_message: str = '', error_message: str = '', print_outp try: output_text = subprocess.check_output(command, shell = True, stderr = subprocess.STDOUT).decode('utf-8') success = True + except subprocess.CalledProcessError as e: + output_bytes = e.output if isinstance(e.output, (bytes, bytearray)) else b'' + output_text = output_bytes.decode('utf-8') + success = False except Exception as e: - # Handles both CalledProcessError and any custom/other exceptions (for test mocks) - output_text = getattr(e, 'output', b'').decode('utf-8') if hasattr(e, 'output') and isinstance(e.output, (bytes, bytearray)) else str(e) + # Covers unexpected errors (and test mocks) without assuming an 'output' attribute exists. + output_text = str(e) success = False if print_errors: @@ -72,7 +99,8 @@ def _run(command: str, ok_message: str = '', error_message: str = '', print_outp if l and print_warnings: print_warning(l) continue - elif l.lower().startswith('error'): + + if l.lower().startswith('error'): if l and print_errors: print_error(l) continue @@ -247,7 +275,7 @@ def check_apim_blob_permissions(apim_name: str, storage_account_name: str, resou ) if role_assignment_output.success and role_assignment_output.text.strip(): - print_success(f'Role assignment found! APIM managed identity has Storage Blob Data Reader permissions.') + print_success('Role assignment found! 
APIM managed identity has Storage Blob Data Reader permissions.') # Additional check: try to test blob access using the managed identity print_info('Testing actual blob access...') @@ -264,8 +292,8 @@ def check_apim_blob_permissions(apim_name: str, storage_account_name: str, resou else: print_warning('Role assignment exists but blob access test failed. Permissions may still be propagating...') - if elapsed_time == 0: - print_info(f'Role assignment not found yet. Waiting for Azure AD propagation...') + if not elapsed_time: + print_info('Role assignment not found yet. Waiting for Azure AD propagation...') else: print_info(f'Still waiting... ({elapsed_time // 60}m {elapsed_time % 60}s elapsed)') diff --git a/shared/python/charts.py b/shared/python/charts.py index 708a57d..d73e361 100644 --- a/shared/python/charts.py +++ b/shared/python/charts.py @@ -4,11 +4,12 @@ This module will likely be moved to the /shared/python directory in the future once it's more generic. """ +import json + import pandas as pd import matplotlib.pyplot as plt from matplotlib.patches import Rectangle as pltRectangle import matplotlib as mpl -import json # ------------------------------ @@ -17,7 +18,7 @@ # TODO: A specialized barchart for multi-request scenarios should be created and use a more generic base class barchart. # TODO: BarChart should be a base class for other chart types once it's more generic. -class BarChart(object): +class BarChart: """ Class for creating bar charts with colored bars based on backend indexes. """ diff --git a/shared/python/console.py b/shared/python/console.py index d65042e..8663df1 100644 --- a/shared/python/console.py +++ b/shared/python/console.py @@ -32,6 +32,18 @@ # Thread-safe print lock _print_lock = threading.Lock() +# Explicitly define what is exported with 'from console import *' +__all__ = [ + # Constants + 'BOLD_B', 'BOLD_G', 'BOLD_R', 'BOLD_Y', 'BOLD_C', 'BOLD_M', 'BOLD_W', 'RESET', + 'THREAD_COLORS', 'CONSOLE_WIDTH', + # Private (but re-exported for backward compatibility) + '_print_lock', '_print_log', + # Public functions + 'print_command', 'print_error', 'print_info', 'print_message', + 'print_ok', 'print_success', 'print_warning', 'print_val', 'print_header', +] + # ------------------------------ # PRIVATE METHODS diff --git a/shared/python/infrastructures.py b/shared/python/infrastructures.py index 0238e1f..5bacd67 100644 --- a/shared/python/infrastructures.py +++ b/shared/python/infrastructures.py @@ -6,15 +6,26 @@ import os import time import traceback -from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path -from apimtypes import * -import utils -from utils import Output +from typing import List +from concurrent.futures import ThreadPoolExecutor, as_completed +import requests + +from apimtypes import ( + API, + APIM_SKU, + APIMNetworkMode, + GET_APIOperation, + HELLO_WORLD_XML_POLICY_PATH, + INFRASTRUCTURE, + PolicyFragment, +) from console import ( BOLD_R, BOLD_Y, RESET, THREAD_COLORS, _print_lock, _print_log, print_error, print_info, print_message, print_ok, print_success, print_warning ) +import utils +from utils import Output # ------------------------------ @@ -159,14 +170,13 @@ def _verify_infrastructure(self, rg_name: str) -> bool: if self._verify_infrastructure_specific(rg_name): print('\n🎉 Infrastructure verification completed successfully!') return True - else: - print('\n❌ Infrastructure-specific verification failed!') - return False - else: - print('\n❌ APIM service not found!') + print('\n❌ Infrastructure-specific 
verification failed!') return False + print('\n❌ APIM service not found!') + return False + except Exception as e: print(f'\n⚠️ Verification failed with error: {str(e)}') return False @@ -245,10 +255,10 @@ def deploy_infrastructure(self, is_update: bool = False) -> 'utils.Output': # Write the parameters file params_file_path = infra_dir / 'params.json' - with open(params_file_path, 'w') as file: + with open(params_file_path, 'w', encoding='utf-8') as file: file.write(json.dumps(bicep_parameters_format)) - print(f"📝 Updated the policy XML in the bicep parameters file 'params.json'") + print("📝 Updated the policy XML in the bicep parameters file 'params.json'") # ------------------------------ # EXECUTE DEPLOYMENT @@ -273,7 +283,7 @@ def deploy_infrastructure(self, is_update: bool = False) -> 'utils.Output': apim_gateway_url = output.get('apimResourceGatewayURL', 'APIM API Gateway URL', suppress_logging = True) apim_apis = output.getJson('apiOutputs', 'APIs', suppress_logging = True) - print(f'\n📋 Infrastructure Details:') + print('\n📋 Infrastructure Details:') print(f' Resource Group : {self.rg_name}') print(f' Location : {self.rg_location}') print(f' APIM SKU : {self.apim_sku.value}') @@ -392,7 +402,7 @@ def _approve_private_link_connections(self, apim_service_id: str) -> bool: total = len(pending_connections) print(f' Found {total} pending private link service connection(s)') - if total == 0: + if not total: print(' ✅ No pending connections found - may already be approved') return True @@ -447,7 +457,7 @@ def _disable_apim_public_access(self) -> bool: } params_file_path = infra_dir / 'params.json' - with open(params_file_path, 'w') as file: + with open(params_file_path, 'w', encoding='utf-8') as file: file.write(json.dumps(bicep_parameters_format)) print(' 📝 Updated parameters to disable public access') @@ -484,8 +494,6 @@ def _verify_apim_connectivity(self, apim_gateway_url: str) -> bool: try: # Use the health check endpoint which doesn't require a subscription key - import requests - healthcheck_url = f'{apim_gateway_url}/status-0123456789abcdef' print(f' Testing connectivity to health check endpoint: {healthcheck_url}') @@ -638,7 +646,7 @@ def _create_keyvault_certificate(self, key_vault_name: str) -> bool: Returns: bool: True if certificate was created or already exists, False on failure. 
""" - print(f'\n 🔐 Creating self-signed certificate in Key Vault...\n') + print('\n 🔐 Creating self-signed certificate in Key Vault...\n') print(f' Key Vault : {key_vault_name}') print(f' Certificate : {self.CERT_NAME}') print(f' Domain : {self.DOMAIN_NAME}') @@ -651,7 +659,7 @@ def _create_keyvault_certificate(self, key_vault_name: str) -> bool: ) if check_output.success: - print(f' ✅ Certificate already exists in Key Vault') + print(' ✅ Certificate already exists in Key Vault') return True # Build the certificate policy JSON for Azure CLI @@ -683,8 +691,8 @@ def _create_keyvault_certificate(self, key_vault_name: str) -> bool: escaped_policy = cert_policy.replace('"', '\\"') create_output = utils.run( f'az keyvault certificate create --vault-name {key_vault_name} --name {self.CERT_NAME} --policy "{escaped_policy}"', - f'✅ Certificate created successfully in Key Vault', - f'❌ Failed to create certificate in Key Vault', + '✅ Certificate created successfully in Key Vault', + '❌ Failed to create certificate in Key Vault', print_command_to_run = False ) @@ -742,7 +750,7 @@ def _approve_private_link_connections(self, apim_service_id: str) -> bool: total = len(pending_connections) print(f' Found {total} pending private link service connection(s)') - if total == 0: + if not total: print(' ✅ No pending connections found - this is normal for VNet integration scenarios') print(' ℹ️ Application Gateway will access APIM through VNet integration') return True @@ -798,7 +806,7 @@ def _disable_apim_public_access(self) -> bool: } params_file_path = infra_dir / 'params.json' - with open(params_file_path, 'w') as file: + with open(params_file_path, 'w', encoding='utf-8') as file: file.write(json.dumps(bicep_parameters_format)) print(' 📝 Updated parameters to disable public access') @@ -835,8 +843,6 @@ def _verify_apim_connectivity(self, apim_gateway_url: str) -> bool: try: # Use the health check endpoint which doesn't require a subscription key - import requests - healthcheck_url = f'{apim_gateway_url}/status-0123456789abcdef' print(f' Testing connectivity to health check endpoint: {healthcheck_url}') @@ -868,7 +874,7 @@ def _create_keyvault(self, key_vault_name: str) -> bool: utils.run( f'az keyvault create --name {key_vault_name} --resource-group {self.rg_name} --location {self.rg_location} --enable-rbac-authorization true', f'✅ Key Vault created: {key_vault_name}', - f'❌ Failed to create Key Vault', + '❌ Failed to create Key Vault', print_command_to_run = False ) @@ -881,7 +887,7 @@ def _create_keyvault(self, key_vault_name: str) -> bool: print_errors = False ) if not assign_kv_role.success: - print(f' ❌ Failed to assign Key Vault Certificates Officer role to current user') + print(' ❌ Failed to assign Key Vault Certificates Officer role to current user') return False print(' ✅ Assigned Key Vault Certificates Officer role to current user') @@ -1172,7 +1178,7 @@ def log_warning(msg): log_error(f"✗ Exception cleaning up {resource['type']} '{resource['name']}': {str(e)}") # Summary - if failed_count == 0: + if not failed_count: log_ok(f'All {len(resources)} resource(s) cleaned up successfully!') else: log_warning(f'Completed with {failed_count} failure(s) out of {len(resources)} total resources.') @@ -1227,7 +1233,7 @@ def _cleanup_resources(deployment_name: str, rg_name: str) -> None: resources_to_cleanup = [] # List CognitiveService accounts - output = utils.run(f' az cognitiveservices account list -g {rg_name}', f'Listed CognitiveService accounts', f'Failed to list CognitiveService accounts', 
print_command_to_run = False, print_errors = False) + output = utils.run(f' az cognitiveservices account list -g {rg_name}', 'Listed CognitiveService accounts', 'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False) if output.success and output.json_data: for resource in output.json_data: resources_to_cleanup.append({ @@ -1238,7 +1244,7 @@ def _cleanup_resources(deployment_name: str, rg_name: str) -> None: }) # List APIM resources - output = utils.run(f' az apim list -g {rg_name}', f'Listed APIM resources', f'Failed to list APIM resources', print_command_to_run = False, print_errors = False) + output = utils.run(f' az apim list -g {rg_name}', 'Listed APIM resources', 'Failed to list APIM resources', print_command_to_run = False, print_errors = False) if output.success and output.json_data: for resource in output.json_data: resources_to_cleanup.append({ @@ -1249,7 +1255,7 @@ def _cleanup_resources(deployment_name: str, rg_name: str) -> None: }) # List Key Vault resources - output = utils.run(f' az keyvault list -g {rg_name}', f'Listed Key Vault resources', f'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False) + output = utils.run(f' az keyvault list -g {rg_name}', 'Listed Key Vault resources', 'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False) if output.success and output.json_data: for resource in output.json_data: resources_to_cleanup.append({ @@ -1337,7 +1343,7 @@ def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name: resources_to_cleanup = [] # List CognitiveService accounts - output = utils.run(f' az cognitiveservices account list -g {rg_name}', f'Listed CognitiveService accounts', f'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False) + output = utils.run(f' az cognitiveservices account list -g {rg_name}', 'Listed CognitiveService accounts', 'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False) if output.success and output.json_data: for resource in output.json_data: resources_to_cleanup.append({ @@ -1348,7 +1354,7 @@ def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name: }) # List APIM resources - output = utils.run(f' az apim list -g {rg_name}', f'Listed APIM resources', f'Failed to list APIM resources', print_command_to_run = False, print_errors = False) + output = utils.run(f' az apim list -g {rg_name}', 'Listed APIM resources', 'Failed to list APIM resources', print_command_to_run = False, print_errors = False) if output.success and output.json_data: for resource in output.json_data: resources_to_cleanup.append({ @@ -1359,7 +1365,7 @@ def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name: }) # List Key Vault resources - output = utils.run(f' az keyvault list -g {rg_name}', f'Listed Key Vault resources', f'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False) + output = utils.run(f' az keyvault list -g {rg_name}', 'Listed Key Vault resources', 'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False) if output.success and output.json_data: for resource in output.json_data: resources_to_cleanup.append({ @@ -1479,7 +1485,7 @@ def cleanup_infra_deployments(deployment: INFRASTRUCTURE, indexes: int | list[in print_error(f"❌ Exception during cleanup for {deployment.value}-{task['index']}: {str(e)}") # Final summary - if failed_count == 0: + if not 
failed_count: print_ok(f'All {len(indexes_list)} infrastructure cleanups completed successfully!') else: print_warning(f'Completed with {failed_count} failures out of {len(indexes_list)} total cleanups.') diff --git a/shared/python/json_utils.py b/shared/python/json_utils.py index 71ceaf7..e6b4027 100644 --- a/shared/python/json_utils.py +++ b/shared/python/json_utils.py @@ -66,8 +66,8 @@ def extract_json(text: str) -> Any: decoder = json.JSONDecoder() - for start in range(len(text)): - if text[start] in ('{', '['): + for start, char in enumerate(text): + if char in ('{', '['): try: obj, _ = decoder.raw_decode(text[start:]) return obj diff --git a/shared/python/users.py b/shared/python/users.py index 8e01bf8..d875a9a 100644 --- a/shared/python/users.py +++ b/shared/python/users.py @@ -33,17 +33,17 @@ class User: # CONSTRUCTOR # ------------------------------ - def __init__(self, id: str, name: str, roles: list[str] = None) -> None: + def __init__(self, user_id: str, name: str, roles: list[str] = None) -> None: """ Initializes a User instance with a unique ID, name, and roles. Args: - id (str): The user's unique ID. + user_id (str): The user's unique ID. name (str): The user's name. roles (list, optional): The user's roles. Defaults to empty list. """ - self.id = id + self.id = user_id self.name = name self.roles = roles if roles is not None else [] @@ -111,8 +111,6 @@ def get_user_by_role(role_or_roles: str | list[str]) -> 'User | None': User | None: A random user with one of the given roles, or a user with no roles if Role.NONE is specified, or None if no match. """ - from apimtypes import Role - if isinstance(role_or_roles, str): roles = [role_or_roles] else: diff --git a/shared/python/utils.py b/shared/python/utils.py index e4a791d..7e254ab 100644 --- a/shared/python/utils.py +++ b/shared/python/utils.py @@ -8,15 +8,15 @@ import os import subprocess import time -import traceback import string import secrets import base64 import inspect from pathlib import Path -import apimtypes - from typing import Any +import azure_resources as az + +import apimtypes from apimtypes import APIM_SKU, HTTP_VERB, INFRASTRUCTURE, Endpoints, Output, _get_project_root # ------------------------------ @@ -26,8 +26,36 @@ # The following imports are re-exported from the modules that are now split out from utils. # The re-exports are in place to maintain backward compatibility with existing code. # For new code, please import directly from the relevant modules. -from console import * -from azure_resources import * +from console import ( + print_error, + print_info, + print_message, + print_success, + print_warning, + print_val, +) +from azure_resources import ( + check_apim_blob_permissions, + cleanup_old_jwt_signing_keys, + create_resource_group, + find_infrastructure_instances, + get_apim_url, + get_appgw_endpoint, + get_frontdoor_url, + get_infra_rg_name, + get_resource_group_location, + _run as run +) + + +def does_resource_group_exist(rg_name: str) -> bool: + """Check whether an Azure resource group exists. + + This wrapper keeps `utils.does_resource_group_exist` monkeypatchable while + still delegating to the underlying `azure_resources` implementation. 
+ """ + + return az.does_resource_group_exist(rg_name) # ------------------------------ # HELPER FUNCTIONS @@ -131,9 +159,9 @@ def create_infrastructure(self, bypass_infrastructure_check: bool = False, allow elif not should_proceed: print('❌ Infrastructure deployment cancelled by user.') raise SystemExit("User cancelled deployment") - except (KeyboardInterrupt, EOFError): + except (KeyboardInterrupt, EOFError) as exc: print('\n❌ Infrastructure deployment cancelled by user (Escape/Ctrl+C pressed).') - raise SystemExit("User cancelled deployment") + raise SystemExit("User cancelled deployment") from exc # Check infrastructure existence for the normal flow infrastructure_exists = does_resource_group_exist(get_infra_rg_name(self.deployment, self.index)) if not allow_update else False @@ -162,33 +190,33 @@ def create_infrastructure(self, bypass_infrastructure_check: bool = False, allow ] # Execute the infrastructure creation script with real-time output streaming and UTF-8 encoding to handle Unicode characters properly - process = subprocess.Popen(cmd_args, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, text = True, - bufsize = 1, universal_newlines = True, encoding = 'utf-8', errors = 'replace') + with subprocess.Popen(cmd_args, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, text = True, + bufsize = 1, universal_newlines = True, encoding = 'utf-8', errors = 'replace') as process: - try: - # Stream output in real-time - for line in process.stdout: - print(line.rstrip()) - except Exception as e: - print(f'Error reading subprocess output: {e}') + try: + # Stream output in real-time + for line in process.stdout: + print(line.rstrip()) + except Exception as e: + print(f'Error reading subprocess output: {e}') - # Wait for process to complete - process.wait() + # Wait for process to complete + process.wait() - if process.returncode != 0: - print("❌ Infrastructure creation failed!") - raise SystemExit(1) + if process.returncode: + print("❌ Infrastructure creation failed!") + raise SystemExit(1) return True return True - except KeyboardInterrupt: + except KeyboardInterrupt as exc: print("\n🚫 Infrastructure deployment cancelled by user.") - raise SystemExit("User cancelled deployment") + raise SystemExit("User cancelled deployment") from exc except Exception as e: print(f"❌ Infrastructure deployment failed with error: {e}") - raise SystemExit(1) + raise SystemExit(1) from e class NotebookHelper: """ @@ -318,7 +346,7 @@ def _query_and_select_infrastructure(self) -> tuple[INFRASTRUCTURE | None, int | desired_index_str = self._get_current_index() if self._get_current_index() is not None else 'N/A' desired_location = self.rg_location - print(f'\n Create a NEW infrastructure:\n') + print('\n Create a NEW infrastructure:\n') # Column headers if QUERY_RG_LOCATION: print(f' {'#':>3} {'Infrastructure':<20} {'Index':>8} {'Resource Group':<35} {'Location':<15}') @@ -332,7 +360,7 @@ def _query_and_select_infrastructure(self) -> tuple[INFRASTRUCTURE | None, int | display_options.append(('create_new', self.deployment, self._get_current_index())) option_counter += 1 - print(f'\n Or select an EXISTING infrastructure:\n') + print('\n Or select an EXISTING infrastructure:\n') # Column headers if QUERY_RG_LOCATION: print(f' {'#':>3} {'Infrastructure':<20} {'Index':>8} {'Resource Group':<35} {'Location':<15}') @@ -426,7 +454,7 @@ def deploy_sample(self, bicep_parameters: dict) -> Output: """ # Check infrastructure availability and let user select or create - print(f'Checking desired infrastructure 
availability...\n') + print('Checking desired infrastructure availability...\n') print(f' Infrastructure : {self.deployment.value}') print(f' Index : {self.index}') print(f' Resource group : {self.rg_name}\n') @@ -452,16 +480,16 @@ self.rg_name = get_infra_rg_name(self.deployment, self.index) # Verify the updates were applied correctly - print(f'📝 Updated infrastructure variables') + print('📝 Updated infrastructure variables') else: print('✅ Infrastructure selection already completed in this session') else: print('✅ Desired infrastructure already exists, proceeding with sample deployment') # Deploy the sample APIs to the selected infrastructure - print(f'\n------------------------------------------------') - print(f'\nSAMPLE DEPLOYMENT') - print(f'\nDeploying sample to:\n') + print('\n------------------------------------------------') + print('\nSAMPLE DEPLOYMENT') + print('\nDeploying sample to:\n') print(f' Infrastructure : {self.deployment.value}') print(f' Index : {self.index}') print(f' Resource group : {self.rg_name}\n') @@ -590,7 +618,7 @@ def create_bicep_deployment_group(rg_name: str, rg_location: str, deployment: str params_file_path = os.path.join(bicep_dir, bicep_parameters_file) # Write the updated bicep parameters to the specified parameters file - with open(params_file_path, 'w') as file: + with open(params_file_path, 'w', encoding='utf-8') as file: file.write(json.dumps(bicep_parameters_format)) print(f'📝 Updated the policy XML in the bicep parameters file {bicep_parameters_file}') @@ -753,7 +781,7 @@ def does_infrastructure_exist(infrastructure: INFRASTRUCTURE, index: int, allow_ """ print(f'Debug: does_infrastructure_exist called with allow_update_option={allow_update_option}') - print(f'🔍 Checking if infrastructure already exists...') + print('🔍 Checking if infrastructure already exists...') rg_name = get_infra_rg_name(infrastructure, index) @@ -831,7 +859,7 @@ def determine_policy_path(policy_xml_filepath_or_filename: str, sample_name: str # Legacy mode check: if named_values is None, always treat as legacy (backwards compatibility) # OR if it looks like a path (contains separators or is absolute) + # Note: Check for leading slash to handle POSIX paths on Windows if (path_obj.is_absolute() or + policy_xml_filepath_or_filename.startswith('/') or '/' in policy_xml_filepath_or_filename or '\\' in policy_xml_filepath_or_filename): # Legacy mode: treat as full path @@ -867,7 +897,7 @@ raise ValueError('Not running from within a samples directory') except Exception as e: - raise ValueError(f'Could not auto-detect sample name. Please provide sample_name parameter explicitly. Error: {e}') + raise ValueError(f'Could not auto-detect sample name. Please provide sample_name parameter explicitly. Error: {e}') from e # Construct the full path project_root = apimtypes._get_project_root() @@ -921,76 +951,15 @@ def read_policy_xml(policy_xml_filepath_or_filename: str, named_values: dict[str return policy_template_xml -def run(command: str, ok_message: str = '', error_message: str = '', print_output: bool = False, print_command_to_run: bool = True, print_errors: bool = True, print_warnings: bool = True) -> Output: - """ - Execute a shell command, log the command and its output, and attempt to extract JSON from the output. - - Args: - command (str): The shell command to execute. 
- ok_message (str, optional): Message to print if the command succeeds. Defaults to ''. - error_message (str, optional): Message to print if the command fails. Defaults to ''. - print_output (bool, optional): Whether to print the command output on failure. Defaults to False. - print_command_to_run (bool, optional): Whether to print the command before running it. Defaults to True. - print_errors (bool, optional): Whether to log error lines from the output. Defaults to True. - print_warnings (bool, optional): Whether to log warning lines from the output. Defaults to True. - - Returns: - Output: An Output object containing: - - success (bool): True if the command succeeded, False otherwise. - - text (str): The raw output from the command. - - json_data (any, optional): Parsed JSON object or array if found in the output, else None. - """ - - if print_command_to_run: - print_command(command) - - start_time = time.time() - - # Execute the command and capture the output - - try: - output_text = subprocess.check_output(command, shell = True, stderr = subprocess.STDOUT).decode('utf-8') - success = True - except Exception as e: - # Handles both CalledProcessError and any custom/other exceptions (for test mocks) - output_text = getattr(e, 'output', b'').decode('utf-8') if hasattr(e, 'output') and isinstance(e.output, (bytes, bytearray)) else str(e) - success = False - - if print_errors: - print_error(f'Command failed with error: {output_text}', duration = f'[{int((time.time() - start_time) // 60)}m:{int((time.time() - start_time) % 60)}s]') - traceback.print_exc() - - if print_output: - print(f'Command output:\n{output_text}') - - minutes, seconds = divmod(time.time() - start_time, 60) - - # Only print failures, warnings, or errors if print_output is True - if print_output: - for line in output_text.splitlines(): - l = line.strip() - - # Only log and skip lines that start with 'warning' or 'error' (case-insensitive) - if l.lower().startswith('warning'): - if l and print_warnings: - print_warning(l) - continue - elif l.lower().startswith('error'): - if l and print_errors: - print_error(l) - continue - - print_message = print_ok if success else print_error - - if (ok_message or error_message): - print_message(ok_message if success else error_message, output_text if not success or print_output else '', f'[{int(minutes)}m:{int(seconds)}s]') - - return Output(success, output_text) - # Validation functions will raise ValueError if the value is not valid -validate_http_verb = lambda val: HTTP_VERB(val) -validate_sku = lambda val: APIM_SKU(val) +def validate_http_verb(val): + """Validate HTTP verb value.""" + return HTTP_VERB(val) + +def validate_sku(val): + """Validate APIM SKU value.""" + return APIM_SKU(val) def validate_infrastructure(infra: INFRASTRUCTURE, supported_infras: list[INFRASTRUCTURE]) -> None: """ @@ -1090,31 +1059,30 @@ def get_endpoints(deployment: INFRASTRUCTURE, rg_name: str) -> Endpoints: return endpoints -def get_json(input: str) -> Any: +def get_json(json_str: str) -> Any: """ Safely parse a JSON string or file content into a Python object. Args: - input (str): The JSON string or file content to parse. + json_str (str): The JSON string or file content to parse. Returns: Any: The parsed JSON object, or None if parsing fails. 
""" # If the result is a string, try to parse it as JSON - if isinstance(input, str): + if isinstance(json_str, str): # First try JSON parsing (handles double quotes) try: - return json.loads(input) + return json.loads(json_str) except json.JSONDecodeError: pass # If JSON fails, try Python literal evaluation (handles single quotes) try: - return ast.literal_eval(input) + return ast.literal_eval(json_str) except (ValueError, SyntaxError) as e: print_error(f'Failed to parse deployment output as Python literal. Error: {e}') - pass # Return the original result if it's not a string or can't be parsed - return input + return json_str diff --git a/tests/README.md b/tests/README.md index c4270a1..f69794a 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,10 +1,123 @@ -# APIM Samples Test Matrix +# APIM Samples Testing -This document outlines the compatibility between samples and infrastructure types, providing a comprehensive test matrix to ensure all components work correctly in both local development and Codespaces environments. The format allows for manual check-off during testing sessions. +This directory contains all testing infrastructure and code quality tools for the APIM Samples repository. -## Printable Test Checklist +## Quick Start -A printable checklist is maintained in the [Test-Matrix.md](./Test-Matrix.md) file. +### Run All Checks (Recommended) + +The fastest way to validate your code changes: + +```powershell +# From repository root +.\tests\python\check_python.ps1 +``` + +```bash +# From repository root +./tests/python/check_python.sh +``` + +This runs both pylint (code linting) and pytest (unit tests) with a single command. + +## Code Quality Tools + +### Combined Checks (check_python) + +**Preferred method** - Runs both linting and testing: + +```powershell +# Windows +.\tests\python\check_python.ps1 # Run all checks +.\tests\python\check_python.ps1 -ShowLintReport # Include detailed pylint report +``` + +```bash +# Linux/macOS +./tests/python/check_python.sh # Run all checks +./tests/python/check_python.sh --show-report # Include detailed pylint report +``` + +### Linting Only (pylint) + +Run pylint separately when you only need linting: + +```powershell +# Windows - from repository root +.\tests\python\run_pylint.ps1 # Default: all Python code +.\tests\python\run_pylint.ps1 -ShowReport # Show detailed report +.\tests\python\run_pylint.ps1 -Target "samples" # Lint specific folder +``` + +```bash +# Linux/macOS - from repository root +./tests/python/run_pylint.sh # Default: all Python code +./tests/python/run_pylint.sh samples --show-report # Lint specific folder with report +``` + +#### Pylint Reports + +All pylint runs generate timestamped reports in `tests/python/pylint/reports/`: +- **JSON format**: Machine-readable for CI/CD integration +- **Text format**: Human-readable detailed analysis +- **Latest symlinks**: `latest.json` and `latest.txt` always point to the most recent run + +The script automatically displays a **Top 10 Issues Summary** showing the most frequent code quality issues. 
+
+### Testing Only (pytest)
+
+Run tests separately when you only need test execution:
+
+```powershell
+# Windows - from repository root
+.\tests\python\run_tests.ps1
+```
+
+```bash
+# Linux/macOS - from repository root
+./tests/python/run_tests.sh
+```
+
+Both scripts:
+- Run all tests in `tests/python` using pytest
+- Generate a code coverage report (HTML output in `tests/python/htmlcov`)
+- Store the raw coverage data in `tests/python/.coverage`
+
+#### Viewing Coverage Reports
+
+After running tests, open `tests/python/htmlcov/index.html` in your browser to view detailed coverage information.
+
+## Test Infrastructure
+
+### Configuration Files
+
+- `.pylintrc` - Pylint configuration and rules
+- `.coveragerc` - Coverage.py configuration
+- `pytest.ini` - Pytest configuration and markers
+- `conftest.py` - Shared pytest fixtures
+
+### Test Files
+
+All test files follow the pattern `test_*.py` and test corresponding modules in `shared/python`.
+
+### Pytest Markers
+
+- `@pytest.mark.unit` - Unit tests
+- `@pytest.mark.http` - Tests involving HTTP/mocking
+
+Markers are registered in `pytest.ini`.
+
+## Continuous Integration
+
+On every push or pull request, GitHub Actions will:
+- Install dependencies
+- Run all Python tests with coverage
+- Run pylint on all Python code
+- Upload coverage reports as artifacts
+
+## Sample Test Matrix
+
+A comprehensive compatibility test matrix for samples and infrastructure types is maintained in [Test-Matrix.md](./Test-Matrix.md).
 
 ## Infrastructure Types
 
@@ -27,7 +140,7 @@ To ensure robust functionality across environments, all samples should:
 1. Print this document for manual tracking or use markdown checkboxes in digital form
 2. For each combination of sample, infrastructure, and environment:
    - Deploy the infrastructure and sample
-   - Run tests 
+   - Run tests
    - Mark the corresponding checkbox when tests pass
 3. Document any issues encountered in the "Test Notes" section below
diff --git a/tests/python/.pylintrc b/tests/python/.pylintrc
index 7778e8f..994e913 100644
--- a/tests/python/.pylintrc
+++ b/tests/python/.pylintrc
@@ -5,6 +5,7 @@ persistent = no
 [MESSAGES CONTROL]
 enable = all
 disable =
+    C0103, # Invalid name
     C0301, # Line too long
     C0302, # Too many lines in module
     C0305, # Trailing newlines
@@ -12,15 +13,26 @@ disable =
     C0115, # Missing class docstring
     C0116, # Missing function or method docstring
     E0401, # Import error
+    I0011, # Locally disabling (informational)
+    I0020, # Suppressed message (informational)
     W0212, # Access to a protected member _ of a client class
     W0401, # Wildcard import (allow for backwards compatibility)
+    R0801, # Duplicate code
+    R0902, # Too many instance attributes
     R0903, # Too few public methods
     R0911, # Too many return statements
+    R0912, # Too many branches
     R0913, # Too many arguments
+    R0914, # Too many locals
+    R0915, # Too many statements
     R0917, # Too many positional arguments
+    R1702, # Too many nested blocks
+    R1705, # No else return
+    W0201, # Attribute assigned outside init
     W0511, # TODO/FIXME comments
     W0613, # Unused argument
     W0621, # Redefining name from outer scope
+    W0702, # Bare except
     W0718, # Broad exception caught
     W0719  # Broad exception raised
diff --git a/tests/python/check_python.ps1 b/tests/python/check_python.ps1
new file mode 100644
index 0000000..7ad8419
--- /dev/null
+++ b/tests/python/check_python.ps1
@@ -0,0 +1,125 @@
+#!/usr/bin/env pwsh
+<#
+.SYNOPSIS
+    Run comprehensive Python code quality checks (linting and testing).
+ +.DESCRIPTION + This script executes both pylint linting and pytest testing in sequence, + providing a complete code quality assessment. It's the recommended way + to validate Python code changes before committing. + + The script can be run from anywhere in the repository and will: + - Execute pylint on all Python code with detailed reporting + - Run the full test suite with coverage analysis + - Display combined results and exit with appropriate status code + +.PARAMETER ShowLintReport + Display the full pylint text report after completion. + +.PARAMETER Target + Path to analyze for pylint. Defaults to all Python files in the repository. + +.EXAMPLE + .\check_python.ps1 + Run both linting and testing with default settings + +.EXAMPLE + .\check_python.ps1 -ShowLintReport + Run checks and show detailed pylint report + +.EXAMPLE + .\check_python.ps1 -Target "samples" + Run checks but only lint the samples folder +#> + +param( + [switch]$ShowLintReport, + [string]$Target = "infrastructure samples setup shared tests" +) + +$ErrorActionPreference = "Continue" +$ScriptDir = $PSScriptRoot +$RepoRoot = Split-Path (Split-Path $ScriptDir -Parent) -Parent + +Write-Host "" +Write-Host "╔═══════════════════════════════════════════════════════════╗" -ForegroundColor Cyan +Write-Host "║ Python Code Quality Check ║" -ForegroundColor Cyan +Write-Host "╚═══════════════════════════════════════════════════════════╝" -ForegroundColor Cyan +Write-Host "" + + +# ------------------------------ +# STEP 1: RUN PYLINT +# ------------------------------ + +Write-Host "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -ForegroundColor Yellow +Write-Host " Step 1/2: Running Pylint" -ForegroundColor Yellow +Write-Host "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -ForegroundColor Yellow +Write-Host "" + +$LintArgs = @{ + Target = $Target +} +if ($ShowLintReport) { + $LintArgs.ShowReport = $true +} + +& "$ScriptDir\run_pylint.ps1" @LintArgs +$LintExitCode = $LASTEXITCODE + +Write-Host "" + + +# ------------------------------ +# STEP 2: RUN TESTS +# ------------------------------ + +Write-Host "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -ForegroundColor Yellow +Write-Host " Step 2/2: Running Tests" -ForegroundColor Yellow +Write-Host "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -ForegroundColor Yellow +Write-Host "" + +& "$ScriptDir\run_tests.ps1" +$TestExitCode = $LASTEXITCODE + +Write-Host "" + + +# ------------------------------ +# FINAL SUMMARY +# ------------------------------ + +Write-Host "╔═══════════════════════════════════════════════════════════╗" -ForegroundColor Cyan +Write-Host "║ Final Results ║" -ForegroundColor Cyan +Write-Host "╚═══════════════════════════════════════════════════════════╝" -ForegroundColor Cyan +Write-Host "" + +$LintStatus = if ($LintExitCode -eq 0) { "✅ PASSED" } else { "⚠️ ISSUES FOUND" } +$TestStatus = if ($TestExitCode -eq 0) { "✅ PASSED" } else { "❌ FAILED" } + +$LintColor = if ($LintExitCode -eq 0) { "Green" } else { "Yellow" } +$TestColor = if ($TestExitCode -eq 0) { "Green" } else { "Red" } + +Write-Host " Pylint: " -NoNewline +Write-Host $LintStatus -ForegroundColor $LintColor +Write-Host " Tests: " -NoNewline +Write-Host $TestStatus -ForegroundColor $TestColor +Write-Host "" + +# Determine overall exit code +$OverallExitCode = 0 +if ($LintExitCode -ne 0) { + $OverallExitCode = $LintExitCode +} +if ($TestExitCode -ne 0) { + $OverallExitCode = $TestExitCode +} + +if ($OverallExitCode -eq 0) { + Write-Host "🎉 All 
checks passed! Code is ready for commit." -ForegroundColor Green +} else { + Write-Host "⚠️ Some checks did not pass. Please review and fix issues." -ForegroundColor Yellow +} + +Write-Host "" +exit $OverallExitCode diff --git a/tests/python/check_python.sh b/tests/python/check_python.sh new file mode 100644 index 0000000..eef2486 --- /dev/null +++ b/tests/python/check_python.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# Run comprehensive Python code quality checks (linting and testing) +# +# This script executes both pylint linting and pytest testing in sequence, +# providing a complete code quality assessment. It's the recommended way +# to validate Python code changes before committing. +# +# Usage: +# ./check_python.sh # Run with default settings +# ./check_python.sh --show-report # Include detailed pylint report +# ./check_python.sh samples # Only lint the samples folder + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +SHOW_REPORT="" +TARGET="${1:-infrastructure samples setup shared tests}" + +# Parse arguments +if [ "$1" = "--show-report" ]; then + SHOW_REPORT="--show-report" + TARGET="infrastructure samples setup shared tests" +elif [ "$2" = "--show-report" ]; then + SHOW_REPORT="--show-report" +fi + +echo "" +echo "╔═══════════════════════════════════════════════════════════╗" +echo "║ Python Code Quality Check ║" +echo "╚═══════════════════════════════════════════════════════════╝" +echo "" + + +# ------------------------------ +# STEP 1: RUN PYLINT +# ------------------------------ + +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo " Step 1/2: Running Pylint" +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "" + +set +e +"$SCRIPT_DIR/run_pylint.sh" "$TARGET" $SHOW_REPORT +LINT_EXIT_CODE=$? +set -e + +echo "" + + +# ------------------------------ +# STEP 2: RUN TESTS +# ------------------------------ + +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo " Step 2/2: Running Tests" +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "" + +set +e +"$SCRIPT_DIR/run_tests.sh" +TEST_EXIT_CODE=$? +set -e + +echo "" + + +# ------------------------------ +# FINAL SUMMARY +# ------------------------------ + +echo "╔═══════════════════════════════════════════════════════════╗" +echo "║ Final Results ║" +echo "╚═══════════════════════════════════════════════════════════╝" +echo "" + +if [ $LINT_EXIT_CODE -eq 0 ]; then + echo " Pylint: ✅ PASSED" +else + echo " Pylint: ⚠️ ISSUES FOUND" +fi + +if [ $TEST_EXIT_CODE -eq 0 ]; then + echo " Tests: ✅ PASSED" +else + echo " Tests: ❌ FAILED" +fi + +echo "" + +# Determine overall exit code +OVERALL_EXIT_CODE=0 +if [ $LINT_EXIT_CODE -ne 0 ]; then + OVERALL_EXIT_CODE=$LINT_EXIT_CODE +fi +if [ $TEST_EXIT_CODE -ne 0 ]; then + OVERALL_EXIT_CODE=$TEST_EXIT_CODE +fi + +if [ $OVERALL_EXIT_CODE -eq 0 ]; then + echo "🎉 All checks passed! Code is ready for commit." +else + echo "⚠️ Some checks did not pass. Please review and fix issues." 
+fi + +echo "" +exit $OVERALL_EXIT_CODE diff --git a/tests/python/run_pylint.ps1 b/tests/python/run_pylint.ps1 index d73acfa..30c7e79 100644 --- a/tests/python/run_pylint.ps1 +++ b/tests/python/run_pylint.ps1 @@ -26,22 +26,31 @@ #> param( - [string]$Target = "../../infrastructure ../../samples ../../setup ../../shared ../../tests", + [string]$Target = "infrastructure samples setup shared tests", [switch]$ShowReport ) $ErrorActionPreference = "Continue" -$ReportDir = "pylint/reports" +$ScriptDir = $PSScriptRoot +$RepoRoot = Split-Path (Split-Path $ScriptDir -Parent) -Parent +$ReportDir = Join-Path $ScriptDir "pylint/reports" +$PylintRc = Join-Path $ScriptDir ".pylintrc" $Timestamp = Get-Date -Format "yyyyMMdd_HHmmss" +# Set UTF-8 encoding for Python and console output +$env:PYTHONIOENCODING = "utf-8" +[Console]::OutputEncoding = [System.Text.Encoding]::UTF8 + # Ensure report directory exists if (-not (Test-Path $ReportDir)) { New-Item -ItemType Directory -Path $ReportDir -Force | Out-Null } -Write-Host "`n🔍 Running pylint analysis..." -ForegroundColor Cyan -Write-Host " Target: $Target" -ForegroundColor Gray -Write-Host " Reports: $ReportDir`n" -ForegroundColor Gray +Write-Host "`n🔍 Running pylint analysis...`n" -ForegroundColor Cyan +Write-Host " Target : $Target" -ForegroundColor Gray +Write-Host " Reports : $ReportDir" -ForegroundColor Gray +Write-Host " Working Directory : $RepoRoot" -ForegroundColor Gray +Write-Host " Pylint Config : $PylintRc`n" -ForegroundColor Gray # Run pylint with multiple output formats $JsonReport = "$ReportDir/pylint_${Timestamp}.json" @@ -49,12 +58,16 @@ $TextReport = "$ReportDir/pylint_${Timestamp}.txt" $LatestJson = "$ReportDir/latest.json" $LatestText = "$ReportDir/latest.txt" -# Execute pylint -pylint --rcfile .pylintrc ` - --output-format=json:$JsonReport,colorized,text:$TextReport ` - $Target - -$ExitCode = $LASTEXITCODE +# Change to repository root and execute pylint +Push-Location $RepoRoot +try { + pylint --rcfile "$PylintRc" ` + --output-format=json:$JsonReport,colorized,text:$TextReport ` + $Target.Split(' ') + $ExitCode = $LASTEXITCODE +} finally { + Pop-Location +} # Create symlinks to latest reports if (Test-Path $JsonReport) { diff --git a/tests/python/run_pylint.sh b/tests/python/run_pylint.sh index 923c8f9..88bdbe7 100644 --- a/tests/python/run_pylint.sh +++ b/tests/python/run_pylint.sh @@ -3,17 +3,27 @@ set -e -TARGET="${1:-../../infrastructure ../../samples ../../setup ../../shared ../../tests}" -REPORT_DIR="pylint/reports" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +TARGET="${1:-infrastructure samples setup shared tests}" +REPORT_DIR="$SCRIPT_DIR/pylint/reports" +PYLINT_RC="$SCRIPT_DIR/.pylintrc" TIMESTAMP=$(date +"%Y%m%d_%H%M%S") +# Set UTF-8 encoding for Python and console output +export PYTHONIOENCODING=utf-8 +export LC_ALL=C.UTF-8 +export LANG=C.UTF-8 + # Ensure report directory exists mkdir -p "$REPORT_DIR" echo "" echo "🔍 Running pylint analysis..." 
-echo " Target: All repository Python files" -echo " Reports: $REPORT_DIR" +echo "" +echo " Target : $TARGET" +echo " Reports : $REPORT_DIR" +echo " Working Directory : $REPO_ROOT" echo "" # Run pylint with multiple output formats @@ -22,11 +32,12 @@ TEXT_REPORT="$REPORT_DIR/pylint_${TIMESTAMP}.txt" LATEST_JSON="$REPORT_DIR/latest.json" LATEST_TEXT="$REPORT_DIR/latest.txt" -# Execute pylint (allow non-zero exit for reporting) +# Change to repository root and execute pylint (allow non-zero exit for reporting) +cd "$REPO_ROOT" set +e -pylint --rcfile .pylintrc \ +pylint --rcfile "$PYLINT_RC" \ --output-format=json:"$JSON_REPORT",colorized,text:"$TEXT_REPORT" \ - "$TARGET" + $TARGET EXIT_CODE=$? set -e diff --git a/tests/python/test_apimrequests.py b/tests/python/test_apimrequests.py index bee1728..c09ee21 100644 --- a/tests/python/test_apimrequests.py +++ b/tests/python/test_apimrequests.py @@ -36,9 +36,9 @@ def test_init_no_key(): @pytest.mark.http @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_message') -@patch('apimrequests.utils.print_info') -@patch('apimrequests.utils.print_error') +@patch('apimrequests.console.print_message') +@patch('apimrequests.console.print_info') +@patch('apimrequests.console.print_error') def test_single_get_success(mock_print_error, mock_print_info, mock_print_message, mock_request, apim): mock_response = MagicMock() mock_response.status_code = 200 @@ -56,9 +56,9 @@ def test_single_get_success(mock_print_error, mock_print_info, mock_print_messag @pytest.mark.http @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_message') -@patch('apimrequests.utils.print_info') -@patch('apimrequests.utils.print_error') +@patch('apimrequests.console.print_message') +@patch('apimrequests.console.print_info') +@patch('apimrequests.console.print_error') def test_single_get_error(mock_print_error, mock_print_info, mock_print_message, mock_request, apim): mock_request.side_effect = requests.exceptions.RequestException('fail') result = apim.singleGet(DEFAULT_PATH, printResponse=True) @@ -67,9 +67,9 @@ def test_single_get_error(mock_print_error, mock_print_info, mock_print_message, @pytest.mark.http @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_message') -@patch('apimrequests.utils.print_info') -@patch('apimrequests.utils.print_error') +@patch('apimrequests.console.print_message') +@patch('apimrequests.console.print_info') +@patch('apimrequests.console.print_error') def test_single_post_success(mock_print_error, mock_print_info, mock_print_message, mock_request, apim): mock_response = MagicMock() mock_response.status_code = 201 @@ -87,8 +87,8 @@ def test_single_post_success(mock_print_error, mock_print_info, mock_print_messa @pytest.mark.http @patch('apimrequests.requests.Session') -@patch('apimrequests.utils.print_message') -@patch('apimrequests.utils.print_info') +@patch('apimrequests.console.print_message') +@patch('apimrequests.console.print_info') def test_multi_get_success(mock_print_info, mock_print_message, mock_session, apim): mock_sess = MagicMock() mock_response = MagicMock() @@ -111,8 +111,8 @@ def test_multi_get_success(mock_print_info, mock_print_message, mock_session, ap @pytest.mark.http @patch('apimrequests.requests.Session') -@patch('apimrequests.utils.print_message') -@patch('apimrequests.utils.print_info') +@patch('apimrequests.console.print_message') +@patch('apimrequests.console.print_info') def test_multi_get_error(mock_print_info, mock_print_message, mock_session, apim): mock_sess = 
MagicMock() mock_sess.request.side_effect = requests.exceptions.RequestException('fail') @@ -135,7 +135,7 @@ def make_apim(): def test_single_post_error(): apim = make_apim() with patch('apimrequests.requests.request') as mock_request, \ - patch('apimrequests.utils.print_error') as mock_print_error: + patch('apimrequests.console.print_error') as mock_print_error: mock_request.side_effect = requests.RequestException('fail') result = apim.singlePost(PATH, data={'foo': 'bar'}, printResponse=True) assert result is None @@ -188,7 +188,7 @@ def test_print_response_code_edge(): class DummyResponse: status_code = 302 reason = 'Found' - with patch('apimrequests.utils.print_val') as mock_print_val: + with patch('apimrequests.console.print_val') as mock_print_val: apim._print_response_code(DummyResponse()) mock_print_val.assert_called_with('Response status', '302') @@ -322,8 +322,8 @@ def test_headers_setter(apim): @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_message') -@patch('apimrequests.utils.print_info') +@patch('apimrequests.console.print_message') +@patch('apimrequests.console.print_info') def test_request_with_message(mock_print_info, mock_print_message, mock_request, apim): """Test _request method with message parameter.""" mock_response = MagicMock() @@ -341,7 +341,7 @@ def test_request_with_message(mock_print_info, mock_print_message, mock_request, @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_info') +@patch('apimrequests.console.print_info') def test_request_path_without_leading_slash(mock_print_info, mock_request, apim): """Test _request method with PATH without leading slash.""" mock_response = MagicMock() @@ -363,8 +363,8 @@ def test_request_path_without_leading_slash(mock_print_info, mock_request, apim) @pytest.mark.unit @patch('apimrequests.requests.Session') -@patch('apimrequests.utils.print_message') -@patch('apimrequests.utils.print_info') +@patch('apimrequests.console.print_message') +@patch('apimrequests.console.print_info') def test_multi_request_with_message(mock_print_info, mock_print_message, mock_session_class, apim): """Test _multiRequest method with message parameter.""" mock_session = MagicMock() @@ -386,7 +386,7 @@ def test_multi_request_with_message(mock_print_info, mock_print_message, mock_se @pytest.mark.unit @patch('apimrequests.requests.Session') -@patch('apimrequests.utils.print_info') +@patch('apimrequests.console.print_info') def test_multi_request_path_without_leading_slash(mock_print_info, mock_session_class, apim): """Test _multiRequest method with PATH without leading slash.""" mock_session = MagicMock() @@ -411,7 +411,7 @@ def test_multi_request_path_without_leading_slash(mock_print_info, mock_session_ @pytest.mark.unit @patch('apimrequests.requests.Session') -@patch('apimrequests.utils.print_info') +@patch('apimrequests.console.print_info') def test_multi_request_non_json_response(mock_print_info, mock_session_class, apim): """Test _multiRequest method with non-JSON response.""" mock_session = MagicMock() @@ -431,7 +431,7 @@ def test_multi_request_non_json_response(mock_print_info, mock_session_class, ap @pytest.mark.unit -@patch('apimrequests.utils.print_val') +@patch('apimrequests.console.print_val') def test_print_response_non_200_status(mock_print_val, apim): """Test _print_response method with non-200 status code.""" mock_response = MagicMock() @@ -449,8 +449,8 @@ def test_print_response_non_200_status(mock_print_val, apim): @pytest.mark.unit 
@patch('apimrequests.requests.get') -@patch('apimrequests.utils.print_info') -@patch('apimrequests.utils.print_ok') +@patch('apimrequests.console.print_info') +@patch('apimrequests.console.print_ok') @patch('apimrequests.time.sleep') def test_poll_async_operation_success(mock_sleep, mock_print_ok, mock_print_info, mock_get, apim): """Test _poll_async_operation method with successful completion.""" @@ -466,8 +466,8 @@ def test_poll_async_operation_success(mock_sleep, mock_print_ok, mock_print_info @pytest.mark.unit @patch('apimrequests.requests.get') -@patch('apimrequests.utils.print_info') -@patch('apimrequests.utils.print_error') +@patch('apimrequests.console.print_info') +@patch('apimrequests.console.print_error') @patch('apimrequests.time.sleep') def test_poll_async_operation_in_progress_then_success(mock_sleep, mock_print_error, mock_print_info, mock_get, apim): """Test _poll_async_operation method with in-progress then success.""" @@ -487,7 +487,7 @@ def test_poll_async_operation_in_progress_then_success(mock_sleep, mock_print_er @pytest.mark.unit @patch('apimrequests.requests.get') -@patch('apimrequests.utils.print_error') +@patch('apimrequests.console.print_error') def test_poll_async_operation_unexpected_status(mock_print_error, mock_get, apim): """Test _poll_async_operation method with unexpected status code.""" mock_response = MagicMock() @@ -502,7 +502,7 @@ def test_poll_async_operation_unexpected_status(mock_print_error, mock_get, apim @pytest.mark.unit @patch('apimrequests.requests.get') -@patch('apimrequests.utils.print_error') +@patch('apimrequests.console.print_error') def test_poll_async_operation_request_exception(mock_print_error, mock_get, apim): """Test _poll_async_operation method with request exception.""" mock_get.side_effect = requests.exceptions.RequestException('Connection error') @@ -515,7 +515,7 @@ def test_poll_async_operation_request_exception(mock_print_error, mock_get, apim @pytest.mark.unit @patch('apimrequests.requests.get') -@patch('apimrequests.utils.print_error') +@patch('apimrequests.console.print_error') @patch('apimrequests.time.time') @patch('apimrequests.time.sleep') def test_poll_async_operation_timeout(mock_sleep, mock_time, mock_print_error, mock_get, apim): @@ -535,8 +535,8 @@ def test_poll_async_operation_timeout(mock_sleep, mock_time, mock_print_error, m @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_message') -@patch('apimrequests.utils.print_info') +@patch('apimrequests.console.print_message') +@patch('apimrequests.console.print_info') def test_single_post_async_success_with_location(mock_print_info, mock_print_message, mock_request, apim): """Test singlePostAsync method with successful async operation.""" # Mock initial 202 response with Location header @@ -565,8 +565,8 @@ def test_single_post_async_success_with_location(mock_print_info, mock_print_mes @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_info') -@patch('apimrequests.utils.print_error') +@patch('apimrequests.console.print_info') +@patch('apimrequests.console.print_error') def test_single_post_async_no_location_header(mock_print_error, mock_print_info, mock_request, apim): """Test singlePostAsync method with 202 response but no Location header.""" mock_response = MagicMock() @@ -584,7 +584,7 @@ def test_single_post_async_no_location_header(mock_print_error, mock_print_info, @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_info') 
+@patch('apimrequests.console.print_info') def test_single_post_async_non_async_response(mock_print_info, mock_request, apim): """Test singlePostAsync method with non-async (immediate) response.""" mock_response = MagicMock() @@ -603,7 +603,7 @@ def test_single_post_async_non_async_response(mock_print_info, mock_request, api @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_error') +@patch('apimrequests.console.print_error') def test_single_post_async_request_exception(mock_print_error, mock_request, apim): """Test singlePostAsync method with request exception.""" mock_request.side_effect = requests.exceptions.RequestException('Connection error') @@ -616,7 +616,7 @@ def test_single_post_async_request_exception(mock_print_error, mock_request, api @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_error') +@patch('apimrequests.console.print_error') def test_single_post_async_failed_polling(mock_print_error, mock_request, apim): """Test singlePostAsync method with failed async operation polling.""" initial_response = MagicMock() @@ -634,7 +634,7 @@ def test_single_post_async_failed_polling(mock_print_error, mock_request, apim): @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_info') +@patch('apimrequests.console.print_info') def test_single_post_async_path_without_leading_slash(mock_print_info, mock_request, apim): """Test singlePostAsync method with PATH without leading slash.""" mock_response = MagicMock() @@ -656,7 +656,7 @@ def test_single_post_async_path_without_leading_slash(mock_print_info, mock_requ @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.utils.print_info') +@patch('apimrequests.console.print_info') def test_single_post_async_non_json_response(mock_print_info, mock_request, apim): """Test singlePostAsync method with non-JSON response.""" mock_response = MagicMock() diff --git a/tests/python/test_authfactory.py b/tests/python/test_authfactory.py index e8792af..eaacf57 100644 --- a/tests/python/test_authfactory.py +++ b/tests/python/test_authfactory.py @@ -11,7 +11,7 @@ # ------------------------------ TEST_KEY = 'test-secret-key' -TEST_USER = User(id = 'u1', name = 'Test User', roles = ['role1', 'role2']) +TEST_USER = User(user_id = 'u1', name = 'Test User', roles = ['role1', 'role2']) # ------------------------------ # PUBLIC METHODS diff --git a/tests/python/test_infrastructures.py b/tests/python/test_infrastructures.py index 02df7a8..8341643 100644 --- a/tests/python/test_infrastructures.py +++ b/tests/python/test_infrastructures.py @@ -961,7 +961,7 @@ def test_cleanup_resources_smoke(monkeypatch): monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_warning', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_val', lambda *a, **kw: None) + monkeypatch.setattr(console, 'print_val', lambda *a, **kw: None) # Direct private method call for legacy test (should still work) infrastructures._cleanup_resources(INFRASTRUCTURE.SIMPLE_APIM.value, 'rg') @@ -1022,8 +1022,8 @@ def mock_run(command, ok_message='', error_message='', print_command_to_run=True return Output(success=True, text='Operation completed') monkeypatch.setattr(utils, 'run', mock_run) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 
'print_message', lambda *a, **kw: None) + monkeypatch.setattr(console, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(console, 'print_message', lambda *a, **kw: None) # Execute cleanup infrastructures._cleanup_resources('test-deployment', 'test-rg') diff --git a/tests/python/test_users.py b/tests/python/test_users.py index 18eb276..69ca3d5 100644 --- a/tests/python/test_users.py +++ b/tests/python/test_users.py @@ -13,27 +13,27 @@ @pytest.mark.unit def test_user_init_with_roles(): - user = User(id='123', name='Alice', roles=['admin', 'user']) + user = User(user_id='123', name='Alice', roles=['admin', 'user']) assert user.id == '123' assert user.name == 'Alice' assert user.roles == ['admin', 'user'] @pytest.mark.unit def test_user_init_without_roles(): - user = User(id='456', name='Bob') + user = User(user_id='456', name='Bob') assert user.id == '456' assert user.name == 'Bob' assert user.roles == [] @pytest.mark.unit def test_user_role_mutability(): - user = User(id='789', name='Charlie') + user = User(user_id='789', name='Charlie') user.roles.append('editor') assert user.roles == ['editor'] @pytest.mark.unit def test_user_repr(): - user = User(id='abc', name='Dana', roles=['guest']) + user = User(user_id='abc', name='Dana', roles=['guest']) # __repr__ is not defined, so fallback to default, but check type assert isinstance(repr(user), str) diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index 5479b8a..70ff5d8 100644 --- a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -12,6 +12,7 @@ from apimtypes import INFRASTRUCTURE, APIM_SKU import utils import json_utils +import azure_resources as az # ------------------------------ # get_infra_rg_name & get_rg_name @@ -25,8 +26,8 @@ class DummyInfra: assert utils.get_infra_rg_name(DummyInfra, 2) == 'apim-infra-foo-2' def test_get_rg_name(): - assert utils.get_rg_name('foo') == 'apim-sample-foo' - assert utils.get_rg_name('foo', 3) == 'apim-sample-foo-3' + assert az.get_rg_name('foo') == 'apim-sample-foo' + assert az.get_rg_name('foo', 3) == 'apim-sample-foo-3' # ------------------------------ # run @@ -270,7 +271,7 @@ def test_create_bicep_deployment_group_params_file_written(monkeypatch): mock_create_rg = MagicMock() monkeypatch.setattr(utils, 'create_resource_group', mock_create_rg) mock_run = MagicMock(return_value=MagicMock(success=True)) - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(az, '_run', mock_run) mock_open_func = mock_open() monkeypatch.setattr(builtins, 'open', mock_open_func) monkeypatch.setattr(builtins, 'print', MagicMock()) @@ -281,7 +282,8 @@ def test_create_bicep_deployment_group_params_file_written(monkeypatch): def mock_exists(path): # Only return True for the main.bicep in the infrastructure directory, not in current dir - if path.endswith('main.bicep') and 'infrastructure' in path: + path_str = str(path) # Convert Path objects to strings + if path_str.endswith('main.bicep') and 'infrastructure' in path_str: return True return False @@ -300,7 +302,7 @@ def mock_exists(path): # With our new logic, when current directory name matches infrastructure_dir, # it should use the current directory expected_path = os.path.join('/test/dir/infrastructure/apim-aca', 'custom-params.json') - mock_open_func.assert_called_once_with(expected_path, 'w') + mock_open_func.assert_called_once_with(expected_path, 'w', encoding='utf-8') # Verify the correct JSON structure was written written_content = ''.join(call.args[0] for call in mock_open_func().write.call_args_list) @@ 
-621,15 +623,15 @@ def test_get_azure_role_guid_comprehensive(monkeypatch): monkeypatch.setattr(builtins, 'open', m) # Test valid role - result = utils.get_azure_role_guid('Storage Blob Data Reader') + result = az.get_azure_role_guid('Storage Blob Data Reader') assert result == '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1' # Test case sensitivity - function is case sensitive, so this should return None - result = utils.get_azure_role_guid('storage blob data reader') + result = az.get_azure_role_guid('storage blob data reader') assert result is None # Test invalid role - result = utils.get_azure_role_guid('Nonexistent Role') + result = az.get_azure_role_guid('Nonexistent Role') assert result is None # ------------------------------ @@ -714,7 +716,7 @@ def mock_rg_exists(rg_name): # Mock the prompt to return option 2 with index 3 monkeypatch.setattr(utils, '_prompt_for_infrastructure_update', lambda rg_name: (False, 3)) - monkeypatch.setattr(utils, 'does_resource_group_exist', mock_rg_exists) + monkeypatch.setattr(az, 'does_resource_group_exist', mock_rg_exists) # Mock subprocess execution to succeed class MockProcess: @@ -725,6 +727,12 @@ def __init__(self, *args, **kwargs): def wait(self): pass + def __enter__(self): + return self + + def __exit__(self, *args): + pass + monkeypatch.setattr('subprocess.Popen', MockProcess) monkeypatch.setattr(utils, 'find_project_root', lambda: 'c:\\mock\\root') @@ -773,6 +781,12 @@ def __init__(self, *args, **kwargs): def wait(self): pass + def __enter__(self): + return self + + def __exit__(self, *args): + pass + monkeypatch.setattr('subprocess.Popen', MockProcess) monkeypatch.setattr(utils, 'find_project_root', lambda: 'c:\\mock\\root') monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None) From 57f4a76fdcfa1190e3b71a5f6eda956fd4161a77 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Fri, 12 Dec 2025 10:07:22 -0500 Subject: [PATCH 15/23] Refactor & clean-up --- .github/copilot-instructions.bicep.md | 51 ++++++++ .github/copilot-instructions.md | 110 +----------------- .github/copilot-instructions.python.md | 44 +++++++ .../afd-apim-pe/create_infrastructure.py | 6 +- .../apim-aca/create_infrastructure.py | 6 +- .../appgw-apim-pe/create_infrastructure.py | 6 +- .../simple-apim/create_infrastructure.py | 8 +- shared/python/apimrequests.py | 2 +- shared/python/apimtesting.py | 1 + shared/python/apimtypes.py | 2 + shared/python/azure_resources.py | 3 +- shared/python/charts.py | 1 - shared/python/infrastructures.py | 16 +-- shared/python/users.py | 2 + shared/python/utils.py | 20 ++-- tests/python/check_python.ps1 | 13 +++ tests/python/check_python.sh | 20 +++- tests/python/test_apimrequests.py | 2 + tests/python/test_apimtesting.py | 3 +- tests/python/test_apimtypes.py | 3 +- tests/python/test_authfactory.py | 2 + tests/python/test_azure_resources.py | 1 + tests/python/test_infrastructures.py | 62 ++++++---- tests/python/test_users.py | 2 + tests/python/test_utils.py | 20 ++-- tests/python/test_verify_local_setup.py | 1 - 26 files changed, 235 insertions(+), 172 deletions(-) create mode 100644 .github/copilot-instructions.bicep.md create mode 100644 .github/copilot-instructions.python.md diff --git a/.github/copilot-instructions.bicep.md b/.github/copilot-instructions.bicep.md new file mode 100644 index 0000000..67535c4 --- /dev/null +++ b/.github/copilot-instructions.bicep.md @@ -0,0 +1,51 @@ +--- +applyTo: "**/*.bicep" +--- + +# Copilot Instructions (Bicep) + +## Goals + +- Prefer modern Bicep syntax and patterns. 
+- Keep templates readable and easy to extend.
+- Keep deployments cross-platform (Windows, Linux, macOS).
+
+## Conventions
+
+- Use `@description` for all parameters and variables.
+- Prefer consistent naming:
+  - Enums: `SNAKE_CASE` + uppercase.
+  - Resources/variables: `camelCase`.
+- Use the repo's standard top parameters when authoring standalone Bicep files:
+
+```bicep
+@description('Location to be used for resources. Defaults to the resource group location')
+param location string = resourceGroup().location
+
+@description('The unique suffix to append. Defaults to a unique string based on subscription and resource group IDs.')
+param resourceSuffix string = uniqueString(subscription().id, resourceGroup().id)
+```
+
+## Structure
+
+- Prefer visible section headers:
+
+```bicep
+// ------------------------------
+// PARAMETERS
+// ------------------------------
+```
+
+- Keep two blank lines before a section header and one blank line after.
+- Suggested order (when applicable): Parameters, Constants, Variables, Resources, Outputs.
+
+## Docs
+
+- Add a Microsoft Learn template reference comment above each resource, e.g.:
+
+```bicep
+// https://learn.microsoft.com/azure/templates/microsoft.network/virtualnetworks
+resource vnet 'Microsoft.Network/virtualNetworks@<api-version>' = {
+  ...
+}
+```
diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
index f040646..9f03069 100644
--- a/.github/copilot-instructions.md
+++ b/.github/copilot-instructions.md
@@ -64,6 +64,11 @@ In case of any conflicting instructions, the following hierarchy shall apply. If
 - `shared/`: Shared resources, such as Bicep modules, Python libraries, and other reusable components.
 - `tests/`: Contains unit tests for Python code and Bicep modules. This folder should contain all tests for all code in the repository.
 
+## Language-specific Instructions
+
+- Python: see `.github/copilot-instructions.python.md`
+- Bicep: see `.github/copilot-instructions.bicep.md`
+
 ## Formatting and Style
 
 - Maintain consistent indentation and whitespace but consider Editor Config settings, etc, for the repository.
@@ -85,110 +90,7 @@ In case of any conflicting instructions, the following hierarchy shall apply. If
 - Ensure that Jupyter notebooks do not contain any cell output.
 - Ensure that Jupyter notebooks have `index` assigned to `1` in the first cell.
 
-## Language-specific Instructions
-
-### Bicep Instructions
-- Prefer latest Bicep syntax and features.
-- Generated Bicep files should work with Windows, Linux, and macOS.
-- Reference latest available module versions. These may be newer than what you were trained on.
-- Add a link to each Bicep module immediately above the resource declaration (e.g., // https://learn.microsoft.com/azure/templates/microsoft.network/virtualnetworks)
-- Use `@description` for all parameters and variables.
-- Use snake-case and uppercase for all enum declarations.
-- Use camel case for resource and variable names.
-- `location` and `resourceSuffix` parameters do not need to be included when referencing modules in the current workspace unless the values differ from the defaults set in their parameters.
-- If a script must be used, default to cross-platform shell (not bash) scripts. Avoid PowerShell scripts.
-- Always add these two parameters at the top of Bicep files:
-
-```Bicep
-@description('Location to be used for resources. Defaults to the resource group location')
-param location string = resourceGroup().location
-
-@description('The unique suffix to append.
Defaults to a unique string based on subscription and resource group IDs.')
-param resourceSuffix string = uniqueString(subscription().id, resourceGroup().id)
-```
-
-- Overall layout of a Bicep file should be:
-  - Visible sections of code with the following format should be used:
-
-  ```bicep
-  // ------------------------------
-  // <Section Name>
-  // ------------------------------
-  ```
-
-  - <Section Name> should be indented three spaces and be in all caps.
-  - Section headers should have only two blank lines before and only one blank line after.
-  - Top-to-bottom, the following comma-separated section headers should be inserted unless the section is empty:
-    - Parameters
-    - Constants
-    - Variables
-    - Resources
-    - Outputs
-
-### Python Instructions
-
-- Prefer Python 3.12+ syntax and features unless otherwise specified.
-- Respect the repository's `.pylintrc` file for linting rules. The file is found in the `tests/python/` folder.
-- When inserting a comment to describe a method, insert a blank line after the comment section.
-- Never leave a blank line at the very top of a Python file. The file must start immediately with the module docstring or code. Always remove any leading blank line at the top.
-- Do not have imports such as `from shared.python import Foo`. The /shared/python directory is covered by a root `.env` file. Just use `import Foo` or `from Foo import Bar` as appropriate.
-- After the module docstring, all import statements must come before any section headers (e.g., CONSTANTS, VARIABLES, etc.). Section headers should only appear after the imports. Here is a more explicit example:
-
-  ```python
-  """
-  Module docstring.
-  """
-
-  import ...
-  ...
-
-
-  # ------------------------------
-  # CONSTANTS
-  # ------------------------------
-  ...
-  ```
-
-- Overall layout of a Python file should be:
-  - Visible sections of code with the following format should be used:
-
-  ```python
-  # ------------------------------
-  # <Section Name>
-  # ------------------------------
-  ```
-
-  - <Section Name> should be indented three spaces and be in all caps.
-  - Section headers should have only two blank lines before and only one blank line after.
-  - Top-to-bottom, the following comma-separated section headers should be inserted unless the section is empty:
-    - Constants
-    - Variables
-    - Private Methods
-    - Public Methods
-
-  - If using classes, the following section headers should be used:
-    - Classes
-    - Constants
-    - Variables
-    - Constructor
-    - Private Methods
-    - Public Methods
-
-- Python Docstring/Class Formatting Rule:
-  - Always insert a single blank line after a class docstring and before any class attributes or methods.
-  - Never place class attributes or decorators on the same line as the docstring. Example:
-
-    ```python
-    class MyClass:
-        """
-        This is the class docstring.
-        """
-
-        attribute: str
-        ...
-    ```
-
-### Jupyter Notebook Instructions
+## Jupyter Notebook Instructions
 
 - Use these [configuration settings](https://github.com/microsoft/vscode-jupyter/blob/dd568fde/package.nls.json) as a reference for the VS Code Jupyter extension configuration.
diff --git a/.github/copilot-instructions.python.md b/.github/copilot-instructions.python.md
new file mode 100644
index 0000000..5a067f8
--- /dev/null
+++ b/.github/copilot-instructions.python.md
@@ -0,0 +1,44 @@
+---
+applyTo: "**/*.py"
+---
+
+# Copilot Instructions (Python)
+
+## Goals
+
+- Make changes that are easy to review, test, and maintain.
+- Keep scripts cross-platform (Windows, Linux, macOS).
+- Prefer minimal, working implementations (MVP), then iterate.
+
+## Project Context
+
+- Python code lives primarily under `shared/python/`, `setup/`, `infrastructure/`, and `tests/python/`.
+- The repository uses Azure CLI from Python for many operations.
+
+## Style and Conventions
+
+- Prefer Python 3.12+ features unless otherwise required.
+- Keep all imports at the top of the file.
+- Imports from this repo should be grouped, be specific (e.g. `from X import Y, Z`), be imported last, and have a comment header called `# APIM Samples imports`.
+- Use type hints and concise docstrings (PEP 257).
+- Use 4-space indentation and PEP 8 conventions.
+- Use only straight quotes (U+0027 and U+0022), not typographic quotes.
+- Use whitespace to separate logical sections and add a blank line before `return` statements.
+
+## Linting (pylint)
+
+- Respect the repository pylint configuration at `tests/python/.pylintrc`.
+- When changing Python code, run pylint and ensure changes do not worsen the pylint rating unexpectedly.
+- Prefer fixing root causes (e.g., import structure, error handling) over suppressions.
+
+## Testing
+
+- Add or update pytest unit tests when changing behavior.
+- Prefer focused tests for the code being changed.
+- Avoid tests that require live Azure access; mock Azure CLI interactions and `azure_resources` helpers.
+
+## Azure Helper Imports
+
+- Prefer calling Azure resource helper functions via `import azure_resources as az`.
+- Avoid calling Azure-resource helpers through `utils` re-exports in new code.
+- When patching in tests, patch the symbol actually used by the module under test (e.g., `module.az.does_resource_group_exist`).
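+
+A minimal sketch of that patching pattern (the test body and return value are illustrative only):
+
+```python
+"""Illustrative unit test for a module that does `import azure_resources as az`."""
+
+import pytest
+
+# APIM Samples imports
+import infrastructures
+
+
+@pytest.mark.unit
+def test_resource_group_check_is_patched(monkeypatch: pytest.MonkeyPatch) -> None:
+    # Patch the symbol the module under test actually resolves (infrastructures.az),
+    # not a `utils` re-export, so the stub is hit at call time.
+    monkeypatch.setattr(infrastructures.az, 'does_resource_group_exist', lambda rg_name: True)
+
+    assert infrastructures.az.does_resource_group_exist('rg-example') is True
+```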
diff --git a/infrastructure/afd-apim-pe/create_infrastructure.py b/infrastructure/afd-apim-pe/create_infrastructure.py index c145788..29837a8 100644 --- a/infrastructure/afd-apim-pe/create_infrastructure.py +++ b/infrastructure/afd-apim-pe/create_infrastructure.py @@ -4,7 +4,11 @@ import sys import argparse + +# APIM Samples imports +import azure_resources as az from apimtypes import APIM_SKU, API, GET_APIOperation, BACKEND_XML_POLICY_PATH +from apimtypes import INFRASTRUCTURE from infrastructures import AfdApimAcaInfrastructure import utils @@ -12,7 +16,7 @@ def create_infrastructure(location: str, index: int, apim_sku: APIM_SKU, no_aca: bool = False) -> None: try: # Check if infrastructure already exists to determine messaging - infrastructure_exists = utils.does_resource_group_exist(utils.get_infra_rg_name(utils.INFRASTRUCTURE.AFD_APIM_PE, index)) + infrastructure_exists = az.does_resource_group_exist(az.get_infra_rg_name(INFRASTRUCTURE.AFD_APIM_PE, index)) # Create custom APIs for AFD-APIM-PE with optional Container Apps backends custom_apis = _create_afd_specific_apis(not no_aca) diff --git a/infrastructure/apim-aca/create_infrastructure.py b/infrastructure/apim-aca/create_infrastructure.py index b4b0e48..0a5acea 100644 --- a/infrastructure/apim-aca/create_infrastructure.py +++ b/infrastructure/apim-aca/create_infrastructure.py @@ -4,7 +4,11 @@ import sys import argparse + +# APIM Samples imports +import azure_resources as az from apimtypes import APIM_SKU, API, GET_APIOperation, BACKEND_XML_POLICY_PATH +from apimtypes import INFRASTRUCTURE from infrastructures import ApimAcaInfrastructure import utils @@ -12,7 +16,7 @@ def create_infrastructure(location: str, index: int, apim_sku: APIM_SKU) -> None: try: # Check if infrastructure already exists to determine messaging - infrastructure_exists = utils.does_resource_group_exist(utils.get_infra_rg_name(utils.INFRASTRUCTURE.APIM_ACA, index)) + infrastructure_exists = az.does_resource_group_exist(az.get_infra_rg_name(INFRASTRUCTURE.APIM_ACA, index)) # Create custom APIs for APIM-ACA with Container Apps backends custom_apis = _create_aca_specific_apis() diff --git a/infrastructure/appgw-apim-pe/create_infrastructure.py b/infrastructure/appgw-apim-pe/create_infrastructure.py index c5f5ad9..95e8141 100644 --- a/infrastructure/appgw-apim-pe/create_infrastructure.py +++ b/infrastructure/appgw-apim-pe/create_infrastructure.py @@ -4,7 +4,11 @@ import sys import argparse + +# APIM Samples imports +import azure_resources as az from apimtypes import APIM_SKU, API, GET_APIOperation, BACKEND_XML_POLICY_PATH +from apimtypes import INFRASTRUCTURE from infrastructures import AppGwApimPeInfrastructure import utils @@ -12,7 +16,7 @@ def create_infrastructure(location: str, index: int, apim_sku: APIM_SKU, no_aca: bool = False) -> None: try: # Check if infrastructure already exists to determine messaging - infrastructure_exists = utils.does_resource_group_exist(utils.get_infra_rg_name(utils.INFRASTRUCTURE.APPGW_APIM_PE, index)) + infrastructure_exists = az.does_resource_group_exist(az.get_infra_rg_name(INFRASTRUCTURE.APPGW_APIM_PE, index)) # Create custom APIs for APPGW-APIM-PE with optional Container Apps backends custom_apis = _create_appgw_specific_apis(not no_aca) diff --git a/infrastructure/simple-apim/create_infrastructure.py b/infrastructure/simple-apim/create_infrastructure.py index 65868d2..be140ae 100644 --- a/infrastructure/simple-apim/create_infrastructure.py +++ b/infrastructure/simple-apim/create_infrastructure.py @@ -4,15 +4,17 @@ import sys 
import argparse -from apimtypes import APIM_SKU + +# APIM Samples imports +import azure_resources as az +from apimtypes import APIM_SKU, INFRASTRUCTURE from infrastructures import SimpleApimInfrastructure -import utils def create_infrastructure(location: str, index: int, apim_sku: APIM_SKU) -> None: try: # Check if infrastructure already exists to determine messaging - infrastructure_exists = utils.does_resource_group_exist(utils.get_infra_rg_name(utils.INFRASTRUCTURE.SIMPLE_APIM, index)) + infrastructure_exists = az.does_resource_group_exist(az.get_infra_rg_name(INFRASTRUCTURE.SIMPLE_APIM, index)) result = SimpleApimInfrastructure(location, index, apim_sku).deploy_infrastructure(infrastructure_exists) sys.exit(0 if result.success else 1) diff --git a/shared/python/apimrequests.py b/shared/python/apimrequests.py index bf7eeaf..3e52fd4 100644 --- a/shared/python/apimrequests.py +++ b/shared/python/apimrequests.py @@ -5,10 +5,10 @@ import json import time from typing import Any - import requests import urllib3 +# APIM Samples imports from apimtypes import HTTP_VERB, SUBSCRIPTION_KEY_PARAMETER_NAME, SLEEP_TIME_BETWEEN_REQUESTS_MS import console diff --git a/shared/python/apimtesting.py b/shared/python/apimtesting.py index 18feec1..f32e6a4 100644 --- a/shared/python/apimtesting.py +++ b/shared/python/apimtesting.py @@ -2,6 +2,7 @@ Rudimentary test framework to offload validations from the Jupyter notebooks. """ +# APIM Samples imports from apimtypes import INFRASTRUCTURE # ------------------------------ diff --git a/shared/python/apimtypes.py b/shared/python/apimtypes.py index ebe5fe0..5debf01 100644 --- a/shared/python/apimtypes.py +++ b/shared/python/apimtypes.py @@ -9,6 +9,8 @@ from dataclasses import dataclass from pathlib import Path from typing import List, Optional, Any + +# APIM Samples imports from console import (print_error, print_val) from json_utils import is_string_json, extract_json diff --git a/shared/python/azure_resources.py b/shared/python/azure_resources.py index 55009bb..e6a0e3b 100644 --- a/shared/python/azure_resources.py +++ b/shared/python/azure_resources.py @@ -12,8 +12,9 @@ import re import subprocess import traceback - from typing import Tuple, Optional + +# APIM Samples imports from apimtypes import INFRASTRUCTURE, Endpoints, Output from console import print_ok, print_warning, print_error, print_val, print_message, print_info, print_command, print_success diff --git a/shared/python/charts.py b/shared/python/charts.py index d73e361..77cabb7 100644 --- a/shared/python/charts.py +++ b/shared/python/charts.py @@ -5,7 +5,6 @@ """ import json - import pandas as pd import matplotlib.pyplot as plt from matplotlib.patches import Rectangle as pltRectangle diff --git a/shared/python/infrastructures.py b/shared/python/infrastructures.py index 5bacd67..fa0c1b0 100644 --- a/shared/python/infrastructures.py +++ b/shared/python/infrastructures.py @@ -11,6 +11,7 @@ from concurrent.futures import ThreadPoolExecutor, as_completed import requests +# APIM Samples imports from apimtypes import ( API, APIM_SKU, @@ -24,6 +25,7 @@ BOLD_R, BOLD_Y, RESET, THREAD_COLORS, _print_lock, _print_log, print_error, print_info, print_message, print_ok, print_success, print_warning ) +import azure_resources as az import utils from utils import Output @@ -52,15 +54,15 @@ def __init__(self, infra: INFRASTRUCTURE, index: int, rg_location: str, apim_sku self.infra_pfs = infra_pfs # Define and create the resource group - self.rg_name = utils.get_infra_rg_name(infra, index) + self.rg_name = 
az.get_infra_rg_name(infra, index) self.rg_tags = utils.build_infrastructure_tags(infra) - utils.create_resource_group(self.rg_name, self.rg_location, self.rg_tags) + az.create_resource_group(self.rg_name, self.rg_location, self.rg_tags) # Some infrastructure deployments require knowing the resource suffix that bicep will use prior to the main deployment. # Uses subscription ID and resource group name hashing to generate the suffix. - self.resource_suffix = utils.get_unique_suffix_for_resource_group(self.rg_name) + self.resource_suffix = az.get_unique_suffix_for_resource_group(self.rg_name) - self.current_user, self.current_user_id, self.tenant_id, self.subscription_id = utils.get_account_info() + self.current_user, self.current_user_id, self.tenant_id, self.subscription_id = az.get_account_info() @@ -132,7 +134,7 @@ def _verify_infrastructure(self, rg_name: str) -> bool: try: # Check if the resource group exists - if not utils.does_resource_group_exist(rg_name): + if not az.does_resource_group_exist(rg_name): print('❌ Resource group does not exist!') return False @@ -1420,7 +1422,7 @@ def cleanup_infra_deployments(deployment: INFRASTRUCTURE, indexes: int | list[in if len(indexes_list) <= 1: idx = indexes_list[0] if indexes_list else None print_info(f'Cleaning up resources for {deployment.value} - {idx}', True) - rg_name = utils.get_infra_rg_name(deployment, idx) + rg_name = az.get_infra_rg_name(deployment, idx) _cleanup_resources(deployment.value, rg_name) return @@ -1435,7 +1437,7 @@ def cleanup_infra_deployments(deployment: INFRASTRUCTURE, indexes: int | list[in cleanup_tasks = [] for i, idx in enumerate(indexes_list): - rg_name = utils.get_infra_rg_name(deployment, idx) + rg_name = az.get_infra_rg_name(deployment, idx) thread_color = THREAD_COLORS[i % len(THREAD_COLORS)] thread_prefix = f"{thread_color}[{deployment.value}-{idx}]{RESET}: " diff --git a/shared/python/users.py b/shared/python/users.py index d875a9a..68a1f83 100644 --- a/shared/python/users.py +++ b/shared/python/users.py @@ -5,6 +5,8 @@ from typing import List from enum import StrEnum import random + +# APIM Samples imports from apimtypes import Role diff --git a/shared/python/utils.py b/shared/python/utils.py index 7e254ab..c80cebc 100644 --- a/shared/python/utils.py +++ b/shared/python/utils.py @@ -14,8 +14,9 @@ import inspect from pathlib import Path from typing import Any -import azure_resources as az +# APIM Samples imports +import azure_resources as az import apimtypes from apimtypes import APIM_SKU, HTTP_VERB, INFRASTRUCTURE, Endpoints, Output, _get_project_root @@ -48,14 +49,7 @@ ) -def does_resource_group_exist(rg_name: str) -> bool: - """Check whether an Azure resource group exists. - - This wrapper keeps `utils.does_resource_group_exist` monkeypatchable while - still delegating to the underlying `azure_resources` implementation. 
- """ - - return az.does_resource_group_exist(rg_name) +does_resource_group_exist = az.does_resource_group_exist # ------------------------------ # HELPER FUNCTIONS @@ -146,7 +140,7 @@ def create_infrastructure(self, bypass_infrastructure_check: bool = False, allow # For infrastructure notebooks, check if update is allowed and handle user choice if allow_update: rg_name = get_infra_rg_name(self.deployment, self.index) - if does_resource_group_exist(rg_name): + if az.does_resource_group_exist(rg_name): # Infrastructure exists, show update dialog try: should_proceed, new_index = _prompt_for_infrastructure_update(rg_name) @@ -164,7 +158,7 @@ def create_infrastructure(self, bypass_infrastructure_check: bool = False, allow raise SystemExit("User cancelled deployment") from exc # Check infrastructure existence for the normal flow - infrastructure_exists = does_resource_group_exist(get_infra_rg_name(self.deployment, self.index)) if not allow_update else False + infrastructure_exists = az.does_resource_group_exist(get_infra_rg_name(self.deployment, self.index)) if not allow_update else False if bypass_infrastructure_check or not infrastructure_exists: # Map infrastructure types to their folder names @@ -460,7 +454,7 @@ def deploy_sample(self, bicep_parameters: dict) -> Output: print(f' Resource group : {self.rg_name}\n') # Call the resource group existence check only once - rg_exists = does_resource_group_exist(self.rg_name) + rg_exists = az.does_resource_group_exist(self.rg_name) # If the desired infrastructure doesn't exist, use the interactive selection process if not rg_exists: @@ -785,7 +779,7 @@ def does_infrastructure_exist(infrastructure: INFRASTRUCTURE, index: int, allow_ rg_name = get_infra_rg_name(infrastructure, index) - if does_resource_group_exist(rg_name): + if az.does_resource_group_exist(rg_name): print(f'✅ Infrastructure already exists: {rg_name}\n') if allow_update_option: diff --git a/tests/python/check_python.ps1 b/tests/python/check_python.ps1 index 7ad8419..cd81d07 100644 --- a/tests/python/check_python.ps1 +++ b/tests/python/check_python.ps1 @@ -97,6 +97,19 @@ Write-Host "" $LintStatus = if ($LintExitCode -eq 0) { "✅ PASSED" } else { "⚠️ ISSUES FOUND" } $TestStatus = if ($TestExitCode -eq 0) { "✅ PASSED" } else { "❌ FAILED" } +$PylintScore = $null +$LatestPylintText = Join-Path $ScriptDir "pylint/reports/latest.txt" +if (Test-Path $LatestPylintText) { + $ScoreMatch = Select-String -Path $LatestPylintText -Pattern 'rated at (\d+(?:\.\d+)?/10)' | Select-Object -First 1 + if ($ScoreMatch -and $ScoreMatch.Matches.Count -gt 0) { + $PylintScore = $ScoreMatch.Matches[0].Groups[1].Value + } +} + +if ($PylintScore) { + $LintStatus = "$LintStatus ($PylintScore)" +} + $LintColor = if ($LintExitCode -eq 0) { "Green" } else { "Yellow" } $TestColor = if ($TestExitCode -eq 0) { "Green" } else { "Red" } diff --git a/tests/python/check_python.sh b/tests/python/check_python.sh index eef2486..a3f8886 100644 --- a/tests/python/check_python.sh +++ b/tests/python/check_python.sh @@ -17,6 +17,8 @@ REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" SHOW_REPORT="" TARGET="${1:-infrastructure samples setup shared tests}" +PYLINT_SCORE="" + # Parse arguments if [ "$1" = "--show-report" ]; then SHOW_REPORT="--show-report" @@ -46,6 +48,12 @@ set +e LINT_EXIT_CODE=$? 
set -e +# Extract pylint score from the latest report, if available +PYLINT_LATEST_TEXT="$SCRIPT_DIR/pylint/reports/latest.txt" +if [ -f "$PYLINT_LATEST_TEXT" ]; then + PYLINT_SCORE=$(grep -Eo 'rated at [0-9]+(\.[0-9]+)?/10' "$PYLINT_LATEST_TEXT" | head -n 1 | awk '{print $3}') +fi + echo "" @@ -76,9 +84,17 @@ echo "╚═══════════════════════ echo "" if [ $LINT_EXIT_CODE -eq 0 ]; then - echo " Pylint: ✅ PASSED" + if [ -n "$PYLINT_SCORE" ]; then + echo " Pylint: ✅ PASSED ($PYLINT_SCORE)" + else + echo " Pylint: ✅ PASSED" + fi else - echo " Pylint: ⚠️ ISSUES FOUND" + if [ -n "$PYLINT_SCORE" ]; then + echo " Pylint: ⚠️ ISSUES FOUND ($PYLINT_SCORE)" + else + echo " Pylint: ⚠️ ISSUES FOUND" + fi fi if [ $TEST_EXIT_CODE -eq 0 ]; then diff --git a/tests/python/test_apimrequests.py b/tests/python/test_apimrequests.py index c09ee21..9dbf141 100644 --- a/tests/python/test_apimrequests.py +++ b/tests/python/test_apimrequests.py @@ -1,6 +1,8 @@ from unittest.mock import patch, MagicMock import requests import pytest + +# APIM Samples imports from apimrequests import ApimRequests from apimtypes import SUBSCRIPTION_KEY_PARAMETER_NAME, HTTP_VERB diff --git a/tests/python/test_apimtesting.py b/tests/python/test_apimtesting.py index 1900363..94454cc 100644 --- a/tests/python/test_apimtesting.py +++ b/tests/python/test_apimtesting.py @@ -5,6 +5,8 @@ from unittest.mock import patch import sys import os + +# APIM Samples imports from apimtesting import ApimTesting from apimtypes import INFRASTRUCTURE @@ -12,7 +14,6 @@ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'shared', 'python')) - # ------------------------------ # TEST INITIALIZATION # ------------------------------ diff --git a/tests/python/test_apimtypes.py b/tests/python/test_apimtypes.py index 9b4a79a..a923bd7 100644 --- a/tests/python/test_apimtypes.py +++ b/tests/python/test_apimtypes.py @@ -4,8 +4,9 @@ from pathlib import Path import pytest -import apimtypes +# APIM Samples imports +import apimtypes # ------------------------------ diff --git a/tests/python/test_authfactory.py b/tests/python/test_authfactory.py index eaacf57..dfa0692 100644 --- a/tests/python/test_authfactory.py +++ b/tests/python/test_authfactory.py @@ -3,6 +3,8 @@ """ import time import pytest + +# APIM Samples imports from authfactory import JwtPayload, SymmetricJwtToken, AuthFactory from users import User diff --git a/tests/python/test_azure_resources.py b/tests/python/test_azure_resources.py index 4a59e66..620fc60 100644 --- a/tests/python/test_azure_resources.py +++ b/tests/python/test_azure_resources.py @@ -6,6 +6,7 @@ from unittest.mock import Mock, patch, mock_open, call import pytest +# APIM Samples imports import azure_resources as az from apimtypes import INFRASTRUCTURE, Endpoints, Output diff --git a/tests/python/test_infrastructures.py b/tests/python/test_infrastructures.py index 8341643..96a26b7 100644 --- a/tests/python/test_infrastructures.py +++ b/tests/python/test_infrastructures.py @@ -4,6 +4,8 @@ from unittest.mock import Mock, patch, MagicMock import pytest + +# APIM Samples imports import console import infrastructures from apimtypes import INFRASTRUCTURE, APIM_SKU, APIMNetworkMode, API, PolicyFragment, HTTP_VERB, Output @@ -28,14 +30,10 @@ def mock_utils(): """Mock the utils module to avoid external dependencies.""" with patch('infrastructures.utils') as mock_utils: - mock_utils.get_infra_rg_name.return_value = 'rg-test-infrastructure-01' mock_utils.build_infrastructure_tags.return_value = {'environment': 'test', 'project': 
'apim-samples'} mock_utils.read_policy_xml.return_value = '' mock_utils.determine_shared_policy_path.return_value = '/mock/path/policy.xml' - mock_utils.create_resource_group.return_value = None mock_utils.verify_infrastructure.return_value = True - mock_utils.get_account_info.return_value = ('test_user', 'test_user_id', 'test_tenant', 'test_subscription') - mock_utils.get_unique_suffix_for_resource_group.return_value = 'abc123def456' # Mock the run command with proper return object mock_output = Mock() @@ -47,6 +45,20 @@ def mock_utils(): yield mock_utils + +@pytest.fixture(autouse = True) +def mock_az(): + """Mock the azure_resources module used by infrastructures.""" + + with patch('infrastructures.az') as mock_az: + mock_az.get_infra_rg_name.return_value = 'rg-test-infrastructure-01' + mock_az.create_resource_group.return_value = None + mock_az.does_resource_group_exist.return_value = True + mock_az.get_account_info.return_value = ('test_user', 'test_user_id', 'test_tenant', 'test_subscription') + mock_az.get_unique_suffix_for_resource_group.return_value = 'abc123def456' + + yield mock_az + @pytest.fixture def mock_policy_fragments(): """Provide mock policy fragments for testing.""" @@ -141,7 +153,7 @@ def test_infrastructure_creation_with_custom_apis(mock_utils, mock_apis): assert any(api.name == 'hello-world' for api in apis) @pytest.mark.unit -def test_infrastructure_creation_calls_utils_functions(mock_utils): +def test_infrastructure_creation_calls_utils_functions(mock_utils, mock_az): """Test that Infrastructure creation calls expected utility functions.""" infra = infrastructures.Infrastructure( infra=INFRASTRUCTURE.SIMPLE_APIM, @@ -149,7 +161,7 @@ def test_infrastructure_creation_calls_utils_functions(mock_utils): rg_location=TEST_LOCATION ) - mock_utils.get_infra_rg_name.assert_called_once_with(INFRASTRUCTURE.SIMPLE_APIM, TEST_INDEX) + mock_az.get_infra_rg_name.assert_called_once_with(INFRASTRUCTURE.SIMPLE_APIM, TEST_INDEX) mock_utils.build_infrastructure_tags.assert_called_once_with(INFRASTRUCTURE.SIMPLE_APIM) # Initialize policy fragments to trigger utils calls @@ -327,7 +339,7 @@ def test_define_bicep_parameters(mock_utils): # ------------------------------ @pytest.mark.unit -def test_base_infrastructure_verification_success(mock_utils): +def test_base_infrastructure_verification_success(mock_utils, mock_az): """Test base infrastructure verification success.""" infra = infrastructures.Infrastructure( infra=INFRASTRUCTURE.SIMPLE_APIM, @@ -336,7 +348,7 @@ def test_base_infrastructure_verification_success(mock_utils): ) # Mock successful resource group check - mock_utils.does_resource_group_exist.return_value = True + mock_az.does_resource_group_exist.return_value = True # Mock successful APIM service check mock_apim_output = Mock() @@ -358,11 +370,11 @@ def test_base_infrastructure_verification_success(mock_utils): result = infra._verify_infrastructure('test-rg') assert result is True - mock_utils.does_resource_group_exist.assert_called_once_with('test-rg') + mock_az.does_resource_group_exist.assert_called_once_with('test-rg') assert mock_utils.run.call_count >= 2 # At least APIM list and API count @pytest.mark.unit -def test_base_infrastructure_verification_missing_rg(mock_utils): +def test_base_infrastructure_verification_missing_rg(mock_utils, mock_az): """Test base infrastructure verification with missing resource group.""" infra = infrastructures.Infrastructure( infra=INFRASTRUCTURE.SIMPLE_APIM, @@ -371,15 +383,15 @@ def 
test_base_infrastructure_verification_missing_rg(mock_utils): ) # Mock missing resource group - mock_utils.does_resource_group_exist.return_value = False + mock_az.does_resource_group_exist.return_value = False result = infra._verify_infrastructure('test-rg') assert result is False - mock_utils.does_resource_group_exist.assert_called_once_with('test-rg') + mock_az.does_resource_group_exist.assert_called_once_with('test-rg') @pytest.mark.unit -def test_base_infrastructure_verification_missing_apim(mock_utils): +def test_base_infrastructure_verification_missing_apim(mock_utils, mock_az): """Test base infrastructure verification with missing APIM service.""" infra = infrastructures.Infrastructure( infra=INFRASTRUCTURE.SIMPLE_APIM, @@ -388,7 +400,7 @@ def test_base_infrastructure_verification_missing_apim(mock_utils): ) # Mock successful resource group check - mock_utils.does_resource_group_exist.return_value = True + mock_az.does_resource_group_exist.return_value = True # Mock failed APIM service check mock_apim_output = Mock() @@ -595,7 +607,7 @@ def test_all_concrete_infrastructure_classes_have_verification(mock_utils): @patch('os.getcwd') @patch('os.chdir') @patch('pathlib.Path') -def test_deploy_infrastructure_success(mock_path_class, mock_chdir, mock_getcwd, mock_utils): +def test_deploy_infrastructure_success(mock_path_class, mock_chdir, mock_getcwd, mock_utils, mock_az): """Test successful infrastructure deployment.""" # Setup mocks mock_getcwd.return_value = '/original/path' @@ -624,7 +636,7 @@ def verify_infrastructure(self) -> bool: result = infra.deploy_infrastructure() # Verify the deployment process - mock_utils.create_resource_group.assert_called_once() + mock_az.create_resource_group.assert_called_once() # The utils.run method is now called multiple times (deployment + verification steps) assert mock_utils.run.call_count >= 1 # At least one call for deployment # Note: utils.verify_infrastructure is currently commented out in the actual code @@ -645,7 +657,7 @@ def verify_infrastructure(self) -> bool: @patch('os.getcwd') @patch('os.chdir') @patch('pathlib.Path') -def test_deploy_infrastructure_failure(mock_path_class, mock_chdir, mock_getcwd, mock_utils): +def test_deploy_infrastructure_failure(mock_path_class, mock_chdir, mock_getcwd, mock_utils, mock_az): """Test infrastructure deployment failure.""" # Setup mocks for failure scenario mock_getcwd.return_value = '/original/path' @@ -677,7 +689,7 @@ def verify_infrastructure(self) -> bool: result = infra.deploy_infrastructure() # Verify the deployment process was attempted - mock_utils.create_resource_group.assert_called_once() + mock_az.create_resource_group.assert_called_once() mock_utils.run.assert_called_once() # Note: utils.verify_infrastructure is currently commented out in the actual code # mock_utils.verify_infrastructure.assert_not_called() # Should not be called on failure @@ -1156,7 +1168,7 @@ def mock_get_infra_rg_name(deployment, index): return f'apim-infra-{deployment.value}-{index}' if index else f'apim-infra-{deployment.value}' monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) @@ -1197,7 +1209,7 @@ def mock_get_infra_rg_name(deployment, index): return 
f'apim-infra-{deployment.value}-{index}' monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_error', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_warning', lambda *a, **kw: None) @@ -1261,7 +1273,7 @@ def mock_run(*args, **kwargs): return Output(success=True, text='{}') monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) @@ -1344,7 +1356,7 @@ def mock_get_infra_rg_name(deployment, index): return f'apim-infra-{deployment.value}-{index}' if index else f'apim-infra-{deployment.value}' monkeypatch.setattr(infrastructures, '_cleanup_resources', mock_cleanup_resources) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) # Test all infrastructure types @@ -1379,7 +1391,7 @@ def mock_run(*args, **kwargs): monkeypatch.setattr(infrastructures, '_cleanup_resources', mock_cleanup_resources) monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) @@ -1449,7 +1461,7 @@ def mock_get_infra_rg_name(deployment, index): return f'test-rg-{deployment.value}-{index}' if index else f'test-rg-{deployment.value}' monkeypatch.setattr(utils, 'run', mock_run) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) @@ -1490,7 +1502,7 @@ def mock_run(*args, **kwargs): monkeypatch.setattr(infrastructures, '_cleanup_resources', mock_cleanup_resources) monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) diff --git a/tests/python/test_users.py b/tests/python/test_users.py index 69ca3d5..34c9418 100644 --- a/tests/python/test_users.py +++ b/tests/python/test_users.py @@ -4,6 +4,8 @@ 
import random import pytest + +# APIM Samples imports from users import User, UserName, Users, UserHelper from apimtypes import Role diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index 70ff5d8..ac588b9 100644 --- a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -9,6 +9,8 @@ from unittest.mock import MagicMock, mock_open import json import pytest + +# APIM Samples imports from apimtypes import INFRASTRUCTURE, APIM_SKU import utils import json_utils @@ -596,7 +598,7 @@ def test_bicep_directory_determination_edge_cases(monkeypatch, tmp_path): def test_create_resource_group_edge_cases(monkeypatch): """Test create resource group with edge cases.""" # Test with empty tags - monkeypatch.setattr(utils, 'does_resource_group_exist', lambda x: False) + monkeypatch.setattr(az, 'does_resource_group_exist', lambda x: False) def mock_run_with_tags(*args, **kwargs): cmd = args[0] @@ -770,7 +772,7 @@ def mock_prompt(rg_name): return (False, 3) # Second retry with index 3 monkeypatch.setattr(utils, '_prompt_for_infrastructure_update', mock_prompt) - monkeypatch.setattr(utils, 'does_resource_group_exist', mock_rg_exists) + monkeypatch.setattr(az, 'does_resource_group_exist', mock_rg_exists) # Mock subprocess execution to succeed class MockProcess: @@ -802,7 +804,7 @@ def test_infrastructure_notebook_helper_create_user_cancellation(monkeypatch): helper = utils.InfrastructureNotebookHelper('eastus', INFRASTRUCTURE.SIMPLE_APIM, 1, APIM_SKU.BASICV2) # Mock resource group to exist (triggering prompt) - monkeypatch.setattr(utils, 'does_resource_group_exist', lambda rg_name: True) + monkeypatch.setattr(az, 'does_resource_group_exist', lambda rg_name: True) # Mock the prompt to return cancellation (option 3) monkeypatch.setattr(utils, '_prompt_for_infrastructure_update', lambda rg_name: (False, None)) @@ -820,7 +822,7 @@ def test_infrastructure_notebook_helper_create_keyboard_interrupt_during_prompt( helper = utils.InfrastructureNotebookHelper('eastus', INFRASTRUCTURE.SIMPLE_APIM, 1, APIM_SKU.BASICV2) # Mock resource group to exist (triggering prompt) - monkeypatch.setattr(utils, 'does_resource_group_exist', lambda rg_name: True) + monkeypatch.setattr(az, 'does_resource_group_exist', lambda rg_name: True) # Mock the prompt to raise KeyboardInterrupt def mock_prompt(rg_name): @@ -841,7 +843,7 @@ def test_infrastructure_notebook_helper_create_eof_error_during_prompt(monkeypat helper = utils.InfrastructureNotebookHelper('eastus', INFRASTRUCTURE.SIMPLE_APIM, 1, APIM_SKU.BASICV2) # Mock resource group to exist (triggering prompt) - monkeypatch.setattr(utils, 'does_resource_group_exist', lambda rg_name: True) + monkeypatch.setattr(az, 'does_resource_group_exist', lambda rg_name: True) # Mock the prompt to raise EOFError def mock_prompt(rg_name): @@ -864,7 +866,7 @@ def test_deploy_sample_with_infrastructure_selection(monkeypatch): ) # Mock does_resource_group_exist to return False for original, triggering selection - monkeypatch.setattr(utils, 'does_resource_group_exist', lambda rg: False) + monkeypatch.setattr(az, 'does_resource_group_exist', lambda rg: False) # Mock infrastructure selection to return a valid infrastructure selected_infra = INFRASTRUCTURE.APIM_ACA @@ -900,7 +902,7 @@ def test_deploy_sample_no_infrastructure_found(monkeypatch): ) # Mock does_resource_group_exist to return False for original - monkeypatch.setattr(utils, 'does_resource_group_exist', lambda rg: False) + monkeypatch.setattr(az, 'does_resource_group_exist', lambda rg: False) # Mock infrastructure 
selection to return None (no infrastructure found) monkeypatch.setattr(nb_helper, '_query_and_select_infrastructure', @@ -921,7 +923,7 @@ def test_deploy_sample_existing_infrastructure(monkeypatch): ) # Mock does_resource_group_exist to return True (infrastructure exists) - monkeypatch.setattr(utils, 'does_resource_group_exist', lambda rg: True) + monkeypatch.setattr(az, 'does_resource_group_exist', lambda rg: True) # Mock successful deployment mock_output = utils.Output(success=True, text='{"outputs": {"test": "value"}}') @@ -947,7 +949,7 @@ def test_deploy_sample_deployment_failure(monkeypatch): ) # Mock does_resource_group_exist to return True - monkeypatch.setattr(utils, 'does_resource_group_exist', lambda rg: True) + monkeypatch.setattr(az, 'does_resource_group_exist', lambda rg: True) # Mock failed deployment mock_output = utils.Output(success=False, text='Deployment failed') diff --git a/tests/python/test_verify_local_setup.py b/tests/python/test_verify_local_setup.py index 90bbb42..9a3a2ce 100644 --- a/tests/python/test_verify_local_setup.py +++ b/tests/python/test_verify_local_setup.py @@ -7,7 +7,6 @@ from pathlib import Path from types import ModuleType, SimpleNamespace from typing import Any, TYPE_CHECKING, cast - import pytest # Ensure the setup folder is on sys.path so the verification script is importable. From 055ad4b272dd27e4492fca5695e3daf973fdc592 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Fri, 12 Dec 2025 12:56:43 -0500 Subject: [PATCH 16/23] Fix lint and tests --- .github/copilot-instructions.python.md | 13 ++++- shared/python/apimrequests.py | 64 ++++++++++----------- shared/python/apimtypes.py | 10 +--- shared/python/utils.py | 9 ++- tests/python/test_apimrequests.py | 79 +++++++++++++------------- tests/python/test_apimtypes.py | 6 +- tests/python/test_utils.py | 6 +- 7 files changed, 97 insertions(+), 90 deletions(-) diff --git a/.github/copilot-instructions.python.md b/.github/copilot-instructions.python.md index 5a067f8..4aae6e7 100644 --- a/.github/copilot-instructions.python.md +++ b/.github/copilot-instructions.python.md @@ -19,12 +19,23 @@ applyTo: "**/*.py" - Prefer Python 3.12+ features unless otherwise required. - Keep all imports at the top of the file. -- Imports from this repo should be grouped, be specific (e.g. `from X import Y, Z`) be imported last, and have a comment header called `# APIM Samples imports` - Use type hints and concise docstrings (PEP 257). - Use 4-space indentation and PEP 8 conventions. - Use only straight quotes (U+0027 and U+0022), not typographic quotes. - Use whitespace to separate logical sections and add a blank line before `return` statements. +## Import Style Guidelines + +- Imports from this repo should be grouped, be imported last, and have a group header called `# APIM Samples imports` +- **Prefer specific imports** over module imports for clarity: `from module import Class, function` +- **Use aliases for frequently-used modules**: `import azure_resources as az` +- **Console module**: Always use specific imports: `from console import print_error, print_info, ...` +- **Avoid mixing patterns**: Don't use both `import module` and `from module import ...` for the same module +- **Order within APIM Samples imports section**: + 1. Module imports with aliases (e.g., `import azure_resources as az`) + 2. Specific type/constant imports (e.g., `from apimtypes import INFRASTRUCTURE`) + 3. 
Specific function imports (e.g., `from console import print_error`) + ## Linting (pylint) - Respect the repository pylint configuration at `tests/python/.pylintrc`. diff --git a/shared/python/apimrequests.py b/shared/python/apimrequests.py index 3e52fd4..fb167ab 100644 --- a/shared/python/apimrequests.py +++ b/shared/python/apimrequests.py @@ -10,7 +10,7 @@ # APIM Samples imports from apimtypes import HTTP_VERB, SUBSCRIPTION_KEY_PARAMETER_NAME, SLEEP_TIME_BETWEEN_REQUESTS_MS -import console +from console import BOLD_G, BOLD_R, RESET, print_error, print_info, print_message, print_ok, print_val # Disable SSL warnings for self-signed certificates urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) @@ -124,21 +124,21 @@ def _request(self, method: HTTP_VERB, path: str, headers: list[any] = None, data try: if msg: - console.print_message(msg, blank_above = True) + print_message(msg, blank_above = True) # Ensure path has a leading slash if not path.startswith('/'): path = '/' + path url = self._url + path - console.print_info(f'{method.value} {url}') + print_info(f'{method.value} {url}') merged_headers = self.headers.copy() if headers: merged_headers.update(headers) - console.print_info(merged_headers) + print_info(merged_headers) response = requests.request(method.value, url, headers = merged_headers, json = data, verify = False, timeout = 30) @@ -157,7 +157,7 @@ def _request(self, method: HTTP_VERB, path: str, headers: list[any] = None, data return responseBody except requests.exceptions.RequestException as e: - console.print_error(f'Error making request: {e}') + print_error(f'Error making request: {e}') return None def _multiRequest(self, method: HTTP_VERB, path: str, runs: int, headers: list[any] = None, data: any = None, msg: str | None = None, printResponse: bool = True, sleepMs: int | None = None) -> list[dict[str, Any]]: # pylint: disable=invalid-name,too-many-locals @@ -185,22 +185,22 @@ def _multiRequest(self, method: HTTP_VERB, path: str, runs: int, headers: list[a try: if msg: - console.print_message(msg, blank_above = True) + print_message(msg, blank_above = True) # Ensure path has a leading slash if not path.startswith('/'): path = '/' + path url = self._url + path - console.print_info(f'{method.value} {url}') + print_info(f'{method.value} {url}') for i in range(runs): - console.print_info(f'▶️ Run {i + 1}/{runs}:') + print_info(f'▶️ Run {i + 1}/{runs}:') start_time = time.time() response = session.request(method.value, url, json = data, verify = False) response_time = time.time() - start_time - console.print_info(f'⌚ {response_time:.2f} seconds') + print_info(f'⌚ {response_time:.2f} seconds') self._print_response_code(response) @@ -234,16 +234,16 @@ def _print_response(self, response) -> None: """ self._print_response_code(response) - console.print_val('Response headers', response.headers, True) + print_val('Response headers', response.headers, True) if response.status_code == 200: try: data = json.loads(response.text) - console.print_val('Response body', json.dumps(data, indent = 4), True) + print_val('Response body', json.dumps(data, indent = 4), True) except Exception: - console.print_val('Response body', response.text, True) + print_val('Response body', response.text, True) else: - console.print_val('Response body', response.text, True) + print_val('Response body', response.text, True) def _print_response_code(self, response) -> None: """ @@ -251,13 +251,13 @@ def _print_response_code(self, response) -> None: """ if 200 <= response.status_code < 300: - 
status_code_str = f'{console.BOLD_G}{response.status_code} - {response.reason}{console.RESET}' + status_code_str = f'{BOLD_G}{response.status_code} - {response.reason}{RESET}' elif response.status_code >= 400: - status_code_str = f'{console.BOLD_R}{response.status_code} - {response.reason}{console.RESET}' + status_code_str = f'{BOLD_R}{response.status_code} - {response.reason}{RESET}' else: status_code_str = str(response.status_code) - console.print_val('Response status', status_code_str) + print_val('Response status', status_code_str) def _poll_async_operation(self, location_url: str, headers: dict = None, timeout: int = 60, poll_interval: int = 2) -> requests.Response | None: """ @@ -276,28 +276,28 @@ def _poll_async_operation(self, location_url: str, headers: dict = None, timeout while time.time() - start_time < timeout: try: - console.print_info(f'GET {location_url}', True) - console.print_info(headers) + print_info(f'GET {location_url}', True) + print_info(headers) response = requests.get(location_url, headers = headers or {}, verify = False, timeout = 30) - console.print_info(f'Polling operation - Status: {response.status_code}') + print_info(f'Polling operation - Status: {response.status_code}') if response.status_code == 200: - console.print_ok('Async operation completed successfully!') + print_ok('Async operation completed successfully!') return response if response.status_code == 202: - console.print_info(f'Operation still in progress, waiting {poll_interval} seconds...') + print_info(f'Operation still in progress, waiting {poll_interval} seconds...') time.sleep(poll_interval) else: - console.print_error(f'Unexpected status code during polling: {response.status_code}') + print_error(f'Unexpected status code during polling: {response.status_code}') return response except requests.exceptions.RequestException as e: - console.print_error(f'Error polling operation: {e}') + print_error(f'Error polling operation: {e}') return None - console.print_error(f'Async operation timeout reached after {timeout} seconds') + print_error(f'Async operation timeout reached after {timeout} seconds') return None # ------------------------------ @@ -372,32 +372,32 @@ def singlePostAsync(self, path: str, *, headers = None, data = None, msg: str | try: if msg: - console.print_message(msg, blank_above = True) + print_message(msg, blank_above = True) # Ensure path has a leading slash if not path.startswith('/'): path = '/' + path url = self._url + path - console.print_info(f'POST {url}') + print_info(f'POST {url}') merged_headers = self.headers.copy() if headers: merged_headers.update(headers) - console.print_info(merged_headers) + print_info(merged_headers) # Make the initial async request response = requests.request(HTTP_VERB.POST.value, url, headers = merged_headers, json = data, verify = False, timeout = 30) - console.print_info(f'Initial response status: {response.status_code}') + print_info(f'Initial response status: {response.status_code}') if response.status_code == 202: # Accepted - async operation started location_header = response.headers.get('Location') if location_header: - console.print_info(f'Found Location header: {location_header}') + print_info(f'Found Location header: {location_header}') # Poll the location URL until completion final_response = self._poll_async_operation(location_header, timeout = timeout, poll_interval = poll_interval ) @@ -416,10 +416,10 @@ def singlePostAsync(self, path: str, *, headers = None, data = None, msg: str | return responseBody - console.print_error('Async 
operation failed or timed out') + print_error('Async operation failed or timed out') return None - console.print_error('No Location header found in 202 response') + print_error('No Location header found in 202 response') if printResponse: self._print_response(response) return None @@ -439,5 +439,5 @@ def singlePostAsync(self, path: str, *, headers = None, data = None, msg: str | return responseBody except requests.exceptions.RequestException as e: - console.print_error(f'Error making request: {e}') + print_error(f'Error making request: {e}') return None diff --git a/shared/python/apimtypes.py b/shared/python/apimtypes.py index 5debf01..8eaa77d 100644 --- a/shared/python/apimtypes.py +++ b/shared/python/apimtypes.py @@ -15,11 +15,7 @@ from json_utils import is_string_json, extract_json -# ------------------------------ -# PRIVATE METHODS -# ------------------------------ - -def _get_project_root() -> Path: +def get_project_root() -> Path: """Get the project root directory path.""" # Try to get from environment variable first (set by .env file) if 'PROJECT_ROOT' in os.environ: @@ -38,7 +34,7 @@ def _get_project_root() -> Path: return Path(__file__).resolve().parent.parent.parent # Get project root and construct absolute paths to policy files -_PROJECT_ROOT = _get_project_root() +_PROJECT_ROOT = get_project_root() _SHARED_XML_POLICY_BASE_PATH = _PROJECT_ROOT / 'shared' / 'apim-policies' # Policy file paths (now absolute and platform-independent) @@ -79,7 +75,7 @@ def _get_project_root() -> Path: 'PolicyFragment', 'Product', # Functions - '_get_project_root', + 'get_project_root', ] diff --git a/shared/python/utils.py b/shared/python/utils.py index c80cebc..33967dd 100644 --- a/shared/python/utils.py +++ b/shared/python/utils.py @@ -17,8 +17,7 @@ # APIM Samples imports import azure_resources as az -import apimtypes -from apimtypes import APIM_SKU, HTTP_VERB, INFRASTRUCTURE, Endpoints, Output, _get_project_root +from apimtypes import APIM_SKU, HTTP_VERB, INFRASTRUCTURE, Endpoints, Output, get_project_root # ------------------------------ # RE-EXPORTS (BACKWARD COMPATIBILITY) @@ -550,7 +549,7 @@ def _determine_bicep_directory(infrastructure_dir: str) -> str: # Try to find the project root and construct the path from there try: - project_root = _get_project_root() + project_root = get_project_root() bicep_dir = os.path.join(str(project_root), 'infrastructure', infrastructure_dir) if os.path.exists(bicep_dir): return bicep_dir @@ -629,7 +628,7 @@ def create_bicep_deployment_group(rg_name: str, rg_location: str, deployment: st print('\nDeploying bicep...\n') return run(cmd, f"Deployment '{deployment_name}' succeeded", f"Deployment '{deployment_name}' failed.", print_command_to_run = False) -# TODO: Reconcile this with apimtypes.py _get_project_root +# TODO: Reconcile this with apimtypes.py get_project_root def find_project_root() -> str: """ Find the project root directory by looking for specific marker files. @@ -894,7 +893,7 @@ def determine_policy_path(policy_xml_filepath_or_filename: str, sample_name: str raise ValueError(f'Could not auto-detect sample name. Please provide sample_name parameter explicitly. 
Error: {e}') from e # Construct the full path - project_root = apimtypes._get_project_root() + project_root = get_project_root() policy_xml_filepath = str(Path(project_root) / 'samples' / sample_name / policy_xml_filepath_or_filename) return policy_xml_filepath diff --git a/tests/python/test_apimrequests.py b/tests/python/test_apimrequests.py index 9dbf141..141edf1 100644 --- a/tests/python/test_apimrequests.py +++ b/tests/python/test_apimrequests.py @@ -38,9 +38,9 @@ def test_init_no_key(): @pytest.mark.http @patch('apimrequests.requests.request') -@patch('apimrequests.console.print_message') -@patch('apimrequests.console.print_info') -@patch('apimrequests.console.print_error') +@patch('apimrequests.print_message') +@patch('apimrequests.print_info') +@patch('apimrequests.print_error') def test_single_get_success(mock_print_error, mock_print_info, mock_print_message, mock_request, apim): mock_response = MagicMock() mock_response.status_code = 200 @@ -58,9 +58,9 @@ def test_single_get_success(mock_print_error, mock_print_info, mock_print_messag @pytest.mark.http @patch('apimrequests.requests.request') -@patch('apimrequests.console.print_message') -@patch('apimrequests.console.print_info') -@patch('apimrequests.console.print_error') +@patch('apimrequests.print_message') +@patch('apimrequests.print_info') +@patch('apimrequests.print_error') def test_single_get_error(mock_print_error, mock_print_info, mock_print_message, mock_request, apim): mock_request.side_effect = requests.exceptions.RequestException('fail') result = apim.singleGet(DEFAULT_PATH, printResponse=True) @@ -69,9 +69,9 @@ def test_single_get_error(mock_print_error, mock_print_info, mock_print_message, @pytest.mark.http @patch('apimrequests.requests.request') -@patch('apimrequests.console.print_message') -@patch('apimrequests.console.print_info') -@patch('apimrequests.console.print_error') +@patch('apimrequests.print_message') +@patch('apimrequests.print_info') +@patch('apimrequests.print_error') def test_single_post_success(mock_print_error, mock_print_info, mock_print_message, mock_request, apim): mock_response = MagicMock() mock_response.status_code = 201 @@ -89,8 +89,8 @@ def test_single_post_success(mock_print_error, mock_print_info, mock_print_messa @pytest.mark.http @patch('apimrequests.requests.Session') -@patch('apimrequests.console.print_message') -@patch('apimrequests.console.print_info') +@patch('apimrequests.print_message') +@patch('apimrequests.print_info') def test_multi_get_success(mock_print_info, mock_print_message, mock_session, apim): mock_sess = MagicMock() mock_response = MagicMock() @@ -113,8 +113,8 @@ def test_multi_get_success(mock_print_info, mock_print_message, mock_session, ap @pytest.mark.http @patch('apimrequests.requests.Session') -@patch('apimrequests.console.print_message') -@patch('apimrequests.console.print_info') +@patch('apimrequests.print_message') +@patch('apimrequests.print_info') def test_multi_get_error(mock_print_info, mock_print_message, mock_session, apim): mock_sess = MagicMock() mock_sess.request.side_effect = requests.exceptions.RequestException('fail') @@ -137,7 +137,7 @@ def make_apim(): def test_single_post_error(): apim = make_apim() with patch('apimrequests.requests.request') as mock_request, \ - patch('apimrequests.console.print_error') as mock_print_error: + patch('apimrequests.print_error') as mock_print_error: mock_request.side_effect = requests.RequestException('fail') result = apim.singlePost(PATH, data={'foo': 'bar'}, printResponse=True) assert result is None @@ -190,7 
+190,7 @@ def test_print_response_code_edge(): class DummyResponse: status_code = 302 reason = 'Found' - with patch('apimrequests.console.print_val') as mock_print_val: + with patch('apimrequests.print_val') as mock_print_val: apim._print_response_code(DummyResponse()) mock_print_val.assert_called_with('Response status', '302') @@ -324,8 +324,8 @@ def test_headers_setter(apim): @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.console.print_message') -@patch('apimrequests.console.print_info') +@patch('apimrequests.print_message') +@patch('apimrequests.print_info') def test_request_with_message(mock_print_info, mock_print_message, mock_request, apim): """Test _request method with message parameter.""" mock_response = MagicMock() @@ -343,7 +343,7 @@ def test_request_with_message(mock_print_info, mock_print_message, mock_request, @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.console.print_info') +@patch('apimrequests.print_info') def test_request_path_without_leading_slash(mock_print_info, mock_request, apim): """Test _request method with PATH without leading slash.""" mock_response = MagicMock() @@ -365,8 +365,8 @@ def test_request_path_without_leading_slash(mock_print_info, mock_request, apim) @pytest.mark.unit @patch('apimrequests.requests.Session') -@patch('apimrequests.console.print_message') -@patch('apimrequests.console.print_info') +@patch('apimrequests.print_message') +@patch('apimrequests.print_info') def test_multi_request_with_message(mock_print_info, mock_print_message, mock_session_class, apim): """Test _multiRequest method with message parameter.""" mock_session = MagicMock() @@ -388,7 +388,7 @@ def test_multi_request_with_message(mock_print_info, mock_print_message, mock_se @pytest.mark.unit @patch('apimrequests.requests.Session') -@patch('apimrequests.console.print_info') +@patch('apimrequests.print_info') def test_multi_request_path_without_leading_slash(mock_print_info, mock_session_class, apim): """Test _multiRequest method with PATH without leading slash.""" mock_session = MagicMock() @@ -413,7 +413,7 @@ def test_multi_request_path_without_leading_slash(mock_print_info, mock_session_ @pytest.mark.unit @patch('apimrequests.requests.Session') -@patch('apimrequests.console.print_info') +@patch('apimrequests.print_info') def test_multi_request_non_json_response(mock_print_info, mock_session_class, apim): """Test _multiRequest method with non-JSON response.""" mock_session = MagicMock() @@ -433,7 +433,7 @@ def test_multi_request_non_json_response(mock_print_info, mock_session_class, ap @pytest.mark.unit -@patch('apimrequests.console.print_val') +@patch('apimrequests.print_val') def test_print_response_non_200_status(mock_print_val, apim): """Test _print_response method with non-200 status code.""" mock_response = MagicMock() @@ -451,8 +451,8 @@ def test_print_response_non_200_status(mock_print_val, apim): @pytest.mark.unit @patch('apimrequests.requests.get') -@patch('apimrequests.console.print_info') -@patch('apimrequests.console.print_ok') +@patch('apimrequests.print_info') +@patch('apimrequests.print_ok') @patch('apimrequests.time.sleep') def test_poll_async_operation_success(mock_sleep, mock_print_ok, mock_print_info, mock_get, apim): """Test _poll_async_operation method with successful completion.""" @@ -468,8 +468,8 @@ def test_poll_async_operation_success(mock_sleep, mock_print_ok, mock_print_info @pytest.mark.unit @patch('apimrequests.requests.get') -@patch('apimrequests.console.print_info') 
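The mechanical change running through these test diffs, `apimrequests.console.print_*` becoming `apimrequests.print_*`, follows from the module now using `from console import ...`: `unittest.mock.patch` must target the namespace where a name is looked up, not where it is defined. A self-contained sketch (toy modules, not repo code) of that rule:

```python
import sys
import types
from unittest.mock import patch

helpers = types.ModuleType('helpers')
helpers.greet = lambda: 'real'
sys.modules['helpers'] = helpers

# Module m binds its own reference via 'from helpers import greet'.
m = types.ModuleType('m')
exec('from helpers import greet\ndef call():\n    return greet()', m.__dict__)
sys.modules['m'] = m

with patch('m.greet', return_value='mocked'):
    print(m.call())  # 'mocked': patched where the name is looked up

with patch('helpers.greet', return_value='mocked'):
    print(m.call())  # 'real': m still holds its own reference to greet
```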
-@patch('apimrequests.console.print_error') +@patch('apimrequests.print_info') +@patch('apimrequests.print_error') @patch('apimrequests.time.sleep') def test_poll_async_operation_in_progress_then_success(mock_sleep, mock_print_error, mock_print_info, mock_get, apim): """Test _poll_async_operation method with in-progress then success.""" @@ -489,7 +489,7 @@ def test_poll_async_operation_in_progress_then_success(mock_sleep, mock_print_er @pytest.mark.unit @patch('apimrequests.requests.get') -@patch('apimrequests.console.print_error') +@patch('apimrequests.print_error') def test_poll_async_operation_unexpected_status(mock_print_error, mock_get, apim): """Test _poll_async_operation method with unexpected status code.""" mock_response = MagicMock() @@ -504,7 +504,7 @@ def test_poll_async_operation_unexpected_status(mock_print_error, mock_get, apim @pytest.mark.unit @patch('apimrequests.requests.get') -@patch('apimrequests.console.print_error') +@patch('apimrequests.print_error') def test_poll_async_operation_request_exception(mock_print_error, mock_get, apim): """Test _poll_async_operation method with request exception.""" mock_get.side_effect = requests.exceptions.RequestException('Connection error') @@ -517,7 +517,7 @@ def test_poll_async_operation_request_exception(mock_print_error, mock_get, apim @pytest.mark.unit @patch('apimrequests.requests.get') -@patch('apimrequests.console.print_error') +@patch('apimrequests.print_error') @patch('apimrequests.time.time') @patch('apimrequests.time.sleep') def test_poll_async_operation_timeout(mock_sleep, mock_time, mock_print_error, mock_get, apim): @@ -537,8 +537,8 @@ def test_poll_async_operation_timeout(mock_sleep, mock_time, mock_print_error, m @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.console.print_message') -@patch('apimrequests.console.print_info') +@patch('apimrequests.print_message') +@patch('apimrequests.print_info') def test_single_post_async_success_with_location(mock_print_info, mock_print_message, mock_request, apim): """Test singlePostAsync method with successful async operation.""" # Mock initial 202 response with Location header @@ -567,8 +567,8 @@ def test_single_post_async_success_with_location(mock_print_info, mock_print_mes @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.console.print_info') -@patch('apimrequests.console.print_error') +@patch('apimrequests.print_info') +@patch('apimrequests.print_error') def test_single_post_async_no_location_header(mock_print_error, mock_print_info, mock_request, apim): """Test singlePostAsync method with 202 response but no Location header.""" mock_response = MagicMock() @@ -586,7 +586,7 @@ def test_single_post_async_no_location_header(mock_print_error, mock_print_info, @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.console.print_info') +@patch('apimrequests.print_info') def test_single_post_async_non_async_response(mock_print_info, mock_request, apim): """Test singlePostAsync method with non-async (immediate) response.""" mock_response = MagicMock() @@ -605,7 +605,7 @@ def test_single_post_async_non_async_response(mock_print_info, mock_request, api @pytest.mark.unit @patch('apimrequests.requests.request') -@patch('apimrequests.console.print_error') +@patch('apimrequests.print_error') def test_single_post_async_request_exception(mock_print_error, mock_request, apim): """Test singlePostAsync method with request exception.""" mock_request.side_effect = 
requests.exceptions.RequestException('Connection error')
@@ -618,7 +618,7 @@ def test_single_post_async_request_exception(mock_print_error, mock_request, api
 
 @pytest.mark.unit
 @patch('apimrequests.requests.request')
-@patch('apimrequests.console.print_error')
+@patch('apimrequests.print_error')
 def test_single_post_async_failed_polling(mock_print_error, mock_request, apim):
     """Test singlePostAsync method with failed async operation polling."""
     initial_response = MagicMock()
@@ -636,7 +636,7 @@ def test_single_post_async_failed_polling(mock_print_error, mock_request, apim):
 
 @pytest.mark.unit
 @patch('apimrequests.requests.request')
-@patch('apimrequests.console.print_info')
+@patch('apimrequests.print_info')
 def test_single_post_async_path_without_leading_slash(mock_print_info, mock_requ
@@ -658,7 +658,7 @@ def test_single_post_async_path_without_leading_slash(mock_print_info, mock_requ
 
 @pytest.mark.unit
 @patch('apimrequests.requests.request')
-@patch('apimrequests.console.print_info')
+@patch('apimrequests.print_info')
 def test_single_post_async_non_json_response(mock_print_info, mock_request, apim):
     """Test singlePostAsync method with non-JSON response."""
     mock_response = MagicMock()
@@ -671,3 +671,4 @@ def test_single_post_async_non_json_response(mock_print_info, mock_request, apim
 
     result = apim.singlePostAsync('/test')
     assert result == 'Plain text result'
+
diff --git a/tests/python/test_apimtypes.py b/tests/python/test_apimtypes.py
index a923bd7..6675714 100644
--- a/tests/python/test_apimtypes.py
+++ b/tests/python/test_apimtypes.py
@@ -814,11 +814,11 @@ def test_policy_fragment_repr():
 # ADDITIONAL COVERAGE TESTS
 # ------------------------------
 
 def test_get_project_root_functionality():
-    """Test _get_project_root function comprehensively."""
+    """Test get_project_root function comprehensively."""
 
     # This function should return the project root
-    root = apimtypes._get_project_root()
+    root = apimtypes.get_project_root()
 
     assert isinstance(root, Path)
     assert root.exists()
diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py
index ac588b9..d27a987 100644
--- a/tests/python/test_utils.py
+++ b/tests/python/test_utils.py
@@ -94,7 +94,7 @@ def mock_inspect_currentframe():
         return frame
 
     monkeypatch.setattr('inspect.currentframe', mock_inspect_currentframe)
-    monkeypatch.setattr('apimtypes._get_project_root', lambda: Path('/project'))
+    monkeypatch.setattr('utils.get_project_root', lambda: Path('/project'))
 
     named_values = {
         'jwt_signing_key': 'JwtSigningKey123'
@@ -414,7 +414,7 @@ def test_determine_policy_path_filename_mode(monkeypatch):
 
     # Mock the project root
     mock_project_root = Path('/mock/project/root')
-    monkeypatch.setattr('apimtypes._get_project_root', lambda: mock_project_root)
+    monkeypatch.setattr('utils.get_project_root', lambda: mock_project_root)
 
     # Mock current frame to simulate being in samples/test-sample
     class MockFrame:
@@ -464,7 +464,7 @@ def test_wait_for_apim_blob_permissions_failure(monkeypatch):
 def test_read_policy_xml_with_sample_name_explicit(monkeypatch):
     """Test read_policy_xml with explicit sample name."""
     mock_project_root = Path('/mock/project/root')
-    monkeypatch.setattr('apimtypes._get_project_root', lambda: mock_project_root)
+    monkeypatch.setattr('utils.get_project_root', lambda: mock_project_root)
 
     xml_content = ''
     m = mock_open(read_data=xml_content)
From dd34e3dee8fdea61bb50716d36bf25429f8461a6
Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Fri, 12 Dec 2025 14:57:12 -0500 Subject: [PATCH 17/23] Change private _run to public run --- .github/copilot-instructions.python.md | 19 +++- .../apim-aca/create_infrastructure.py | 7 +- shared/python/apimtypes.py | 33 +------ shared/python/authfactory.py | 3 +- shared/python/azure_resources.py | 61 ++++-------- shared/python/console.py | 12 --- shared/python/infrastructures.py | 92 ++++++++----------- shared/python/utils.py | 65 ++++--------- tests/python/test_azure_resources.py | 40 ++++---- tests/python/test_infrastructures.py | 78 ++++++++-------- tests/python/test_utils.py | 72 +++++++-------- 11 files changed, 189 insertions(+), 293 deletions(-) diff --git a/.github/copilot-instructions.python.md b/.github/copilot-instructions.python.md index 4aae6e7..3864714 100644 --- a/.github/copilot-instructions.python.md +++ b/.github/copilot-instructions.python.md @@ -27,11 +27,20 @@ applyTo: "**/*.py" ## Import Style Guidelines - Imports from this repo should be grouped, be imported last, and have a group header called `# APIM Samples imports` -- **Prefer specific imports** over module imports for clarity: `from module import Class, function` -- **Use aliases for frequently-used modules**: `import azure_resources as az` -- **Console module**: Always use specific imports: `from console import print_error, print_info, ...` -- **Avoid mixing patterns**: Don't use both `import module` and `from module import ...` for the same module -- **Order within APIM Samples imports section**: +- Only use multi-line imports when a single-line is too long +- Avoid mixing patterns: Don't use both `import module` and `from module import ...` for the same module +- Parentheses in imports: Only use parentheses for multi-line imports, not for single-line imports: + - Good: `from console import print_error, print_val` + - Bad: `from console import (print_error, print_val)` + - Good (multi-line): + ```python + from console import ( + print_error, + print_info, + print_ok + ) + ``` +- Order within APIM Samples imports section: 1. Module imports with aliases (e.g., `import azure_resources as az`) 2. Specific type/constant imports (e.g., `from apimtypes import INFRASTRUCTURE`) 3. 
Specific function imports (e.g., `from console import print_error`) diff --git a/infrastructure/apim-aca/create_infrastructure.py b/infrastructure/apim-aca/create_infrastructure.py index 0a5acea..85e7b7a 100644 --- a/infrastructure/apim-aca/create_infrastructure.py +++ b/infrastructure/apim-aca/create_infrastructure.py @@ -7,10 +7,9 @@ # APIM Samples imports import azure_resources as az -from apimtypes import APIM_SKU, API, GET_APIOperation, BACKEND_XML_POLICY_PATH -from apimtypes import INFRASTRUCTURE +from apimtypes import APIM_SKU, API, GET_APIOperation, BACKEND_XML_POLICY_PATH, INFRASTRUCTURE from infrastructures import ApimAcaInfrastructure -import utils +from utils import read_policy_xml def create_infrastructure(location: str, index: int, apim_sku: APIM_SKU) -> None: @@ -40,7 +39,7 @@ def _create_aca_specific_apis() -> list[API]: """ # Define the APIs with Container Apps backends - pol_backend = utils.read_policy_xml(BACKEND_XML_POLICY_PATH) + pol_backend = read_policy_xml(BACKEND_XML_POLICY_PATH) pol_aca_backend_1 = pol_backend.format(backend_id = 'aca-backend-1') pol_aca_backend_2 = pol_backend.format(backend_id = 'aca-backend-2') pol_aca_backend_pool = pol_backend.format(backend_id = 'aca-backend-pool') diff --git a/shared/python/apimtypes.py b/shared/python/apimtypes.py index 8eaa77d..0616bc5 100644 --- a/shared/python/apimtypes.py +++ b/shared/python/apimtypes.py @@ -11,7 +11,7 @@ from typing import List, Optional, Any # APIM Samples imports -from console import (print_error, print_val) +from console import print_error, print_val from json_utils import is_string_json, extract_json @@ -47,37 +47,6 @@ def get_project_root() -> Path: SUBSCRIPTION_KEY_PARAMETER_NAME = 'api-key' SLEEP_TIME_BETWEEN_REQUESTS_MS = 50 -# Explicitly define what is exported with 'from apimtypes import *' -__all__ = [ - # Constants - 'DEFAULT_XML_POLICY_PATH', - 'HELLO_WORLD_XML_POLICY_PATH', - 'REQUEST_HEADERS_XML_POLICY_PATH', - 'BACKEND_XML_POLICY_PATH', - 'API_ID_XML_POLICY_PATH', - 'SUBSCRIPTION_KEY_PARAMETER_NAME', - 'SLEEP_TIME_BETWEEN_REQUESTS_MS', - # Enums - 'Role', - 'APIMNetworkMode', - 'APIM_SKU', - 'HTTP_VERB', - 'INFRASTRUCTURE', - # Data classes and regular classes - 'Endpoints', - 'Output', - 'API', - 'APIOperation', - 'GET_APIOperation', - 'GET_APIOperation2', - 'POST_APIOperation', - 'NamedValue', - 'PolicyFragment', - 'Product', - # Functions - 'get_project_root', -] - # ------------------------------ # PRIVATE METHODS diff --git a/shared/python/authfactory.py b/shared/python/authfactory.py index 2b47a04..a52bf91 100644 --- a/shared/python/authfactory.py +++ b/shared/python/authfactory.py @@ -4,9 +4,10 @@ from typing import Any import time -from users import User import jwt +# APIM Samples imports +from users import User # ------------------------------ # CLASSES diff --git a/shared/python/azure_resources.py b/shared/python/azure_resources.py index e6a0e3b..7375356 100644 --- a/shared/python/azure_resources.py +++ b/shared/python/azure_resources.py @@ -18,35 +18,12 @@ from apimtypes import INFRASTRUCTURE, Endpoints, Output from console import print_ok, print_warning, print_error, print_val, print_message, print_info, print_command, print_success -# Explicitly define what is exported with 'from azure_resources import *' -__all__ = [ - # Public functions - 'cleanup_old_jwt_signing_keys', - 'check_apim_blob_permissions', - 'find_infrastructure_instances', - 'create_resource_group', - 'get_azure_role_guid', - 'does_resource_group_exist', - 'get_resource_group_location', - 
'get_account_info', - 'get_deployment_name', - 'get_frontdoor_url', - 'get_apim_url', - 'get_appgw_endpoint', - 'get_infra_rg_name', - 'get_unique_suffix_for_resource_group', - 'get_rg_name', - 'get_endpoints', - # Private functions (exported for backward compatibility) - '_run', -] - # ------------------------------ # PRIVATE FUNCTIONS # ------------------------------ -def _run(command: str, ok_message: str = '', error_message: str = '', print_output: bool = False, print_command_to_run: bool = True, print_errors: bool = True, print_warnings: bool = True) -> Output: +def run(command: str, ok_message: str = '', error_message: str = '', print_output: bool = False, print_command_to_run: bool = True, print_errors: bool = True, print_warnings: bool = True) -> Output: """ Execute a shell command, log the command and its output, and attempt to extract JSON from the output. @@ -151,7 +128,7 @@ def cleanup_old_jwt_signing_keys(apim_name: str, resource_group_name: str, curre # Get all named values that start with 'JwtSigningKey' print_info(f"Getting all JWT signing key named values from APIM '{apim_name}'...") - output = _run( + output = run( f'az apim nv list --service-name "{apim_name}" --resource-group "{resource_group_name}" --query "[?contains(name, \'JwtSigningKey\')].name" -o tsv', 'Retrieved JWT signing keys', 'Failed to retrieve JWT signing keys' @@ -186,7 +163,7 @@ def cleanup_old_jwt_signing_keys(apim_name: str, resource_group_name: str, curre kept_count += 1 else: print_info(f'Deleting old JWT key: {jwt_key}') - delete_output = _run( + delete_output = run( f'az apim nv delete --service-name "{apim_name}" --resource-group "{resource_group_name}" --named-value-id "{jwt_key}" --yes', f'Deleted old JWT key: {jwt_key}', f'Failed to delete JWT key: {jwt_key}', @@ -226,7 +203,7 @@ def check_apim_blob_permissions(apim_name: str, storage_account_name: str, resou # Get APIM's managed identity principal ID print_info('Getting APIM managed identity...') - apim_identity_output = _run( + apim_identity_output = run( f'az apim show --name {apim_name} --resource-group {resource_group_name} --query identity.principalId -o tsv', error_message='Failed to get APIM managed identity', print_command_to_run=True @@ -239,7 +216,7 @@ def check_apim_blob_permissions(apim_name: str, storage_account_name: str, resou principal_id = apim_identity_output.text.strip() print_info(f'APIM managed identity principal ID: {principal_id}') # Get storage account resource ID # Remove suppression flags to get raw output, then extract resource ID with regex - storage_account_output = _run( + storage_account_output = run( f'az storage account show --name {storage_account_name} --resource-group {resource_group_name} --query id -o tsv', error_message='Failed to get storage account resource ID', print_command_to_run=True @@ -268,7 +245,7 @@ def check_apim_blob_permissions(apim_name: str, storage_account_name: str, resou while elapsed_time < max_wait_seconds: # Check if role assignment exists - role_assignment_output = _run( + role_assignment_output = run( f"az role assignment list --assignee {principal_id} --scope {storage_account_id} --role {blob_reader_role_id} --query '[0].id' -o tsv", error_message='Failed to check role assignment', print_command_to_run=True, @@ -280,7 +257,7 @@ def check_apim_blob_permissions(apim_name: str, storage_account_name: str, resou # Additional check: try to test blob access using the managed identity print_info('Testing actual blob access...') - test_access_output = _run( + test_access_output = run( f"az 
storage blob list --account-name {storage_account_name} --container-name samples --auth-mode login --only-show-errors --query '[0].name' -o tsv 2>/dev/null || echo 'access-test-failed'", error_message='', print_command_to_run=True, @@ -327,7 +304,7 @@ def find_infrastructure_instances(infrastructure: INFRASTRUCTURE) -> list[tuple[ # Query Azure for resource groups with the infrastructure tag query_cmd = f'az group list --tag infrastructure={infrastructure.value} --query "[].name" -o tsv' - output = _run(query_cmd, print_command_to_run = False, print_errors = False) + output = run(query_cmd, print_command_to_run = False, print_errors = False) if output.success and output.text.strip(): rg_names = [name.strip() for name in output.text.strip().split('\n') if name.strip()] @@ -374,7 +351,7 @@ def create_resource_group(rg_name: str, resource_group_location: str | None = No escaped_value = value.replace('"', '\\"') if isinstance(value, str) else str(value) tag_string += f' {key}=\"{escaped_value}\"' - _run(f'az group create --name {rg_name} --location {resource_group_location} --tags {tag_string}', + run(f'az group create --name {rg_name} --location {resource_group_location} --tags {tag_string}', f"Resource group '{rg_name}' created", f"Failed to create the resource group '{rg_name}'", False, False, False, False) @@ -420,7 +397,7 @@ def does_resource_group_exist(resource_group_name: str) -> bool: bool: True if the resource group exists, False otherwise. """ - output = _run(f'az group show --name {resource_group_name} -o json', print_command_to_run = False, print_errors = False) + output = run(f'az group show --name {resource_group_name} -o json', print_command_to_run = False, print_errors = False) return output.success @@ -435,7 +412,7 @@ def get_resource_group_location(resource_group_name: str) -> str | None: str | None: The location of the resource group if found, otherwise None. """ - output = _run(f'az group show --name {resource_group_name} --query "location" -o tsv', print_command_to_run = False, print_errors = False) + output = run(f'az group show --name {resource_group_name} --query "location" -o tsv', print_command_to_run = False, print_errors = False) if output.success and output.text.strip(): return output.text.strip() @@ -453,8 +430,8 @@ def get_account_info() -> Tuple[str, str, str, str]: Exception: If account information cannot be retrieved. 
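Callers unpack the four values positionally; the order shown below matches the call sites elsewhere in this series (`current_user, current_user_id, tenant_id, subscription_id = az.get_account_info()`). A hedged usage sketch, assuming an authenticated Azure CLI session:

```python
# APIM Samples imports
import azure_resources as az

# Raises if 'az account show' / 'az ad signed-in-user show' fail (e.g. not logged in).
current_user, current_user_id, tenant_id, subscription_id = az.get_account_info()
print(f'{current_user} ({current_user_id}) on tenant {tenant_id}, subscription {subscription_id}')
```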
""" - account_show_output = _run('az account show', 'Retrieved az account', 'Failed to get the current az account', print_command_to_run = False) - ad_user_show_output = _run('az ad signed-in-user show', 'Retrieved az ad signed-in-user', 'Failed to get the current az ad signed-in-user', print_command_to_run = False) + account_show_output = run('az account show', 'Retrieved az account', 'Failed to get the current az account', print_command_to_run = False) + ad_user_show_output = run('az ad signed-in-user show', 'Retrieved az ad signed-in-user', 'Failed to get the current az ad signed-in-user', print_command_to_run = False) if account_show_output.success and account_show_output.json_data and ad_user_show_output.success and ad_user_show_output.json_data: current_user = account_show_output.json_data['user']['name'] @@ -507,14 +484,14 @@ def get_frontdoor_url(deployment_name: INFRASTRUCTURE, rg_name: str) -> str | No afd_endpoint_url: str | None = None if deployment_name == INFRASTRUCTURE.AFD_APIM_PE: - output = _run(f'az afd profile list -g {rg_name} -o json') + output = run(f'az afd profile list -g {rg_name} -o json') if output.success and output.json_data: afd_profile_name = output.json_data[0]['name'] print_ok(f'Front Door Profile Name: {afd_profile_name}', blank_above = False) if afd_profile_name: - output = _run(f'az afd endpoint list -g {rg_name} --profile-name {afd_profile_name} -o json') + output = run(f'az afd endpoint list -g {rg_name} --profile-name {afd_profile_name} -o json') if output.success and output.json_data: afd_hostname = output.json_data[0]['hostName'] @@ -542,7 +519,7 @@ def get_apim_url(rg_name: str) -> str | None: apim_endpoint_url: str | None = None - output = _run(f'az apim list -g {rg_name} -o json', print_command_to_run = False) + output = run(f'az apim list -g {rg_name} -o json', print_command_to_run = False) if output.success and output.json_data: apim_gateway_url = output.json_data[0]['gatewayUrl'] @@ -573,7 +550,7 @@ def get_appgw_endpoint(rg_name: str) -> Tuple[str | None, str | None]: public_ip: str | None = None # Get Application Gateway details - output = _run(f'az network application-gateway list -g {rg_name} -o json', print_command_to_run = False) + output = run(f'az network application-gateway list -g {rg_name} -o json', print_command_to_run = False) if output.success and output.json_data: appgw_name = output.json_data[0]['name'] @@ -601,7 +578,7 @@ def get_appgw_endpoint(rg_name: str) -> Tuple[str | None, str | None]: public_ip_name = public_ip_id.split('/')[-1] # Get public IP details - ip_output = _run(f'az network public-ip show -g {rg_name} -n {public_ip_name} -o json', print_command_to_run = False) + ip_output = run(f'az network public-ip show -g {rg_name} -n {public_ip_name} -o json', print_command_to_run = False) if ip_output.success and ip_output.json_data: public_ip = ip_output.json_data.get('ipAddress') @@ -661,7 +638,7 @@ def get_unique_suffix_for_resource_group(rg_name: str) -> str: try: deployment_name = f'get-suffix-{int(time.time())}' - output = _run( + output = run( f'az deployment group create --name {deployment_name} --resource-group {rg_name} --template-file "{template_path}" --query "properties.outputs.suffix.value" -o tsv', print_command_to_run = False, print_errors = False diff --git a/shared/python/console.py b/shared/python/console.py index 8663df1..d65042e 100644 --- a/shared/python/console.py +++ b/shared/python/console.py @@ -32,18 +32,6 @@ # Thread-safe print lock _print_lock = threading.Lock() -# Explicitly define what is 
exported with 'from console import *' -__all__ = [ - # Constants - 'BOLD_B', 'BOLD_G', 'BOLD_R', 'BOLD_Y', 'BOLD_C', 'BOLD_M', 'BOLD_W', 'RESET', - 'THREAD_COLORS', 'CONSOLE_WIDTH', - # Private (but re-exported for backward compatibility) - '_print_lock', '_print_log', - # Public functions - 'print_command', 'print_error', 'print_info', 'print_message', - 'print_ok', 'print_success', 'print_warning', 'print_val', 'print_header', -] - # ------------------------------ # PRIVATE METHODS diff --git a/shared/python/infrastructures.py b/shared/python/infrastructures.py index fa0c1b0..ecfa03e 100644 --- a/shared/python/infrastructures.py +++ b/shared/python/infrastructures.py @@ -12,22 +12,10 @@ import requests # APIM Samples imports -from apimtypes import ( - API, - APIM_SKU, - APIMNetworkMode, - GET_APIOperation, - HELLO_WORLD_XML_POLICY_PATH, - INFRASTRUCTURE, - PolicyFragment, -) -from console import ( - BOLD_R, BOLD_Y, RESET, THREAD_COLORS, _print_lock, _print_log, - print_error, print_info, print_message, print_ok, print_success, print_warning -) +from apimtypes import API, APIM_SKU, APIMNetworkMode, GET_APIOperation, HELLO_WORLD_XML_POLICY_PATH, INFRASTRUCTURE, PolicyFragment +from console import BOLD_R, BOLD_Y, RESET, THREAD_COLORS, _print_lock, _print_log, print_error, print_info, print_message, print_ok, print_success, print_warning import azure_resources as az import utils -from utils import Output # ------------------------------ @@ -141,7 +129,7 @@ def _verify_infrastructure(self, rg_name: str) -> bool: print('✅ Resource group verified') # Get APIM service details - output = utils.run(f'az apim list -g {rg_name} --query "[0]" -o json', print_command_to_run = False, print_errors = False) + output = az.run(f'az apim list -g {rg_name} --query "[0]" -o json', print_command_to_run = False, print_errors = False) if output.success and output.json_data: apim_name = output.json_data.get('name') @@ -149,7 +137,7 @@ def _verify_infrastructure(self, rg_name: str) -> bool: print(f'✅ APIM Service verified: {apim_name}') # Get API count - api_output = utils.run(f'az apim api list --service-name {apim_name} -g {rg_name} --query "length(@)"', + api_output = az.run(f'az apim api list --service-name {apim_name} -g {rg_name} --query "length(@)"', print_command_to_run = False, print_errors = False) if api_output.success: @@ -160,7 +148,7 @@ def _verify_infrastructure(self, rg_name: str) -> bool: if api_count > 0: try: # Get subscription key for testing - sub_output = utils.run(f'az apim subscription list --service-name {apim_name} -g {rg_name} --query "[0].primaryKey" -o tsv', + sub_output = az.run(f'az apim subscription list --service-name {apim_name} -g {rg_name} --query "[0].primaryKey" -o tsv', print_command_to_run = False, print_errors = False) if sub_output.success and sub_output.text.strip(): @@ -268,7 +256,7 @@ def deploy_infrastructure(self, is_update: bool = False) -> 'utils.Output': # Run the deployment directly main_bicep_path = infra_dir / 'main.bicep' - output = utils.run( + output = az.run( f'az deployment group create --name {self.infra.value} --resource-group {self.rg_name} --template-file "{main_bicep_path}" --parameters "{params_file_path}" --query "properties.outputs"', f"Deployment '{self.infra.value}' succeeded", f"Deployment '{self.infra.value}' failed.", @@ -332,7 +320,7 @@ def _verify_infrastructure_specific(self, rg_name: str) -> bool: """ try: # Get Container Apps count - aca_output = utils.run(f'az containerapp list -g {rg_name} --query "length(@)"', print_command_to_run = 
False, print_errors = False) + aca_output = az.run(f'az containerapp list -g {rg_name} --query "length(@)"', print_command_to_run = False, print_errors = False) if aca_output.success: aca_count = int(aca_output.text.strip()) @@ -385,7 +373,7 @@ def _approve_private_link_connections(self, apim_service_id: str) -> bool: try: # Get all pending private endpoint connections - output = utils.run( + output = az.run( f'az network private-endpoint-connection list --id {apim_service_id} --query "[?contains(properties.privateLinkServiceConnectionState.status, \'Pending\')]" -o json', print_command_to_run = False, print_errors = False @@ -414,7 +402,7 @@ def _approve_private_link_connections(self, apim_service_id: str) -> bool: conn_name = conn.get('name', '') print(f' Approving {i}/{total}: {conn_name}') - approve_result = utils.run( + approve_result = az.run( f'az network private-endpoint-connection approve --id {conn_id} --description "Approved by infrastructure deployment"', f'✅ Private Link Connection approved: {conn_name}', f'❌ Failed to approve Private Link Connection: {conn_name}', @@ -466,7 +454,7 @@ def _disable_apim_public_access(self) -> bool: # Run the second deployment main_bicep_path = infra_dir / 'main.bicep' - output = utils.run( + output = az.run( f'az deployment group create --name {self.infra.value}-lockdown --resource-group {self.rg_name} --template-file "{main_bicep_path}" --parameters "{params_file_path}" --query "properties.outputs"', '✅ Public access disabled successfully', '❌ Failed to disable public access', @@ -513,7 +501,7 @@ def _verify_apim_connectivity(self, apim_gateway_url: str) -> bool: print(' ℹ️ Continuing deployment - this may be expected during infrastructure setup') return True # Continue anyway - def deploy_infrastructure(self, is_update: bool = False) -> Output: + def deploy_infrastructure(self, is_update: bool = False) -> utils.Output: """ Deploy the AFD-APIM-PE infrastructure with the required multi-step process. 
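
The private-link approval flow above lists the APIM service's pending private endpoint connections and approves each one by ID through the same az.run wrapper. A rough standalone sketch of that loop, assuming az.run returns the Output object with .success and .json_data used throughout this patch (the function name here is illustrative, not part of the repo API):

import azure_resources as az

def approve_pending_private_links(apim_service_id: str) -> bool:
    # List only the connections still awaiting approval.
    output = az.run(
        f'az network private-endpoint-connection list --id {apim_service_id} '
        '--query "[?contains(properties.privateLinkServiceConnectionState.status, \'Pending\')]" -o json',
        print_command_to_run = False, print_errors = False
    )

    if not (output.success and output.json_data):
        return True  # nothing pending is treated as success

    for conn in output.json_data:
        conn_id = conn.get('id', '')
        result = az.run(
            f'az network private-endpoint-connection approve --id {conn_id} --description "Approved by infrastructure deployment"',
            print_command_to_run = False, print_errors = False
        )

        if not result.success:
            return False

    return True
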
@@ -588,14 +576,14 @@ def _verify_infrastructure_specific(self, rg_name: str) -> bool: """ try: # Check Front Door - afd_output = utils.run(f'az afd profile list -g {rg_name} --query "[0]" -o json', print_command_to_run = False, print_errors = False) + afd_output = az.run(f'az afd profile list -g {rg_name} --query "[0]" -o json', print_command_to_run = False, print_errors = False) if afd_output.success and afd_output.json_data: afd_name = afd_output.json_data.get('name') print(f'✅ Azure Front Door verified: {afd_name}') # Check Container Apps if they exist (optional for this infrastructure) - aca_output = utils.run(f'az containerapp list -g {rg_name} --query "length(@)"', print_command_to_run = False, print_errors = False) + aca_output = az.run(f'az containerapp list -g {rg_name} --query "length(@)"', print_command_to_run = False, print_errors = False) if aca_output.success: aca_count = int(aca_output.text.strip()) @@ -604,10 +592,10 @@ def _verify_infrastructure_specific(self, rg_name: str) -> bool: # Verify private endpoint connections (optional - don't fail if it errors) try: - apim_output = utils.run(f'az apim list -g {rg_name} --query "[0].id" -o tsv', print_command_to_run = False, print_errors = False) + apim_output = az.run(f'az apim list -g {rg_name} --query "[0].id" -o tsv', print_command_to_run = False, print_errors = False) if apim_output.success and apim_output.text.strip(): apim_id = apim_output.text.strip() - pe_output = utils.run(f'az network private-endpoint-connection list --id {apim_id} --query "length(@)"', print_command_to_run = False, print_errors = False) + pe_output = az.run(f'az network private-endpoint-connection list --id {apim_id} --query "length(@)"', print_command_to_run = False, print_errors = False) if pe_output.success: pe_count = int(pe_output.text.strip()) print(f'✅ Private endpoint connections: {pe_count}') @@ -654,7 +642,7 @@ def _create_keyvault_certificate(self, key_vault_name: str) -> bool: print(f' Domain : {self.DOMAIN_NAME}') # Check if certificate already exists - check_output = utils.run( + check_output = az.run( f'az keyvault certificate show --vault-name {key_vault_name} --name {self.CERT_NAME} -o json', print_command_to_run = False, print_errors = False @@ -691,7 +679,7 @@ def _create_keyvault_certificate(self, key_vault_name: str) -> bool: # Create the certificate using Azure CLI # Use escaped double quotes for Windows PowerShell compatibility escaped_policy = cert_policy.replace('"', '\\"') - create_output = utils.run( + create_output = az.run( f'az keyvault certificate create --vault-name {key_vault_name} --name {self.CERT_NAME} --policy "{escaped_policy}"', '✅ Certificate created successfully in Key Vault', '❌ Failed to create certificate in Key Vault', @@ -733,7 +721,7 @@ def _approve_private_link_connections(self, apim_service_id: str) -> bool: try: # Get all pending private endpoint connections - output = utils.run( + output = az.run( f'az network private-endpoint-connection list --id {apim_service_id} --query "[?contains(properties.privateLinkServiceConnectionState.status, \'Pending\')]" -o json', print_command_to_run = False, print_errors = False @@ -763,7 +751,7 @@ def _approve_private_link_connections(self, apim_service_id: str) -> bool: conn_name = conn.get('name', '') print(f' Approving {i}/{total}: {conn_name}') - approve_result = utils.run( + approve_result = az.run( f'az network private-endpoint-connection approve --id {conn_id} --description "Approved by infrastructure deployment"', f'✅ Private Link Connection approved: 
{conn_name}', f'❌ Failed to approve Private Link Connection: {conn_name}', @@ -815,7 +803,7 @@ def _disable_apim_public_access(self) -> bool: # Run the second deployment main_bicep_path = infra_dir / 'main.bicep' - output = utils.run( + output = az.run( f'az deployment group create --name {self.infra.value}-lockdown --resource-group {self.rg_name} --template-file "{main_bicep_path}" --parameters "{params_file_path}" --query "properties.outputs"', '✅ Public access disabled successfully', '❌ Failed to disable public access', @@ -864,7 +852,7 @@ def _verify_apim_connectivity(self, apim_gateway_url: str) -> bool: def _create_keyvault(self, key_vault_name: str) -> bool: # Check if Key Vault already exists - check_kv = utils.run( + check_kv = az.run( f'az keyvault show --name {key_vault_name} --resource-group {self.rg_name} -o json', print_command_to_run = False, print_errors = False @@ -873,7 +861,7 @@ def _create_keyvault(self, key_vault_name: str) -> bool: if not check_kv.success: # Create Key Vault via Azure CLI with RBAC authorization (consistent with Bicep module) print(f' Creating Key Vault: {key_vault_name}') - utils.run( + az.run( f'az keyvault create --name {key_vault_name} --resource-group {self.rg_name} --location {self.rg_location} --enable-rbac-authorization true', f'✅ Key Vault created: {key_vault_name}', '❌ Failed to create Key Vault', @@ -883,7 +871,7 @@ def _create_keyvault(self, key_vault_name: str) -> bool: #Assign Key Vault Certificates Officer role to current user for certificate creation # Key Vault Certificates Officer role - assign_kv_role = utils.run( + assign_kv_role = az.run( f'az role assignment create --role "Key Vault Certificates Officer" --assignee {self.current_user_id} --scope /subscriptions/{self.subscription_id}/resourceGroups/{self.rg_name}/providers/Microsoft.KeyVault/vaults/{key_vault_name}', print_command_to_run = False, print_errors = False @@ -900,7 +888,7 @@ def _create_keyvault(self, key_vault_name: str) -> bool: return True - def deploy_infrastructure(self, is_update: bool = False) -> Output: + def deploy_infrastructure(self, is_update: bool = False) -> utils.Output: """ Deploy the APPGW-APIM-PE infrastructure with the required multi-step process. 
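
The _create_keyvault changes above keep the same probe-create-assign sequence while routing each CLI call through az.run: check whether the vault exists, create it with RBAC authorization if it does not, then grant the signed-in user the Key Vault Certificates Officer role so the self-signed certificate can be created. A condensed sketch of that sequence under the same Output contract (all parameter values are illustrative):

import azure_resources as az

def ensure_keyvault(key_vault_name: str, rg_name: str, rg_location: str, user_id: str, subscription_id: str) -> bool:
    # Probe first so repeated deployments stay idempotent.
    check = az.run(
        f'az keyvault show --name {key_vault_name} --resource-group {rg_name} -o json',
        print_command_to_run = False, print_errors = False
    )

    if not check.success:
        az.run(
            f'az keyvault create --name {key_vault_name} --resource-group {rg_name} --location {rg_location} --enable-rbac-authorization true',
            f'Key Vault created: {key_vault_name}', 'Failed to create Key Vault'
        )

    # RBAC grant so the current user may create certificates in the vault.
    scope = (f'/subscriptions/{subscription_id}/resourceGroups/{rg_name}'
             f'/providers/Microsoft.KeyVault/vaults/{key_vault_name}')
    grant = az.run(
        f'az role assignment create --role "Key Vault Certificates Officer" --assignee {user_id} --scope {scope}',
        print_command_to_run = False, print_errors = False
    )

    return grant.success
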
@@ -1002,14 +990,14 @@ def _verify_infrastructure_specific(self, rg_name: str) -> bool: """ try: # Check Application Gateway - appgw_output = utils.run(f'az network application-gateway list -g {rg_name} --query "[0]" -o json', print_command_to_run = False, print_errors = False) + appgw_output = az.run(f'az network application-gateway list -g {rg_name} --query "[0]" -o json', print_command_to_run = False, print_errors = False) if appgw_output.success and appgw_output.json_data: appgw_name = appgw_output.json_data.get('name') print(f'✅ Application Gateway verified: {appgw_name}') # Check Container Apps if they exist (optional for this infrastructure) - aca_output = utils.run(f'az containerapp list -g {rg_name} --query "length(@)"', print_command_to_run = False, print_errors = False) + aca_output = az.run(f'az containerapp list -g {rg_name} --query "length(@)"', print_command_to_run = False, print_errors = False) if aca_output.success: aca_count = int(aca_output.text.strip()) @@ -1018,10 +1006,10 @@ def _verify_infrastructure_specific(self, rg_name: str) -> bool: # Verify private endpoint connections (optional - don't fail if it errors) try: - apim_output = utils.run(f'az apim list -g {rg_name} --query "[0].id" -o tsv', print_command_to_run = False, print_errors = False) + apim_output = az.run(f'az apim list -g {rg_name} --query "[0].id" -o tsv', print_command_to_run = False, print_errors = False) if apim_output.success and apim_output.text.strip(): apim_id = apim_output.text.strip() - pe_output = utils.run(f'az network private-endpoint-connection list --id {apim_id} --query "length(@)"', print_command_to_run = False, print_errors = False) + pe_output = az.run(f'az network private-endpoint-connection list --id {apim_id} --query "length(@)"', print_command_to_run = False, print_errors = False) if pe_output.success: pe_count = int(pe_output.text.strip()) print(f'✅ Private endpoint connections: {pe_count}') @@ -1078,12 +1066,12 @@ def _cleanup_single_resource(resource: dict) -> tuple[bool, str]: return False, f"Unknown resource type: {resource_type}" # Execute delete - output = utils.run(delete_cmd, f"{resource_type} '{resource_name}' deleted", f"Failed to delete {resource_type} '{resource_name}'", print_command_to_run = False, print_errors = False) + output = az.run(delete_cmd, f"{resource_type} '{resource_name}' deleted", f"Failed to delete {resource_type} '{resource_name}'", print_command_to_run = False, print_errors = False) if not output.success: return False, f"Delete failed for {resource_name}" # Execute purge - output = utils.run(purge_cmd, f"{resource_type} '{resource_name}' purged", f"Failed to purge {resource_type} '{resource_name}'", print_command_to_run = False, print_errors = False) + output = az.run(purge_cmd, f"{resource_type} '{resource_name}' purged", f"Failed to purge {resource_type} '{resource_name}'", print_command_to_run = False, print_errors = False) if not output.success: return False, f"Purge failed for {resource_name}" @@ -1229,13 +1217,13 @@ def _cleanup_resources(deployment_name: str, rg_name: str) -> None: print_info(f'Resource group : {rg_name}') # Show the deployment details (if it exists) - output = utils.run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Deployment not found (may be empty resource group)', print_command_to_run = False, print_errors = False) + output = az.run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Deployment not found (may be empty 
resource group)', print_command_to_run = False, print_errors = False)

    # Collect all resources that need to be deleted and purged
    resources_to_cleanup = []

    # List CognitiveService accounts
-    output = utils.run(f' az cognitiveservices account list -g {rg_name}', 'Listed CognitiveService accounts', 'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False)
+    output = az.run(f' az cognitiveservices account list -g {rg_name}', 'Listed CognitiveService accounts', 'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False)

    if output.success and output.json_data:
        for resource in output.json_data:
            resources_to_cleanup.append({
@@ -1246,7 +1234,7 @@ def _cleanup_resources(deployment_name: str, rg_name: str) -> None:
            })

    # List APIM resources
-    output = utils.run(f' az apim list -g {rg_name}', 'Listed APIM resources', 'Failed to list APIM resources', print_command_to_run = False, print_errors = False)
+    output = az.run(f' az apim list -g {rg_name}', 'Listed APIM resources', 'Failed to list APIM resources', print_command_to_run = False, print_errors = False)

    if output.success and output.json_data:
        for resource in output.json_data:
            resources_to_cleanup.append({
@@ -1257,7 +1245,7 @@ def _cleanup_resources(deployment_name: str, rg_name: str) -> None:
            })

    # List Key Vault resources
-    output = utils.run(f' az keyvault list -g {rg_name}', 'Listed Key Vault resources', 'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False)
+    output = az.run(f' az keyvault list -g {rg_name}', 'Listed Key Vault resources', 'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False)

    if output.success and output.json_data:
        for resource in output.json_data:
            resources_to_cleanup.append({
@@ -1276,7 +1264,7 @@ def _cleanup_resources(deployment_name: str, rg_name: str) -> None:

    # Delete the resource group last (always attempt this, even if deployment doesn't exist)
    print_message(f"Deleting resource group '{rg_name}'...")
-    output = utils.run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted', f'Failed to delete resource group '{rg_name}'", print_command_to_run = False, print_errors = False)
+    output = az.run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted", f"Failed to delete resource group '{rg_name}'", print_command_to_run = False, print_errors = False)

    print_message('Cleanup completed.')

@@ -1338,14 +1326,14 @@ def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name:
        _print_log(f"{thread_prefix}Resource group : {rg_name}", '👉🏽 ', thread_color)

    # Show the deployment details
-    output = utils.run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Failed to retrieve the deployment', print_command_to_run = False, print_errors = False)
+    output = az.run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Failed to retrieve the deployment', print_command_to_run = False, print_errors = False)

    if output.success and output.json_data:
        # Collect all resources that need to be deleted and purged
        resources_to_cleanup = []

        # List CognitiveService accounts
-        output = utils.run(f' az cognitiveservices account list -g {rg_name}', 'Listed CognitiveService accounts', 'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False)
+        output = az.run(f' az cognitiveservices account list -g {rg_name}', 'Listed CognitiveService accounts', 'Failed to list CognitiveService accounts', print_command_to_run = False, print_errors = False)

        if output.success and output.json_data:
            for resource in output.json_data:
                resources_to_cleanup.append({
@@ -1356,7 +1344,7 @@ def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name:
                })

        # List APIM resources
-        output = utils.run(f' az apim list -g {rg_name}', 'Listed APIM resources', 'Failed to list APIM resources', print_command_to_run = False, print_errors = False)
+        output = az.run(f' az apim list -g {rg_name}', 'Listed APIM resources', 'Failed to list APIM resources', print_command_to_run = False, print_errors = False)

        if output.success and output.json_data:
            for resource in output.json_data:
                resources_to_cleanup.append({
@@ -1367,7 +1355,7 @@ def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name:
                })

        # List Key Vault resources
-        output = utils.run(f' az keyvault list -g {rg_name}', 'Listed Key Vault resources', 'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False)
+        output = az.run(f' az keyvault list -g {rg_name}', 'Listed Key Vault resources', 'Failed to list Key Vault resources', print_command_to_run = False, print_errors = False)

        if output.success and output.json_data:
            for resource in output.json_data:
                resources_to_cleanup.append({
@@ -1389,7 +1377,7 @@ def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name:

    # Delete the resource group last
    with _print_lock:
        _print_log(f"{thread_prefix}Deleting resource group '{rg_name}'...", 'ℹ️ ', thread_color, show_time=True)
-    output = utils.run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted', f'Failed to delete resource group '{rg_name}'", print_command_to_run = False, print_errors = False)
+    output = az.run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted", f"Failed to delete resource group '{rg_name}'", print_command_to_run = False, print_errors = False)

    with _print_lock:
        _print_log(f"{thread_prefix}Cleanup completed.", 'ℹ️ ', thread_color, show_time=True)

diff --git a/shared/python/utils.py b/shared/python/utils.py
index 33967dd..6c2e592 100644
--- a/shared/python/utils.py
+++ b/shared/python/utils.py
@@ -18,37 +18,8 @@
 # APIM Samples imports
 import azure_resources as az
 from apimtypes import APIM_SKU, HTTP_VERB, INFRASTRUCTURE, Endpoints, Output, get_project_root
+from console import print_error, print_info, print_message, print_success, print_warning, print_val

-# ------------------------------
-# RE-EXPORTS (BACKWARD COMPATIBILITY)
-# ------------------------------
-#
-# The following imports are re-exported from the modules that are now split out from utils.
-# The re-exports are in place to maintain backward compatibility with existing code.
-# For new code, please import directly from the relevant modules.
-from console import ( - print_error, - print_info, - print_message, - print_success, - print_warning, - print_val, -) -from azure_resources import ( - check_apim_blob_permissions, - cleanup_old_jwt_signing_keys, - create_resource_group, - find_infrastructure_instances, - get_apim_url, - get_appgw_endpoint, - get_frontdoor_url, - get_infra_rg_name, - get_resource_group_location, - _run as run -) - - -does_resource_group_exist = az.does_resource_group_exist # ------------------------------ # HELPER FUNCTIONS @@ -138,7 +109,7 @@ def create_infrastructure(self, bypass_infrastructure_check: bool = False, allow try: # For infrastructure notebooks, check if update is allowed and handle user choice if allow_update: - rg_name = get_infra_rg_name(self.deployment, self.index) + rg_name = az.get_infra_rg_name(self.deployment, self.index) if az.does_resource_group_exist(rg_name): # Infrastructure exists, show update dialog try: @@ -157,7 +128,7 @@ def create_infrastructure(self, bypass_infrastructure_check: bool = False, allow raise SystemExit("User cancelled deployment") from exc # Check infrastructure existence for the normal flow - infrastructure_exists = az.does_resource_group_exist(get_infra_rg_name(self.deployment, self.index)) if not allow_update else False + infrastructure_exists = az.does_resource_group_exist(az.get_infra_rg_name(self.deployment, self.index)) if not allow_update else False if bypass_infrastructure_check or not infrastructure_exists: # Map infrastructure types to their folder names @@ -288,7 +259,7 @@ def _clean_up_jwt(self, apim_name: str) -> None: """Clean up old JWT signing keys after successful deployment.""" # Clean up old JWT signing keys after successful deployment - if not cleanup_old_jwt_signing_keys(apim_name, self.rg_name, self.jwt_key_name): + if not az.cleanup_old_jwt_signing_keys(apim_name, self.rg_name, self.jwt_key_name): print_warning('JWT key cleanup failed, but deployment was successful. 
Old keys may need manual cleanup.')

    def _query_and_select_infrastructure(self) -> tuple[INFRASTRUCTURE | None, int | None]:
@@ -309,13 +280,13 @@ def _query_and_select_infrastructure(self) -> tuple[INFRASTRUCTURE | None, int |
        available_options = []

        for infra in self.supported_infrastructures:
-            infra_options = find_infrastructure_instances(infra)
+            infra_options = az.find_infrastructure_instances(infra)
            available_options.extend(infra_options)

        # Check if the desired infrastructure/index combination exists
-        desired_rg_name = get_infra_rg_name(self.deployment, self._get_current_index())
+        desired_rg_name = az.get_infra_rg_name(self.deployment, self._get_current_index())
        desired_exists = any(
-            get_infra_rg_name(infra, idx) == desired_rg_name
+            az.get_infra_rg_name(infra, idx) == desired_rg_name
            for infra, idx in available_options
        )

@@ -364,10 +335,10 @@ def _query_and_select_infrastructure(self) -> tuple[INFRASTRUCTURE | None, int |

        for infra, index in available_options:
            index_str = index if index is not None else 'N/A'
-            rg_name = get_infra_rg_name(infra, index)
+            rg_name = az.get_infra_rg_name(infra, index)

            if QUERY_RG_LOCATION:
-                rg_location = get_resource_group_location(rg_name)
+                rg_location = az.get_resource_group_location(rg_name)
                print(f' {option_counter:>3} {infra.value:<20} {index_str:>8} {rg_name:<35} {rg_location:<15}')
            else:
                print(f' {option_counter:>3} {infra.value:<20} {index_str:>8} {rg_name:<35}')
@@ -470,7 +441,7 @@ def deploy_sample(self, bicep_parameters: dict) -> Output:
            # Update the notebook helper with the selected infrastructure
            self.deployment = selected_deployment
            self.index = selected_index
-            self.rg_name = get_infra_rg_name(self.deployment, self.index)
+            self.rg_name = az.get_infra_rg_name(self.deployment, self.index)

            # Verify the updates were applied correctly
            print('📝 Updated infrastructure variables')
@@ -583,7 +554,7 @@ def create_bicep_deployment_group(rg_name: str, rg_location: str, deployment: st
    """

    # Create the resource group if doesn't exist
-    create_resource_group(rg_name, rg_location, rg_tags)
+    az.create_resource_group(rg_name, rg_location, rg_tags)

    if hasattr(deployment, 'value'):
        deployment_name = deployment.value
@@ -626,7 +597,7 @@ def create_bicep_deployment_group(rg_name: str, rg_location: str, deployment: st
        cmd += ' --debug'

    print('\nDeploying bicep...\n')
-    return run(cmd, f"Deployment '{deployment_name}' succeeded", f"Deployment '{deployment_name}' failed.", print_command_to_run = False)
+    return az.run(cmd, f"Deployment '{deployment_name}' succeeded", f"Deployment '{deployment_name}' failed.", print_command_to_run = False)

# TODO: Reconcile this with apimtypes.py get_project_root
def find_project_root() -> str:
@@ -776,7 +747,7 @@ def does_infrastructure_exist(infrastructure: INFRASTRUCTURE, index: int, allow_
        print(f'🔍 Debug: does_infrastructure_exist called with allow_update_option={allow_update_option}')

    print('🔍 Checking if infrastructure already exists...')
-    rg_name = get_infra_rg_name(infrastructure, index)
+    rg_name = az.get_infra_rg_name(infrastructure, index)

    if az.does_resource_group_exist(rg_name):
        print(f'✅ Infrastructure already exists: {rg_name}\n')
@@ -1013,7 +984,7 @@ def wait_for_apim_blob_permissions(apim_name: str, storage_account_name: str, re
    print_info('Azure role assignments can take several minutes to propagate across Azure AD. 
This check will verify that APIM can access the blob storage before proceeding with tests.\n') - success = check_apim_blob_permissions(apim_name, storage_account_name, resource_group_name, max_wait_minutes) + success = az.check_apim_blob_permissions(apim_name, storage_account_name, resource_group_name, max_wait_minutes) if success: print_success('Permission check passed! Ready to proceed with secure blob access tests.') @@ -1030,7 +1001,7 @@ def test_url_preflight_check(deployment: INFRASTRUCTURE, rg_name: str, apim_gate print_message('Checking if the infrastructure architecture deployment uses Azure Front Door.', blank_above = True) - afd_endpoint_url = get_frontdoor_url(deployment, rg_name) + afd_endpoint_url = az.get_frontdoor_url(deployment, rg_name) if afd_endpoint_url: endpoint_url = afd_endpoint_url @@ -1046,9 +1017,9 @@ def get_endpoints(deployment: INFRASTRUCTURE, rg_name: str) -> Endpoints: endpoints = Endpoints(deployment) - endpoints.afd_endpoint_url = get_frontdoor_url(deployment, rg_name) - endpoints.apim_endpoint_url = get_apim_url(rg_name) - endpoints.appgw_hostname, endpoints.appgw_public_ip = get_appgw_endpoint(rg_name) + endpoints.afd_endpoint_url = az.get_frontdoor_url(deployment, rg_name) + endpoints.apim_endpoint_url = az.get_apim_url(rg_name) + endpoints.appgw_hostname, endpoints.appgw_public_ip = az.get_appgw_endpoint(rg_name) return endpoints diff --git a/tests/python/test_azure_resources.py b/tests/python/test_azure_resources.py index 620fc60..e52b369 100644 --- a/tests/python/test_azure_resources.py +++ b/tests/python/test_azure_resources.py @@ -41,7 +41,7 @@ def test_get_azure_role_guid_failure(): def test_does_resource_group_exist_true(): """Test checking if resource group exists - returns True.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(True, '{"name": "test-rg"}') result = az.does_resource_group_exist('test-rg') @@ -57,7 +57,7 @@ def test_does_resource_group_exist_true(): def test_does_resource_group_exist_false(): """Test checking if resource group exists - returns False.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(False, 'ResourceGroupNotFound') result = az.does_resource_group_exist('nonexistent-rg') @@ -68,7 +68,7 @@ def test_does_resource_group_exist_false(): def test_get_resource_group_location_success(): """Test successful retrieval of resource group location.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(True, 'eastus2\n') result = az.get_resource_group_location('test-rg') @@ -84,7 +84,7 @@ def test_get_resource_group_location_success(): def test_get_resource_group_location_failure(): """Test get_resource_group_location returns None on failure.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(False, 'error message') result = az.get_resource_group_location('nonexistent-rg') @@ -95,7 +95,7 @@ def test_get_resource_group_location_failure(): def test_get_resource_group_location_empty(): """Test get_resource_group_location returns None on empty response.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(True, '') result = az.get_resource_group_location('test-rg') @@ -110,7 +110,7 @@ def test_get_resource_group_location_empty(): def 
test_get_account_info_success(): """Test successful retrieval of account information.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: account_output = Output(True, '{}') account_output.json_data = { 'user': {'name': 'test.user@example.com'}, @@ -134,7 +134,7 @@ def test_get_account_info_success(): def test_get_account_info_failure(): """Test get_account_info raises exception on failure.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(False, 'authentication error') with pytest.raises(Exception) as exc_info: @@ -146,7 +146,7 @@ def test_get_account_info_failure(): def test_get_account_info_no_json(): """Test get_account_info raises exception when no JSON data.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: output = Output(True, 'some text') output.json_data = None mock_run.return_value = output @@ -199,7 +199,7 @@ def test_get_deployment_name_current_directory(mock_getcwd, mock_basename, mock_ def test_get_frontdoor_url_afd_success(): """Test successful Front Door URL retrieval.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: # Create mock outputs profile_output = Output(True, '') profile_output.json_data = [{"name": "test-afd"}] @@ -223,7 +223,7 @@ def test_get_frontdoor_url_afd_success(): def test_get_frontdoor_url_wrong_infrastructure(): """Test Front Door URL with wrong infrastructure type.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: result = az.get_frontdoor_url(INFRASTRUCTURE.SIMPLE_APIM, 'test-rg') assert result is None @@ -233,7 +233,7 @@ def test_get_frontdoor_url_wrong_infrastructure(): def test_get_frontdoor_url_no_profile(): """Test Front Door URL when no profile found.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(False, 'No profiles found') result = az.get_frontdoor_url(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg') @@ -244,7 +244,7 @@ def test_get_frontdoor_url_no_profile(): def test_get_frontdoor_url_no_endpoints(): """Test Front Door URL when profile exists but no endpoints.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: profile_output = Output(True, '') profile_output.json_data = [{'name': 'test-afd'}] endpoint_output = Output(False, 'No endpoints found') @@ -262,7 +262,7 @@ def test_get_frontdoor_url_no_endpoints(): def test_get_apim_url_success(): """Test successful APIM URL retrieval.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(True, '') mock_run.return_value.json_data = [{'name': 'test-apim', 'gatewayUrl': 'https://test-apim.azure-api.net'}] @@ -278,7 +278,7 @@ def test_get_apim_url_success(): def test_get_apim_url_failure(): """Test APIM URL retrieval failure.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(False, 'No APIM services found') result = az.get_apim_url('test-rg') @@ -289,7 +289,7 @@ def test_get_apim_url_failure(): def test_get_apim_url_no_gateway(): """Test APIM URL when service exists but no gateway URL.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(True, '') 
mock_run.return_value.json_data = [{'name': 'test-apim', 'gatewayUrl': None}] @@ -305,7 +305,7 @@ def test_get_apim_url_no_gateway(): def test_get_appgw_endpoint_success(): """Test successful Application Gateway endpoint retrieval.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: appgw_output = Output(True, '') appgw_output.json_data = [{ 'name': 'test-appgw', @@ -333,7 +333,7 @@ def test_get_appgw_endpoint_success(): def test_get_appgw_endpoint_no_gateway(): """Test Application Gateway endpoint when no gateway found.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(False, 'No gateways found') hostname, ip = az.get_appgw_endpoint('test-rg') @@ -345,7 +345,7 @@ def test_get_appgw_endpoint_no_gateway(): def test_get_appgw_endpoint_no_listeners(): """Test Application Gateway endpoint with no HTTP listeners.""" - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(True, '') mock_run.return_value.json_data = [{ 'name': 'test-appgw', @@ -410,7 +410,7 @@ def test_get_unique_suffix_for_resource_group_success(mock_unlink, mock_time, mo mock_file.name = '/tmp/template.json' mock_tempfile.return_value.__enter__.return_value = mock_file - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(True, 'abc123def456\n') result = az.get_unique_suffix_for_resource_group('test-rg') @@ -431,7 +431,7 @@ def test_get_unique_suffix_for_resource_group_failure(mock_unlink, mock_time, mo mock_file.name = '/tmp/template.json' mock_tempfile.return_value.__enter__.return_value = mock_file - with patch('azure_resources._run') as mock_run: + with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(False, 'Deployment failed') result = az.get_unique_suffix_for_resource_group('test-rg') diff --git a/tests/python/test_infrastructures.py b/tests/python/test_infrastructures.py index 96a26b7..d3a3e82 100644 --- a/tests/python/test_infrastructures.py +++ b/tests/python/test_infrastructures.py @@ -9,7 +9,6 @@ import console import infrastructures from apimtypes import INFRASTRUCTURE, APIM_SKU, APIMNetworkMode, API, PolicyFragment, HTTP_VERB, Output -import utils # ------------------------------ @@ -35,14 +34,6 @@ def mock_utils(): mock_utils.determine_shared_policy_path.return_value = '/mock/path/policy.xml' mock_utils.verify_infrastructure.return_value = True - # Mock the run command with proper return object - mock_output = Mock() - mock_output.success = True - mock_output.json_data = {'outputs': 'test'} - mock_output.get.return_value = 'https://test-apim.azure-api.net' - mock_output.getJson.return_value = ['api1', 'api2'] - mock_utils.run.return_value = mock_output - yield mock_utils @@ -57,6 +48,14 @@ def mock_az(): mock_az.get_account_info.return_value = ('test_user', 'test_user_id', 'test_tenant', 'test_subscription') mock_az.get_unique_suffix_for_resource_group.return_value = 'abc123def456' + # Mock the run command with proper return object + mock_output = Mock() + mock_output.success = True + mock_output.json_data = {'outputs': 'test'} + mock_output.get.return_value = 'https://test-apim.azure-api.net' + mock_output.getJson.return_value = ['api1', 'api2'] + mock_az.run.return_value = mock_output + yield mock_az @pytest.fixture @@ -365,13 +364,13 @@ def test_base_infrastructure_verification_success(mock_utils, mock_az): 
mock_sub_output.success = True mock_sub_output.text = 'test-subscription-key' - mock_utils.run.side_effect = [mock_apim_output, mock_api_output, mock_sub_output] + mock_az.run.side_effect = [mock_apim_output, mock_api_output, mock_sub_output] result = infra._verify_infrastructure('test-rg') assert result is True mock_az.does_resource_group_exist.assert_called_once_with('test-rg') - assert mock_utils.run.call_count >= 2 # At least APIM list and API count + assert mock_az.run.call_count >= 2 # At least APIM list and API count @pytest.mark.unit def test_base_infrastructure_verification_missing_rg(mock_utils, mock_az): @@ -407,7 +406,7 @@ def test_base_infrastructure_verification_missing_apim(mock_utils, mock_az): mock_apim_output.success = False mock_apim_output.json_data = None - mock_utils.run.return_value = mock_apim_output + mock_az.run.return_value = mock_apim_output result = infra._verify_infrastructure('test-rg') @@ -432,7 +431,7 @@ def test_infrastructure_specific_verification_base(mock_utils): # ------------------------------ @pytest.mark.unit -def test_apim_aca_infrastructure_verification_success(mock_utils): +def test_apim_aca_infrastructure_verification_success(mock_az): """Test APIM-ACA infrastructure-specific verification success.""" infra = infrastructures.ApimAcaInfrastructure( rg_location=TEST_LOCATION, @@ -445,19 +444,19 @@ def test_apim_aca_infrastructure_verification_success(mock_utils): mock_aca_output.success = True mock_aca_output.text = '3' # 3 Container Apps - mock_utils.run.return_value = mock_aca_output + mock_az.run.return_value = mock_aca_output result = infra._verify_infrastructure_specific('test-rg') assert result is True - mock_utils.run.assert_called_once_with( + mock_az.run.assert_called_once_with( 'az containerapp list -g test-rg --query "length(@)"', print_command_to_run=False, print_errors=False ) @pytest.mark.unit -def test_apim_aca_infrastructure_verification_failure(mock_utils): +def test_apim_aca_infrastructure_verification_failure(mock_az): """Test APIM-ACA infrastructure-specific verification failure.""" infra = infrastructures.ApimAcaInfrastructure( rg_location=TEST_LOCATION, @@ -469,7 +468,7 @@ def test_apim_aca_infrastructure_verification_failure(mock_utils): mock_aca_output = Mock() mock_aca_output.success = False - mock_utils.run.return_value = mock_aca_output + mock_az.run.return_value = mock_aca_output result = infra._verify_infrastructure_specific('test-rg') @@ -481,7 +480,7 @@ def test_apim_aca_infrastructure_verification_failure(mock_utils): # ------------------------------ @pytest.mark.unit -def test_afd_apim_infrastructure_verification_success(mock_utils): +def test_afd_apim_infrastructure_verification_success(mock_az): """Test AFD-APIM-PE infrastructure-specific verification success.""" infra = infrastructures.AfdApimAcaInfrastructure( rg_location=TEST_LOCATION, @@ -504,16 +503,16 @@ def test_afd_apim_infrastructure_verification_success(mock_utils): mock_apim_output.success = True mock_apim_output.text = 'apim-resource-id' - mock_utils.run.side_effect = [mock_afd_output, mock_aca_output, mock_apim_output] + mock_az.run.side_effect = [mock_afd_output, mock_aca_output, mock_apim_output] result = infra._verify_infrastructure_specific('test-rg') assert result is True # Allow for 2-3 calls (3rd call is optional for private endpoint verification) - assert mock_utils.run.call_count >= 2 + assert mock_az.run.call_count >= 2 @pytest.mark.unit -def test_afd_apim_infrastructure_verification_no_afd(mock_utils): +def 
test_afd_apim_infrastructure_verification_no_afd(mock_az): """Test AFD-APIM-PE infrastructure-specific verification with missing AFD.""" infra = infrastructures.AfdApimAcaInfrastructure( rg_location=TEST_LOCATION, @@ -526,7 +525,7 @@ def test_afd_apim_infrastructure_verification_no_afd(mock_utils): mock_afd_output.success = False mock_afd_output.json_data = None - mock_utils.run.return_value = mock_afd_output + mock_az.run.return_value = mock_afd_output result = infra._verify_infrastructure_specific('test-rg') @@ -637,10 +636,7 @@ def verify_infrastructure(self) -> bool: # Verify the deployment process mock_az.create_resource_group.assert_called_once() - # The utils.run method is now called multiple times (deployment + verification steps) - assert mock_utils.run.call_count >= 1 # At least one call for deployment - # Note: utils.verify_infrastructure is currently commented out in the actual code - # mock_utils.verify_infrastructure.assert_called_once() + assert mock_az.run.call_count >= 1 # At least one call for deployment # Verify directory changes - just check that chdir was called twice (to infra dir and back) assert mock_chdir.call_count == 2 @@ -669,7 +665,7 @@ def test_deploy_infrastructure_failure(mock_path_class, mock_chdir, mock_getcwd, # Mock failed deployment mock_output = Mock() mock_output.success = False - mock_utils.run.return_value = mock_output + mock_az.run.return_value = mock_output # Create a concrete subclass for testing class TestInfrastructure(infrastructures.Infrastructure): @@ -690,7 +686,7 @@ def verify_infrastructure(self) -> bool: # Verify the deployment process was attempted mock_az.create_resource_group.assert_called_once() - mock_utils.run.assert_called_once() + mock_az.run.assert_called_once() # Note: utils.verify_infrastructure is currently commented out in the actual code # mock_utils.verify_infrastructure.assert_not_called() # Should not be called on failure @@ -967,7 +963,7 @@ def test_policy_fragment_creation_robustness(mock_utils): # ------------------------------ def test_cleanup_resources_smoke(monkeypatch): - monkeypatch.setattr(utils, 'run', lambda *a, **kw: MagicMock(success=True, json_data={})) + monkeypatch.setattr(infrastructures.az, 'run', lambda *a, **kw: MagicMock(success=True, json_data={})) monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_error', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) @@ -1033,7 +1029,7 @@ def mock_run(command, ok_message='', error_message='', print_command_to_run=True # Default successful response for delete/purge operations return Output(success=True, text='Operation completed') - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures.az, 'run', mock_run) monkeypatch.setattr(console, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(console, 'print_message', lambda *a, **kw: None) @@ -1083,7 +1079,7 @@ def mock_run(command, ok_message='', error_message='', print_command_to_run=True # Default successful response return Output(success=True, text='Operation completed') - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures.az, 'run', mock_run) monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) @@ -1119,7 +1115,7 @@ def mock_run(command, ok_message='', error_message='', print_command_to_run=True # All other commands succeed return 
Output(success=True, json_data=[]) - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures.az, 'run', mock_run) monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) @@ -1137,7 +1133,7 @@ def mock_run(command, ok_message='', error_message='', print_command_to_run=True def mock_print(message): exception_caught.append(message) - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures.az, 'run', mock_run) monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) monkeypatch.setattr('builtins.print', mock_print) @@ -1274,7 +1270,7 @@ def mock_run(*args, **kwargs): monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls + monkeypatch.setattr(infrastructures.az, 'run', mock_run) # Mock Azure CLI calls monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) @@ -1319,8 +1315,8 @@ def mock_run(*args, **kwargs): return Output(success=True, text='{}') monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) - monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls + monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(infrastructures.az, 'run', mock_run) # Mock Azure CLI calls monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) @@ -1392,7 +1388,7 @@ def mock_run(*args, **kwargs): monkeypatch.setattr(infrastructures, '_cleanup_resources', mock_cleanup_resources) monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls + monkeypatch.setattr(infrastructures.az, 'run', mock_run) # Mock Azure CLI calls monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) @@ -1460,7 +1456,7 @@ def mock_run(command, ok_message='', error_message='', print_output=False, print def mock_get_infra_rg_name(deployment, index): return f'test-rg-{deployment.value}-{index}' if index else f'test-rg-{deployment.value}' - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures.az, 'run', mock_run) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) @@ -1503,7 +1499,7 @@ def mock_run(*args, **kwargs): monkeypatch.setattr(infrastructures, '_cleanup_resources', mock_cleanup_resources) monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI 
calls + monkeypatch.setattr(infrastructures.az, 'run', mock_run) # Mock Azure CLI calls monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) @@ -1566,7 +1562,7 @@ def mock_run(command, ok_message='', error_message='', print_command_to_run=True # Resource group deletion succeeds return Output(success=True, text='Operation completed') - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures.az, 'run', mock_run) monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) monkeypatch.setattr(console, 'print_success', lambda *a, **kw: None) @@ -1623,7 +1619,7 @@ def mock_run(command, ok_message='', error_message='', print_command_to_run=True # Default response for delete/purge operations return Output(success=True, text='Operation completed') - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(infrastructures.az, 'run', mock_run) monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index d27a987..a648111 100644 --- a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -23,9 +23,8 @@ def test_get_infra_rg_name(monkeypatch): class DummyInfra: value = 'foo' - monkeypatch.setattr(utils, 'validate_infrastructure', lambda x: x) - assert utils.get_infra_rg_name(DummyInfra) == 'apim-infra-foo' - assert utils.get_infra_rg_name(DummyInfra, 2) == 'apim-infra-foo-2' + assert az.get_infra_rg_name(DummyInfra) == 'apim-infra-foo' + assert az.get_infra_rg_name(DummyInfra, 2) == 'apim-infra-foo-2' def test_get_rg_name(): assert az.get_rg_name('foo') == 'apim-sample-foo' @@ -37,7 +36,7 @@ def test_get_rg_name(): def test_run_success(monkeypatch): monkeypatch.setattr('subprocess.check_output', lambda *a, **kw: b'{"a": 1}') - out = utils.run('echo', print_command_to_run=False) + out = az.run('echo', print_command_to_run=False) assert out.success is True assert out.json_data == {'a': 1} @@ -47,7 +46,7 @@ class DummyErr(Exception): def fail(*a, **kw): raise DummyErr() monkeypatch.setattr('subprocess.check_output', fail) - out = utils.run('bad', print_command_to_run=False) + out = az.run('bad', print_command_to_run=False) assert out.success is False assert isinstance(out.text, str) @@ -212,9 +211,9 @@ def test_build_infrastructure_tags_none_custom_tags(): def test_create_bicep_deployment_group_with_enum(monkeypatch): """Test create_bicep_deployment_group with INFRASTRUCTURE enum.""" mock_create_rg = MagicMock() - monkeypatch.setattr(utils, 'create_resource_group', mock_create_rg) + monkeypatch.setattr(az, 'create_resource_group', mock_create_rg) mock_run = MagicMock(return_value=MagicMock(success=True)) - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(az, 'run', mock_run) mock_open_func = mock_open() monkeypatch.setattr(builtins, 'open', mock_open_func) monkeypatch.setattr(builtins, 'print', MagicMock()) @@ -243,9 +242,9 @@ def test_create_bicep_deployment_group_with_enum(monkeypatch): def test_create_bicep_deployment_group_with_string(monkeypatch): """Test create_bicep_deployment_group with string deployment name.""" mock_create_rg = MagicMock() - monkeypatch.setattr(utils, 'create_resource_group', mock_create_rg) + monkeypatch.setattr(az, 'create_resource_group', mock_create_rg) mock_run = 
MagicMock(return_value=MagicMock(success=True)) - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(az, 'run', mock_run) mock_open_func = mock_open() monkeypatch.setattr(builtins, 'open', mock_open_func) monkeypatch.setattr(builtins, 'print', MagicMock()) @@ -271,9 +270,9 @@ def test_create_bicep_deployment_group_with_string(monkeypatch): def test_create_bicep_deployment_group_params_file_written(monkeypatch): """Test that bicep parameters are correctly written to file.""" mock_create_rg = MagicMock() - monkeypatch.setattr(utils, 'create_resource_group', mock_create_rg) + monkeypatch.setattr(az, 'create_resource_group', mock_create_rg) mock_run = MagicMock(return_value=MagicMock(success=True)) - monkeypatch.setattr(az, '_run', mock_run) + monkeypatch.setattr(az, 'run', mock_run) mock_open_func = mock_open() monkeypatch.setattr(builtins, 'open', mock_open_func) monkeypatch.setattr(builtins, 'print', MagicMock()) @@ -317,9 +316,9 @@ def mock_exists(path): def test_create_bicep_deployment_group_no_tags(monkeypatch): """Test create_bicep_deployment_group without tags.""" mock_create_rg = MagicMock() - monkeypatch.setattr(utils, 'create_resource_group', mock_create_rg) + monkeypatch.setattr(az, 'create_resource_group', mock_create_rg) mock_run = MagicMock(return_value=MagicMock(success=True)) - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(az, 'run', mock_run) mock_open_func = mock_open() monkeypatch.setattr(builtins, 'open', mock_open_func) monkeypatch.setattr(builtins, 'print', MagicMock()) @@ -338,9 +337,9 @@ def test_create_bicep_deployment_group_no_tags(monkeypatch): def test_create_bicep_deployment_group_deployment_failure(monkeypatch): """Test create_bicep_deployment_group when deployment fails.""" mock_create_rg = MagicMock() - monkeypatch.setattr(utils, 'create_resource_group', mock_create_rg) + monkeypatch.setattr(az, 'create_resource_group', mock_create_rg) mock_run = MagicMock(return_value=MagicMock(success=False)) - monkeypatch.setattr(utils, 'run', mock_run) + monkeypatch.setattr(az, 'run', mock_run) mock_open_func = mock_open() monkeypatch.setattr(builtins, 'open', mock_open_func) monkeypatch.setattr(builtins, 'print', MagicMock()) @@ -393,8 +392,8 @@ def test_print_functions_comprehensive(): def test_test_url_preflight_check_with_frontdoor(monkeypatch): """Test URL preflight check when Front Door is available.""" - monkeypatch.setattr(utils, 'get_frontdoor_url', lambda x, y: 'https://test.azurefd.net') - monkeypatch.setattr(utils, 'print_message', lambda x, **kw: None) + monkeypatch.setattr(az, 'get_frontdoor_url', lambda x, y: 'https://test.azurefd.net') + monkeypatch.setattr('console.print_message', lambda x, **kw: None) result = utils.test_url_preflight_check(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg', 'https://apim.com') assert result == 'https://test.azurefd.net' @@ -402,8 +401,8 @@ def test_test_url_preflight_check_with_frontdoor(monkeypatch): def test_test_url_preflight_check_no_frontdoor(monkeypatch): """Test URL preflight check when Front Door is not available.""" - monkeypatch.setattr(utils, 'get_frontdoor_url', lambda x, y: None) - monkeypatch.setattr(utils, 'print_message', lambda x, **kw: None) + monkeypatch.setattr(az, 'get_frontdoor_url', lambda x, y: None) + monkeypatch.setattr('console.print_message', lambda x, **kw: None) result = utils.test_url_preflight_check(INFRASTRUCTURE.SIMPLE_APIM, 'test-rg', 'https://apim.com') assert result == 'https://apim.com' @@ -441,10 +440,10 @@ def test_determine_policy_path_full_path(): def 
test_wait_for_apim_blob_permissions_success(monkeypatch): """Test wait_for_apim_blob_permissions with successful wait.""" - monkeypatch.setattr(utils, 'check_apim_blob_permissions', lambda *args: True) - monkeypatch.setattr(utils, 'print_info', lambda x: None) - monkeypatch.setattr(utils, 'print_success', lambda x: None) - monkeypatch.setattr(utils, 'print_error', lambda x: None) + monkeypatch.setattr(az, 'check_apim_blob_permissions', lambda *args: True) + monkeypatch.setattr('console.print_info', lambda x: None) + monkeypatch.setattr('console.print_success', lambda x: None) + monkeypatch.setattr('console.print_error', lambda x: None) result = utils.wait_for_apim_blob_permissions('test-apim', 'test-storage', 'test-rg', 1) assert result is True @@ -452,10 +451,10 @@ def test_wait_for_apim_blob_permissions_success(monkeypatch): def test_wait_for_apim_blob_permissions_failure(monkeypatch): """Test wait_for_apim_blob_permissions with failed wait.""" - monkeypatch.setattr(utils, 'check_apim_blob_permissions', lambda *args: False) - monkeypatch.setattr(utils, 'print_info', lambda x: None) - monkeypatch.setattr(utils, 'print_success', lambda x: None) - monkeypatch.setattr(utils, 'print_error', lambda x: None) + monkeypatch.setattr(az, 'check_apim_blob_permissions', lambda *args: False) + monkeypatch.setattr('console.print_info', lambda x: None) + monkeypatch.setattr('console.print_success', lambda x: None) + monkeypatch.setattr('console.print_error', lambda x: None) result = utils.wait_for_apim_blob_permissions('test-apim', 'test-storage', 'test-rg', 1) assert result is False @@ -494,10 +493,9 @@ def test_read_policy_xml_with_named_values_formatting(monkeypatch): (INFRASTRUCTURE.APIM_ACA, 'apim-aca'), ] ) -def test_get_infra_rg_name_different_types(infra_type, expected_suffix, monkeypatch): +def test_get_infra_rg_name_different_types(infra_type, expected_suffix): """Test get_infra_rg_name with different infrastructure types.""" - monkeypatch.setattr(utils, 'validate_infrastructure', lambda x: x) - result = utils.get_infra_rg_name(infra_type) + result = az.get_infra_rg_name(infra_type) assert result == f'apim-infra-{expected_suffix}' @@ -577,7 +575,7 @@ def mock_subprocess_check_output(cmd, **kwargs): monkeypatch.setattr('subprocess.check_output', mock_subprocess_check_output) - output = utils.run('test command', print_errors=False, print_output=False) + output = az.run('test command', print_errors=False, print_command_to_run=False) assert output.success is False assert output.text == 'test output' @@ -605,9 +603,9 @@ def mock_run_with_tags(*args, **kwargs): assert '--tags' in cmd # Should include tags (with default source=apim-sample) return utils.Output(success=True, text='{}') - monkeypatch.setattr(utils, 'run', mock_run_with_tags) + monkeypatch.setattr(az, 'run', mock_run_with_tags) - utils.create_resource_group('test-rg', 'eastus', {}) # Empty dict, function doesn't return anything + az.create_resource_group('test-rg', 'eastus', {}) # Empty dict, function doesn't return anything # ------------------------------ # ROLE AND PERMISSION TESTS @@ -880,11 +878,11 @@ def test_deploy_sample_with_infrastructure_selection(monkeypatch): lambda *args, **kwargs: mock_output) # Mock utility functions - monkeypatch.setattr(utils, 'get_infra_rg_name', + monkeypatch.setattr(az, 'get_infra_rg_name', lambda infra, idx: f'apim-infra-{infra.value}-{idx}') - monkeypatch.setattr(utils, 'print_error', lambda *args, **kwargs: None) - monkeypatch.setattr(utils, 'print_success', lambda *args, **kwargs: None) - 
monkeypatch.setattr(utils, 'print_val', lambda *args, **kwargs: None) + monkeypatch.setattr('console.print_error', lambda *args, **kwargs: None) + monkeypatch.setattr('console.print_success', lambda *args, **kwargs: None) + monkeypatch.setattr('console.print_val', lambda *args, **kwargs: None) # Test the deployment result = nb_helper.deploy_sample({'test': {'value': 'param'}}) From dc0f7b38fb983488a722dd91e7a8d7d80f53b412 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Fri, 12 Dec 2025 15:26:31 -0500 Subject: [PATCH 18/23] Move print from utils to console --- infrastructure/afd-apim-pe/create.ipynb | 3 +- .../afd-apim-pe/create_infrastructure.py | 3 +- infrastructure/apim-aca/create.ipynb | 3 +- infrastructure/appgw-apim-pe/create.ipynb | 3 +- .../appgw-apim-pe/create_infrastructure.py | 3 +- infrastructure/simple-apim/create.ipynb | 3 +- samples/_TEMPLATE/create.ipynb | 9 ++-- samples/authX-pro/create.ipynb | 10 ++--- samples/authX/create.ipynb | 9 ++-- samples/azure-maps/create.ipynb | 11 ++--- samples/general/create.ipynb | 9 ++-- samples/load-balancing/create.ipynb | 23 +++++----- samples/oauth-3rd-party/create.ipynb | 15 ++++--- samples/secure-blob-access/create.ipynb | 45 ++++++++++--------- tests/python/test_utils.py | 13 +++--- 15 files changed, 86 insertions(+), 76 deletions(-) diff --git a/infrastructure/afd-apim-pe/create.ipynb b/infrastructure/afd-apim-pe/create.ipynb index 43091ae..d91b694 100644 --- a/infrastructure/afd-apim-pe/create.ipynb +++ b/infrastructure/afd-apim-pe/create.ipynb @@ -20,7 +20,8 @@ "outputs": [], "source": [ "from apimtypes import APIM_SKU, INFRASTRUCTURE\n", - "from utils import InfrastructureNotebookHelper, print_ok\n", + "from utils import InfrastructureNotebookHelper\n", + "from console import print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", diff --git a/infrastructure/afd-apim-pe/create_infrastructure.py b/infrastructure/afd-apim-pe/create_infrastructure.py index 29837a8..34944d0 100644 --- a/infrastructure/afd-apim-pe/create_infrastructure.py +++ b/infrastructure/afd-apim-pe/create_infrastructure.py @@ -7,8 +7,7 @@ # APIM Samples imports import azure_resources as az -from apimtypes import APIM_SKU, API, GET_APIOperation, BACKEND_XML_POLICY_PATH -from apimtypes import INFRASTRUCTURE +from apimtypes import APIM_SKU, API, GET_APIOperation, BACKEND_XML_POLICY_PATH, INFRASTRUCTURE from infrastructures import AfdApimAcaInfrastructure import utils diff --git a/infrastructure/apim-aca/create.ipynb b/infrastructure/apim-aca/create.ipynb index 8c5f107..1110f56 100644 --- a/infrastructure/apim-aca/create.ipynb +++ b/infrastructure/apim-aca/create.ipynb @@ -18,7 +18,8 @@ "outputs": [], "source": [ "from apimtypes import APIM_SKU, INFRASTRUCTURE\n", - "from utils import InfrastructureNotebookHelper, print_ok\n", + "from utils import InfrastructureNotebookHelper\n", + "from console import print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", diff --git a/infrastructure/appgw-apim-pe/create.ipynb b/infrastructure/appgw-apim-pe/create.ipynb index 545453f..4f011ec 100644 --- a/infrastructure/appgw-apim-pe/create.ipynb +++ b/infrastructure/appgw-apim-pe/create.ipynb @@ -20,7 +20,8 @@ "outputs": [], "source": [ "from apimtypes import APIM_SKU, INFRASTRUCTURE\n", - "from utils import InfrastructureNotebookHelper, print_ok\n", + "from utils import InfrastructureNotebookHelper\n", + "from console import print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", diff --git 
a/infrastructure/appgw-apim-pe/create_infrastructure.py b/infrastructure/appgw-apim-pe/create_infrastructure.py index 95e8141..a0322bc 100644 --- a/infrastructure/appgw-apim-pe/create_infrastructure.py +++ b/infrastructure/appgw-apim-pe/create_infrastructure.py @@ -7,8 +7,7 @@ # APIM Samples imports import azure_resources as az -from apimtypes import APIM_SKU, API, GET_APIOperation, BACKEND_XML_POLICY_PATH -from apimtypes import INFRASTRUCTURE +from apimtypes import APIM_SKU, API, GET_APIOperation, BACKEND_XML_POLICY_PATH, INFRASTRUCTURE from infrastructures import AppGwApimPeInfrastructure import utils diff --git a/infrastructure/simple-apim/create.ipynb b/infrastructure/simple-apim/create.ipynb index 8be0418..78d1ab6 100644 --- a/infrastructure/simple-apim/create.ipynb +++ b/infrastructure/simple-apim/create.ipynb @@ -18,7 +18,8 @@ "outputs": [], "source": [ "from apimtypes import APIM_SKU, INFRASTRUCTURE\n", - "from utils import InfrastructureNotebookHelper, print_ok\n", + "from utils import InfrastructureNotebookHelper\n", + "from console import print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", diff --git a/samples/_TEMPLATE/create.ipynb b/samples/_TEMPLATE/create.ipynb index 9609715..aaf4a4a 100644 --- a/samples/_TEMPLATE/create.ipynb +++ b/samples/_TEMPLATE/create.ipynb @@ -17,6 +17,7 @@ "source": [ "import utils\n", "from apimtypes import *\n", + "from console import print_error, print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -55,7 +56,7 @@ "# apis: List[API] = [api1, apin]\n", "apis: List[API] = []\n", "\n", - "utils.print_ok('Notebook initialized')" + "print_okbook initialized')" ] }, { @@ -88,9 +89,9 @@ " apim_gateway_url = output.get('apimResourceGatewayURL', 'APIM API Gateway URL')\n", " apim_apis = output.getJson('apiOutputs', 'APIs')\n", "\n", - " utils.print_ok('Deployment completed successfully')\n", + " print_okoyment completed successfully')\n", "else:\n", - " utils.print_error(\"Deployment failed!\")\n", + " print_error(\"Deployment failed!\")\n", " raise SystemExit(1)" ] }, @@ -129,7 +130,7 @@ "\n", "tests.print_summary()\n", "\n", - "utils.print_ok('All done!')" + "print_okdone!')" ] } ], diff --git a/samples/authX-pro/create.ipynb b/samples/authX-pro/create.ipynb index b631664..004bae6 100644 --- a/samples/authX-pro/create.ipynb +++ b/samples/authX-pro/create.ipynb @@ -17,6 +17,7 @@ "source": [ "import utils\n", "from apimtypes import *\n", + "from console import print_error, print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -96,7 +97,7 @@ "# APIs Array\n", "apis: List[API] = [hr_employees, hr_benefits]\n", "\n", - "utils.print_ok('Notebook initialized')" + "print_ok('Notebook initialized')" ] }, { @@ -131,9 +132,9 @@ " apim_gateway_url = output.get('apimResourceGatewayURL', 'APIM API Gateway URL')\n", " apim_products = output.getJson('productOutputs', 'Products')\n", "\n", - " utils.print_ok('Deployment completed successfully')\n", + " print_ok('Deployment completed successfully')\n", "else:\n", - " utils.print_error(\"Deployment failed!\")\n", + " print_error(\"Deployment failed!\")\n", " raise SystemExit(1)" ] }, @@ -154,7 +155,6 @@ "metadata": {}, "outputs": [], "source": [ - "import utils\n", "from apimrequests import ApimRequests\n", "from apimtesting import ApimTesting\n", "from apimtypes import Role\n", @@ -243,7 +243,7 @@ "\n", "tests.print_summary()\n", "\n", - "utils.print_ok('All done!')" + "print_ok('All done!')" ] } ], diff --git 
a/samples/authX/create.ipynb b/samples/authX/create.ipynb index c5a3c26..02325c0 100644 --- a/samples/authX/create.ipynb +++ b/samples/authX/create.ipynb @@ -17,6 +17,7 @@ "source": [ "import utils\n", "from apimtypes import *\n", + "from console import print_error, print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -73,7 +74,7 @@ "# APIs Array\n", "apis: List[API] = [hr_employees]\n", "\n", - "utils.print_ok('Notebook initialized')" + "print_ok('Notebook initialized')" ] }, { @@ -106,9 +107,9 @@ " apim_gateway_url = output.get('apimResourceGatewayURL', 'APIM API Gateway URL')\n", " apim_apis = output.getJson('apiOutputs', 'APIs')\n", "\n", - " utils.print_ok('Deployment completed successfully')\n", + " print_ok('Deployment completed successfully')\n", "else:\n", - " utils.print_error(\"Deployment failed!\")\n", + " print_error(\"Deployment failed!\")\n", " raise SystemExit(1)" ] }, @@ -190,7 +191,7 @@ "\n", "tests.print_summary()\n", "\n", - "utils.print_ok('All done!')" + "print_ok('All done!')" ] } ], diff --git a/samples/azure-maps/create.ipynb b/samples/azure-maps/create.ipynb index 4b7e9bb..be28510 100644 --- a/samples/azure-maps/create.ipynb +++ b/samples/azure-maps/create.ipynb @@ -21,6 +21,7 @@ "source": [ "import utils\n", "from apimtypes import *\n", + "from console import print_error, print_info, print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -69,7 +70,7 @@ "# APIs Array\n", "apis: List[API] = [maps]\n", "\n", - "utils.print_ok('Notebook initialized')" + "print_ok('Notebook initialized')" ] }, { @@ -101,9 +102,9 @@ " apim_gateway_url = output.get('apimResourceGatewayURL', 'APIM API Gateway URL')\n", " apim_apis = output.getJson('apiOutputs', 'APIs')\n", "\n", - " utils.print_ok('Deployment completed successfully')\n", + " print_ok('Deployment completed successfully')\n", "else:\n", - " utils.print_error(\"Deployment failed!\")\n", + " print_error(\"Deployment failed!\")\n", " raise SystemExit(1)" ] }, @@ -149,7 +150,7 @@ "reqs = ApimRequests(endpoint_url, api_subscription_key, request_headers)\n", "\n", "# Test Azure Maps API endpoints\n", - "utils.print_info(\"Testing Azure Maps API operations...\")\n", + "print_info(\"Testing Azure Maps API operations...\")\n", "\n", "# Test default route with SAS token auth\n", "output = reqs.singleGet(f'{map_path}/default/geocode?query=15127%20NE%2024th%20Street%20Redmond%20WA', msg = 'Calling Default Route API with SAS Token Auth. Expect 200.')\n", @@ -184,7 +185,7 @@ "tests.verify(outputJson['message'], 'Access denied due to missing subscription key. 
Make sure to include subscription key when making requests to an API.')\n", "\n", "tests.print_summary()\n", - "utils.print_ok('✅ All tests completed successfully!')" + "print_ok('✅ All tests completed successfully!')" ] } ], diff --git a/samples/general/create.ipynb b/samples/general/create.ipynb index 6d7cf92..d4c95f9 100644 --- a/samples/general/create.ipynb +++ b/samples/general/create.ipynb @@ -17,6 +17,7 @@ "source": [ "import utils\n", "from apimtypes import *\n", + "from console import print_error, print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -57,7 +58,7 @@ "# APIs Array\n", "apis: List[API] = [request_headers, api_id]\n", "\n", - "utils.print_ok('Notebook initialized')" + "print_ok('Notebook initialized')" ] }, { @@ -88,9 +89,9 @@ " apim_gateway_url = output.get('apimResourceGatewayURL', 'APIM Gateway URL')\n", " apim_apis = output.getJson('apiOutputs', 'APIs')\n", "\n", - " utils.print_ok('Deployment completed successfully')\n", + " print_ok('Deployment completed successfully')\n", "else:\n", - " utils.print_error(\"Deployment failed!\")\n", + " print_error(\"Deployment failed!\")\n", " raise SystemExit(1)" ] }, @@ -143,7 +144,7 @@ "\n", "tests.print_summary()\n", "\n", - "utils.print_ok('All done!')" + "print_ok('All done!')" ] } ], diff --git a/samples/load-balancing/create.ipynb b/samples/load-balancing/create.ipynb index 643a26c..5b12b0d 100644 --- a/samples/load-balancing/create.ipynb +++ b/samples/load-balancing/create.ipynb @@ -17,6 +17,7 @@ "source": [ "import utils\n", "from apimtypes import *\n", + "from console import print_error, print_info, print_message, print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -64,7 +65,7 @@ "# APIs Array\n", "apis: List[API] = [lb_prioritized, lb_prioritized_weighted, lb_equal_weight, lb_unequal_weight]\n", "\n", - "utils.print_ok('Notebook initialized')" + "print_ok('Notebook initialized')" ] }, { @@ -97,9 +98,9 @@ " app_insights_name = output.get('applicationInsightsName', 'Application Insights Name')\n", " apim_apis = output.getJson('apiOutputs', 'APIs')\n", "\n", - " utils.print_ok('Deployment completed successfully')\n", + " print_ok('Deployment completed successfully')\n", "else:\n", - " utils.print_error(\"Deployment failed!\")\n", + " print_error(\"Deployment failed!\")\n", " raise SystemExit(1)" ] }, @@ -126,7 +127,7 @@ "\n", "def zzzs():\n", " sleep_in_s = 5\n", - " utils.print_message(f'Waiting for {sleep_in_s} seconds for the backend timeouts to reset before starting the next set of calls', blank_above=True)\n", + " print_message(f'Waiting for {sleep_in_s} seconds for the backend timeouts to reset before starting the next set of calls', blank_above=True)\n", " time.sleep(sleep_in_s)\n", "\n", "tests = ApimTesting(\"Load Balancing Sample Tests\", sample_folder, deployment)\n", @@ -154,43 +155,43 @@ "tests.verify(json.loads(output)['count'], 1)\n", "\n", "# Test different load balancing strategies\n", - "utils.print_info(\"Testing load balancing strategies...\")\n", + "print_info(\"Testing load balancing strategies...\")\n", "\n", "# 1) Prioritized distribution\n", - "utils.print_message('1/5: Starting API calls for prioritized distribution')\n", + "print_message('1/5: Starting API calls for prioritized distribution')\n", "api_results_prioritized = reqs.multiGet('/lb-prioritized', runs = 15, msg = 'Calling prioritized APIs')\n", "tests.verify(len(api_results_prioritized), 15)\n", "\n", "# 2) Weighted equal distribution\n", "zzzs()\n", - 
"utils.print_message('2/5: Starting API calls for weighted distribution (50/50)', blank_above = True)\n", + "print_message('2/5: Starting API calls for weighted distribution (50/50)', blank_above = True)\n", "reqs.subscriptionKey = apim_apis[2]['subscriptionPrimaryKey']\n", "api_results_weighted_equal = reqs.multiGet('/lb-weighted-equal', runs = 15, msg='Calling weighted (equal) APIs')\n", "tests.verify(len(api_results_weighted_equal), 15)\n", "\n", "# 3) Weighted unequal distribution\n", "zzzs()\n", - "utils.print_message('3/5: Starting API calls for weighted distribution (80/20)', blank_above = True)\n", + "print_message('3/5: Starting API calls for weighted distribution (80/20)', blank_above = True)\n", "reqs.subscriptionKey = apim_apis[3]['subscriptionPrimaryKey']\n", "api_results_weighted_unequal = reqs.multiGet('/lb-weighted-unequal', runs = 15, msg = 'Calling weighted (unequal) APIs')\n", "tests.verify(len(api_results_weighted_unequal), 15)\n", "\n", "# 4) Prioritized and weighted distribution\n", "zzzs()\n", - "utils.print_message('4/5: Starting API calls for prioritized & weighted distribution', blank_above=True)\n", + "print_message('4/5: Starting API calls for prioritized & weighted distribution', blank_above=True)\n", "reqs.subscriptionKey = apim_apis[1]['subscriptionPrimaryKey']\n", "api_results_prioritized_and_weighted = reqs.multiGet('/lb-prioritized-weighted', runs=20, msg='Calling prioritized & weighted APIs')\n", "tests.verify(len(api_results_prioritized_and_weighted), 20)\n", "\n", "# 5) Prioritized and weighted with recovery time\n", "zzzs()\n", - "utils.print_message('5/5: Starting API calls for prioritized & weighted distribution (500ms sleep)', blank_above = True)\n", + "print_message('5/5: Starting API calls for prioritized & weighted distribution (500ms sleep)', blank_above = True)\n", "api_results_prioritized_and_weighted_sleep = reqs.multiGet('/lb-prioritized-weighted', runs = 20, msg = 'Calling prioritized & weighted APIs', sleepMs=500)\n", "tests.verify(len(api_results_prioritized_and_weighted_sleep), 20)\n", "\n", "tests.print_summary()\n", "\n", - "utils.print_ok('All done!')" + "print_ok('All done!')" ] }, { diff --git a/samples/oauth-3rd-party/create.ipynb b/samples/oauth-3rd-party/create.ipynb index c7764e6..3cc45ab 100644 --- a/samples/oauth-3rd-party/create.ipynb +++ b/samples/oauth-3rd-party/create.ipynb @@ -27,6 +27,7 @@ "import utils\n", "from apimtypes import *\n", "import os\n", + "from console import print_error, print_info, print_ok\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -57,7 +58,7 @@ "\n", "# Validate OAuth credentials\n", "if not client_id or not client_secret:\n", - " utils.print_error('Please set the SPOTIFY_CLIENT_ID and SPOTIFY_CLIENT_SECRET environment variables in the root .env file before running this notebook.')\n", + " print_error('Please set the SPOTIFY_CLIENT_ID and SPOTIFY_CLIENT_SECRET environment variables in the root .env file before running this notebook.')\n", " raise ValueError('Missing Spotify OAuth credentials')\n", "\n", "# Define the APIs and their operations and policies\n", @@ -95,7 +96,7 @@ "# APIs Array\n", "apis: List[API] = [spotify]\n", "\n", - "utils.print_ok('Notebook initialized')" + "print_ok('Notebook initialized')" ] }, { @@ -131,9 +132,9 @@ " apim_apis = output.getJson('apiOutputs', 'APIs')\n", " spotify_oauth_redirect_url = output.get('spotifyOAuthRedirectUrl', 'OAuth Redirect URL')\n", "\n", - " utils.print_ok('Deployment completed successfully')\n", + " 
print_ok('Deployment completed successfully')\n", "else:\n", - " utils.print_error(\"Deployment failed!\")\n", + " print_error(\"Deployment failed!\")\n", " raise SystemExit(1)" ] }, @@ -207,7 +208,7 @@ " UserHelper.get_user_by_role(Role.MARKETING_MEMBER),\n", " nb_helper.jwt_key_value\n", ")\n", - "utils.print_info(f'JWT token for Marketing Member:\\n{encoded_jwt_token_marketing_member}')\n", + "print_info(f'JWT token for Marketing Member:\\n{encoded_jwt_token_marketing_member}')\n", "\n", "# Test Spotify API integration\n", "reqs = ApimRequests(endpoint_url, api_subscription_key, request_headers)\n", @@ -219,7 +220,7 @@ "\n", "artist = json.loads(output)\n", "tests.verify(artist['name'], 'Taylor Swift')\n", - "utils.print_info(f'{artist[\"name\"]} has a popularity rating of {artist[\"popularity\"]} with {artist[\"followers\"][\"total\"]:,} followers on Spotify.')\n", + "print_info(f'{artist[\"name\"]} has a popularity rating of {artist[\"popularity\"]} with {artist[\"followers\"][\"total\"]:,} followers on Spotify.')\n", "\n", "# Test unauthorized access (should fail with 401)\n", "reqsNoApiSubscription = ApimRequests(endpoint_url, None, request_headers)\n", @@ -229,7 +230,7 @@ "tests.verify(outputJson['message'], 'Access denied due to missing subscription key. Make sure to include subscription key when making requests to an API.')\n", "\n", "tests.print_summary()\n", - "utils.print_ok('✅ All OAuth integration tests completed successfully!')" + "print_ok('✅ All OAuth integration tests completed successfully!')" ] } ], diff --git a/samples/secure-blob-access/create.ipynb b/samples/secure-blob-access/create.ipynb index 57977d9..8f510e0 100644 --- a/samples/secure-blob-access/create.ipynb +++ b/samples/secure-blob-access/create.ipynb @@ -19,6 +19,7 @@ "source": [ "import utils\n", "from apimtypes import *\n", + "from console import print_error, print_info, print_ok, print_val, print_warning\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -96,7 +97,7 @@ "# APIs Array\n", "apis: List[API] = [secure_blob]\n", "\n", - "utils.print_ok('Notebook initialized')" + "print_ok('Notebook initialized')" ] }, { @@ -137,9 +138,9 @@ " container_name = output.get('blobContainerName', 'Blob Container Name')\n", " apim_apis = output.getJson('apiOutputs', 'APIs')\n", "\n", - " utils.print_ok('Deployment completed successfully')\n", + " print_ok('Deployment completed successfully')\n", "else:\n", - " utils.print_error(\"Deployment failed!\")\n", + " print_error(\"Deployment failed!\")\n", " raise SystemExit(1)" ] }, @@ -161,16 +162,16 @@ "outputs": [], "source": [ "# Verify APIM managed identity permissions for blob access\n", - "utils.print_info('Verifying APIM Managed Identity Permissions...')\n", + "print_info('Verifying APIM Managed Identity Permissions...')\n", "\n", "# Check permissions with automatic retry (role assignments can take time to propagate)\n", "permissions_ready = utils.wait_for_apim_blob_permissions(apim_name, storage_account_name, rg_name, 5)\n", "\n", "if permissions_ready:\n", - " utils.print_ok('APIM permissions verified successfully')\n", + " print_ok('APIM permissions verified successfully')\n", "else:\n", - " utils.print_warning('Permission verification incomplete - you may encounter 503/403 errors during testing')\n", - " utils.print_info('If you see 503 errors in the next step, wait a few minutes and try again.')" + " print_warning('Permission verification incomplete - you may encounter 503/403 errors during testing')\n", + " print_info('If you see 503 
errors in the next step, wait a few minutes and try again.')" ] }, { @@ -208,26 +209,26 @@ " if sas_url == 'N/A':\n", " return response\n", "\n", - " utils.print_info(f\"Secure Blob URL: {sas_url}\")\n", - " utils.print_info(f\"Expires At: {access_info.get('expire_at', 'N/A')}\")\n", + " print_info(f\"Secure Blob URL: {sas_url}\")\n", + " print_info(f\"Expires At: {access_info.get('expire_at', 'N/A')}\")\n", "\n", " # Test direct blob access using the valet key (SAS URL)\n", - " utils.print_info(\"🧪 Testing direct blob access...\")\n", + " print_info(\"🧪 Testing direct blob access...\")\n", "\n", " try:\n", " blob_response = requests.get(access_info['sas_url'])\n", " if blob_response.status_code == 200:\n", - " utils.print_info(\"✅ Direct blob access successful!\")\n", + " print_info(\"✅ Direct blob access successful!\")\n", " content_preview = blob_response.text[:200] + \"...\" if len(blob_response.text) > 200 else blob_response.text\n", - " utils.print_val(\"Content preview:\", content_preview.strip(), True)\n", + " print_val(\"Content preview:\", content_preview.strip(), True)\n", " return content_preview.strip()\n", " else:\n", - " utils.print_error(f\"❌ Direct blob access failed: {blob_response.status_code}\")\n", + " print_error(f\"❌ Direct blob access failed: {blob_response.status_code}\")\n", " return blob_response.status_code\n", " except Exception as e:\n", - " utils.print_error(f\"Error accessing blob directly: {str(e)}\")\n", + " print_error(f\"Error accessing blob directly: {str(e)}\")\n", " except (json.JSONDecodeError, AttributeError):\n", - " utils.print_error(\"Failed to parse JSON response or response is not in expected format.\")\n", + " print_error(\"Failed to parse JSON response or response is not in expected format.\")\n", " return response\n", "\n", "tests = ApimTesting(\"Secure Blob Access Sample Tests\", sample_folder, deployment)\n", @@ -249,40 +250,40 @@ "api_subscription_key = apim_apis[0]['subscriptionPrimaryKey']\n", "\n", "# Test 1: Authorized user with HR Member role\n", - "utils.print_info(\"1️⃣ Testing with Authorized User (HR Member role)\")\n", + "print_info(\"1️⃣ Testing with Authorized User (HR Member role)\")\n", "\n", "# Create JWT token for HR Member role\n", "encoded_jwt_token_hr_member = AuthFactory.create_symmetric_jwt_token_for_user(\n", " UserHelper.get_user_by_role(Role.HR_MEMBER),\n", " nb_helper.jwt_key_value\n", ")\n", - "utils.print_info(f'JWT token for HR Member:\\n{encoded_jwt_token_hr_member}')\n", + "print_info(f'JWT token for HR Member:\\n{encoded_jwt_token_hr_member}')\n", "\n", "# Test secure blob access with authorization\n", "reqsApimAuthorized = ApimRequests(endpoint_url, api_subscription_key, request_headers)\n", "reqsApimAuthorized.headers['Authorization'] = f'Bearer {encoded_jwt_token_hr_member}'\n", "\n", - "utils.print_info(f\"🔒 Getting secure access for {file_name} with authorized user...\")\n", + "print_info(f\"🔒 Getting secure access for {file_name} with authorized user...\")\n", "response = reqsApimAuthorized.singleGet(f'/{api_prefix}secure-files/{file_name}',\n", " msg=f'Requesting secure access for {file_name} (authorized)')\n", "output = handleResponse(response)\n", "tests.verify(output, 'This is an HR document.')\n", "\n", "# Test 2: Unauthorized user without required role\n", - "utils.print_info(\"2️⃣ Testing with Unauthorized User (no role)\")\n", + "print_info(\"2️⃣ Testing with Unauthorized User (no role)\")\n", "\n", "# Create JWT token for user with no role\n", "encoded_jwt_token_no_role = 
AuthFactory.create_symmetric_jwt_token_for_user(\n", " UserHelper.get_user_by_role(Role.NONE),\n", " nb_helper.jwt_key_value\n", ")\n", - "utils.print_info(f'JWT token for user with no role:\\n{encoded_jwt_token_no_role}')\n", + "print_info(f'JWT token for user with no role:\\n{encoded_jwt_token_no_role}')\n", "\n", "# Test access denial for unauthorized user\n", "reqsApimUnauthorized = ApimRequests(endpoint_url, api_subscription_key, request_headers)\n", "reqsApimUnauthorized.headers['Authorization'] = f'Bearer {encoded_jwt_token_no_role}'\n", "\n", - "utils.print_info(f\"🔒 Attempting to obtain secure access for {file_name} with unauthorized user (expect 401/403)...\")\n", + "print_info(f\"🔒 Attempting to obtain secure access for {file_name} with unauthorized user (expect 401/403)...\")\n", "response = reqsApimUnauthorized.singleGet(f'/{api_prefix}secure-files/{file_name}',\n", " msg=f'Requesting secure access for {file_name} (unauthorized)')\n", "output = handleResponse(response)\n", @@ -290,7 +291,7 @@ "\n", "tests.print_summary()\n", "\n", - "utils.print_ok('All done!')" + "print_ok('All done!')" ] } ], diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index a648111..6945964 100644 --- a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -15,6 +15,7 @@ import utils import json_utils import azure_resources as az +from console import print_error, print_info, print_message, print_success, print_val, print_warning # ------------------------------ # get_infra_rg_name & get_rg_name @@ -371,12 +372,12 @@ def test_print_functions_comprehensive(): try: # Test all print functions - utils.print_info('Test info message') - utils.print_success('Test success message') - utils.print_warning('Test warning message') - utils.print_error('Test error message') - utils.print_message('Test message') - utils.print_val('Test key', 'Test value') + print_info('Test info message') + print_success('Test success message') + print_warning('Test warning message') + print_error('Test error message') + print_message('Test message') + print_val('Test key', 'Test value') output = captured_output.getvalue() assert 'Test info message' in output From 34c34fcefa608998b7c175356fde981924bfebb1 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Fri, 12 Dec 2025 15:37:24 -0500 Subject: [PATCH 19/23] Clean-up and fixes --- infrastructure/afd-apim-pe/create.ipynb | 4 +-- infrastructure/apim-aca/create.ipynb | 35 +++++++++++++++++++++-- infrastructure/simple-apim/clean-up.ipynb | 26 ++++++++++++++--- samples/_TEMPLATE/create.ipynb | 23 ++++++++++----- samples/authX-pro/create.ipynb | 4 +-- samples/authX/create.ipynb | 4 +-- samples/azure-maps/create.ipynb | 4 +-- samples/general/create.ipynb | 2 +- 8 files changed, 79 insertions(+), 23 deletions(-) diff --git a/infrastructure/afd-apim-pe/create.ipynb b/infrastructure/afd-apim-pe/create.ipynb index d91b694..68bc930 100644 --- a/infrastructure/afd-apim-pe/create.ipynb +++ b/infrastructure/afd-apim-pe/create.ipynb @@ -56,9 +56,9 @@ ], "metadata": { "kernelspec": { - "display_name": "APIM Samples Python 3.12", + "display_name": ".venv (3.12.10)", "language": "python", - "name": "apim-samples" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/infrastructure/apim-aca/create.ipynb b/infrastructure/apim-aca/create.ipynb index 1110f56..c7a9595 100644 --- a/infrastructure/apim-aca/create.ipynb +++ b/infrastructure/apim-aca/create.ipynb @@ -15,7 +15,36 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + 
"outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Initializing Infrastructure Notebook Helper with the following parameters:\n", + "\n", + "👉🏽 \u001b[1;34mLocation : eastus2\u001b[0m \n", + "👉🏽 \u001b[1;34mInfrastructure : apim-aca\u001b[0m \n", + "👉🏽 \u001b[1;34mIndex : 1\u001b[0m \n", + "👉🏽 \u001b[1;34mAPIM SKU : Basicv2\u001b[0m \n", + "\n", + "👉🏽 \u001b[1;34mCurrent user : simonkurtz@microsoft.com\u001b[0m\n", + "👉🏽 \u001b[1;34mCurrent user ID : 744cffd5-e99d-4cc0-9fe3-2d284e07a1c4\u001b[0m\n", + "👉🏽 \u001b[1;34mTenant ID : 16b3c013-d300-468d-ac64-7eda0820b6d3\u001b[0m\n", + "👉🏽 \u001b[1;34mSubscription ID : 5fb73327-9152-4f64-bf8a-90dc0cc4ad8f\u001b[0m\n", + "\n", + "🚀 Creating infrastructure...\n", + "\n", + " Infrastructure : apim-aca\n", + " Index : 1\n", + " Resource group : apim-infra-apim-aca-1\n", + " Location : eastus2\n", + " APIM SKU : Basicv2\n", + "\n", + "📁 Changed working directory to: C:\\Dev\\Azure-Samples\\Apim-Samples\\infrastructure\\apim-aca\n", + "📝 Updated the policy XML in the bicep parameters file 'params.json'\n" + ] + } + ], "source": [ "from apimtypes import APIM_SKU, INFRASTRUCTURE\n", "from utils import InfrastructureNotebookHelper\n", @@ -54,9 +83,9 @@ ], "metadata": { "kernelspec": { - "display_name": "APIM Samples Python 3.12", + "display_name": ".venv (3.12.10)", "language": "python", - "name": "apim-samples" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/infrastructure/simple-apim/clean-up.ipynb b/infrastructure/simple-apim/clean-up.ipynb index 1c0d70c..05e66e7 100644 --- a/infrastructure/simple-apim/clean-up.ipynb +++ b/infrastructure/simple-apim/clean-up.ipynb @@ -11,9 +11,27 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "👉🏽 \u001b[1;34mCleaning up resources for simple-apim - 1\u001b[0m \n", + "👉🏽 \u001b[1;34mResource group : apim-infra-simple-apim-1\u001b[0m \n", + "👉🏽 \u001b[1;34mFound 1 resource(s) to clean up. 
Processing in parallel...\u001b[0m \n", + "👉🏽 \u001b[1;34mStarting parallel cleanup of 1 resource(s) with 1 worker(s)...\u001b[0m \n", + "👉🏽 \u001b[1;34mDeleting and purging apim 'apim-pwpn7jvwjh7em'...\u001b[0m \n", + "✅ \u001b[1;32m✓ Cleaned up apim 'apim-pwpn7jvwjh7em' (1/1)\u001b[0m ⌚ 15:32:07.104892 \n", + "\n", + "✅ \u001b[1;32mAll 1 resource(s) cleaned up successfully!\u001b[0m ⌚ 15:32:07.104892 \n", + "ℹ️ \u001b[1;32mDeleting resource group 'apim-infra-simple-apim-1'...\u001b[0m ⌚ 15:32:07.104892 \n", + "ℹ️ \u001b[1;32mCleanup completed.\u001b[0m ⌚ 15:33:26.835597 \n" + ] + } + ], "source": [ "from apimtypes import INFRASTRUCTURE\n", "from infrastructures import cleanup_infra_deployments\n", @@ -26,9 +44,9 @@ ], "metadata": { "kernelspec": { - "display_name": "APIM Samples Python 3.12", + "display_name": ".venv (3.12.10)", "language": "python", - "name": "apim-samples" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/samples/_TEMPLATE/create.ipynb b/samples/_TEMPLATE/create.ipynb index aaf4a4a..ac49f66 100644 --- a/samples/_TEMPLATE/create.ipynb +++ b/samples/_TEMPLATE/create.ipynb @@ -26,7 +26,7 @@ "rg_location = 'eastus2'\n", "index = 1\n", "apim_sku = APIM_SKU.BASICV2 # Options: 'BASICV2', 'STANDARDV2', 'PREMIUMV2'\n", - "deployment = INFRASTRUCTURE.AFD_APIM_PE\n", + "deployment = INFRASTRUCTURE.AFD_APIM_PE # Options: see supported_infras below\n", "api_prefix = 'template-' # ENTER A PREFIX FOR THE APIS TO REDUCE COLLISION POTENTIAL WITH OTHER SAMPLES\n", "tags = ['tag1', 'tag2'] # ENTER DESCRIPTIVE TAGS\n", "\n", @@ -56,7 +56,7 @@ "# apis: List[API] = [api1, apin]\n", "apis: List[API] = []\n", "\n", - "print_okbook initialized')" + "print_ok('Notebook initialized')" ] }, { @@ -89,7 +89,7 @@ " apim_gateway_url = output.get('apimResourceGatewayURL', 'APIM API Gateway URL')\n", " apim_apis = output.getJson('apiOutputs', 'APIs')\n", "\n", - " print_okoyment completed successfully')\n", + " print_ok('Deployment completed successfully')\n", "else:\n", " print_error(\"Deployment failed!\")\n", " raise SystemExit(1)" @@ -119,10 +119,19 @@ "tests = ApimTesting(\"Template Sample Tests\", sample_folder, nb_helper.deployment)\n", "\n", "# Example API testing (uncomment and customize as needed)\n", - "# api_subscription_key = apim_apis[0]['subscriptionPrimaryKey']\n", + "# Determine endpoints, URLs, etc. prior to test execution\n", + "# endpoints = utils.get_endpoints(deployment, rg_name)\n", + "# endpoint_url = None\n", + "# request_headers = None\n", "\n", - "# Preflight: Check if the infrastructure architecture deployment uses Azure Front Door. If so, assume that APIM is not directly accessible and use the Front Door URL instead.\n", - "# endpoint_url = utils.test_url_preflight_check(deployment, rg_name, apim_gateway_url)\n", + "# if (endpoints.appgw_hostname and endpoints.appgw_public_ip):\n", + "# endpoint_url = f'https://{endpoints.appgw_public_ip}'\n", + "# request_headers: dict[str, str] = {\"Host\": endpoints.appgw_hostname}\n", + "# else:\n", + "# # Preflight: Check if the infrastructure architecture deployment uses Azure Front Door. If so, assume that APIM is not directly accessible and use the Front Door URL instead.\n", + "# endpoint_url = utils.test_url_preflight_check(deployment, rg_name, apim_gateway_url)\n", + "\n", + "# ********** TEST EXECUTIONS **********\n", "\n", "# reqs = ApimRequests(afd_endpoint_url, api_subscription_key)\n", "# output = reqs.singleGet('/', msg = 'Calling API via Azure Front Door. 
Expect 200.')\n", @@ -130,7 +139,7 @@ "\n", "tests.print_summary()\n", "\n", - "print_okdone!')" + "print_ok('All done!')" ] } ], diff --git a/samples/authX-pro/create.ipynb b/samples/authX-pro/create.ipynb index 004bae6..e52d033 100644 --- a/samples/authX-pro/create.ipynb +++ b/samples/authX-pro/create.ipynb @@ -249,9 +249,9 @@ ], "metadata": { "kernelspec": { - "display_name": "APIM Samples Python 3.12", + "display_name": ".venv (3.12.10)", "language": "python", - "name": "apim-samples" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/samples/authX/create.ipynb b/samples/authX/create.ipynb index 02325c0..966ae37 100644 --- a/samples/authX/create.ipynb +++ b/samples/authX/create.ipynb @@ -197,9 +197,9 @@ ], "metadata": { "kernelspec": { - "display_name": "APIM Samples Python 3.12", + "display_name": ".venv (3.12.10)", "language": "python", - "name": "apim-samples" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/samples/azure-maps/create.ipynb b/samples/azure-maps/create.ipynb index be28510..ef328c1 100644 --- a/samples/azure-maps/create.ipynb +++ b/samples/azure-maps/create.ipynb @@ -191,9 +191,9 @@ ], "metadata": { "kernelspec": { - "display_name": "APIM Samples Python 3.12", + "display_name": ".venv (3.12.10)", "language": "python", - "name": "apim-samples" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/samples/general/create.ipynb b/samples/general/create.ipynb index d4c95f9..a89ce8b 100644 --- a/samples/general/create.ipynb +++ b/samples/general/create.ipynb @@ -25,7 +25,7 @@ "\n", "rg_location = 'eastus2'\n", "index = 1\n", - "apim_sku = APIM_SKU.STANDARDV2 # Options: 'BASICV2', 'STANDARDV2', 'PREMIUMV2'\n", + "apim_sku = APIM_SKU.BASICV2 # Options: 'BASICV2', 'STANDARDV2', 'PREMIUMV2'\n", "deployment = INFRASTRUCTURE.SIMPLE_APIM # Options: see supported_infras below\n", "api_prefix = '' # Not defining a prefix for general as these APIs will live off the root\n", "tags = ['general']\n", From ff2e24dd24b6c8b9cacac62ec7714cdeef179607 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Fri, 12 Dec 2025 15:38:30 -0500 Subject: [PATCH 20/23] Clean-up --- infrastructure/apim-aca/create.ipynb | 31 +--------------------------- 1 file changed, 1 insertion(+), 30 deletions(-) diff --git a/infrastructure/apim-aca/create.ipynb b/infrastructure/apim-aca/create.ipynb index c7a9595..997150d 100644 --- a/infrastructure/apim-aca/create.ipynb +++ b/infrastructure/apim-aca/create.ipynb @@ -15,36 +15,7 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Initializing Infrastructure Notebook Helper with the following parameters:\n", - "\n", - "👉🏽 \u001b[1;34mLocation : eastus2\u001b[0m \n", - "👉🏽 \u001b[1;34mInfrastructure : apim-aca\u001b[0m \n", - "👉🏽 \u001b[1;34mIndex : 1\u001b[0m \n", - "👉🏽 \u001b[1;34mAPIM SKU : Basicv2\u001b[0m \n", - "\n", - "👉🏽 \u001b[1;34mCurrent user : simonkurtz@microsoft.com\u001b[0m\n", - "👉🏽 \u001b[1;34mCurrent user ID : 744cffd5-e99d-4cc0-9fe3-2d284e07a1c4\u001b[0m\n", - "👉🏽 \u001b[1;34mTenant ID : 16b3c013-d300-468d-ac64-7eda0820b6d3\u001b[0m\n", - "👉🏽 \u001b[1;34mSubscription ID : 5fb73327-9152-4f64-bf8a-90dc0cc4ad8f\u001b[0m\n", - "\n", - "🚀 Creating infrastructure...\n", - "\n", - " Infrastructure : apim-aca\n", - " Index : 1\n", - " Resource group : apim-infra-apim-aca-1\n", - " Location : eastus2\n", - " APIM SKU : Basicv2\n", - "\n", - "📁 Changed working directory to: 
C:\\Dev\\Azure-Samples\\Apim-Samples\\infrastructure\\apim-aca\n", - "📝 Updated the policy XML in the bicep parameters file 'params.json'\n" - ] - } - ], + "outputs": [], "source": [ "from apimtypes import APIM_SKU, INFRASTRUCTURE\n", "from utils import InfrastructureNotebookHelper\n", From b45a694b66af9566903a7ab6558abe12e9593f22 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Fri, 12 Dec 2025 15:40:36 -0500 Subject: [PATCH 21/23] Update test matrix --- tests/Test-Matrix.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/Test-Matrix.md b/tests/Test-Matrix.md index 3525642..e2fe8aa 100644 --- a/tests/Test-Matrix.md +++ b/tests/Test-Matrix.md @@ -2,14 +2,14 @@ **Date / time**: __________________ -| Sample / Infrastructure | SIMPLE APIM | APIM ACA | AFD APIM PE | -|:----------------------------|-----------------------------|-----------------------------|-----------------------------| -| **INFRASTRUCTURE** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | -| **authX** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | -| **authX-pro** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | -| **azure-maps** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | -| **general** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | -| **load-balancing** | **N/A** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | -| **oauth-3rd-party** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | -| **secure-blob-access** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | -| **INFRASTRUCTURE clean-up** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | +| Sample / Infrastructure | SIMPLE APIM | APIM ACA | AFD APIM PE | App Gateway APIM ACA | +|:----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------| +| **INFRASTRUCTURE** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | +| **authX** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | +| **authX-pro** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | +| **azure-maps** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | +| **general** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | +| **load-balancing** | **N/A** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | +| **oauth-3rd-party** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | +| **secure-blob-access** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | +| **INFRASTRUCTURE clean-up** | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | ▢ Local
▢ Dev Container | From c8cbba687da8311b5355b15591d77672bfb559bb Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Fri, 12 Dec 2025 15:45:39 -0500 Subject: [PATCH 22/23] Fix wrong import --- TROUBLESHOOTING.md | 8 ++++---- samples/_TEMPLATE/create.ipynb | 3 ++- samples/authX-pro/create.ipynb | 3 ++- samples/authX/create.ipynb | 3 ++- samples/azure-maps/create.ipynb | 3 ++- samples/general/create.ipynb | 3 ++- samples/load-balancing/create.ipynb | 3 ++- samples/oauth-3rd-party/create.ipynb | 3 ++- samples/secure-blob-access/create.ipynb | 3 ++- 9 files changed, 20 insertions(+), 12 deletions(-) diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md index 4811f5a..0525974 100644 --- a/TROUBLESHOOTING.md +++ b/TROUBLESHOOTING.md @@ -33,7 +33,7 @@ ERROR: The content for this response was already consumed The following parameters were supplied, but do not correspond to any parameters defined in the template: 'parameterName' ``` -3. Check that all parameters in your notebook's `bicep_parameters` dictionary match the parameters defined in the `main.bicep` file. +3. Check that all parameters in your notebook's `bicep_parameters` dictionary match the parameters defined in the `main.bicep` file. **Example Fix:** If the error mentions `apimSku` parameter not found: @@ -44,7 +44,7 @@ bicep_parameters = { 'apimSku': { 'value': 'Developer' } # This parameter doesn't exist } -# ✅ Correct - only includes defined parameters +# ✅ Correct - only includes defined parameters bicep_parameters = { 'apis': { 'value': [api.to_dict() for api in apis] } } @@ -214,7 +214,7 @@ Scroll up to see what is executed. #### Bicep -In one case, `%USERPROFILE%\.azure\bin` contained a `bicep.exe` file but with a zero-length. The CLI would recognize that the file is there but fail on execution. +In one case, `%USERPROFILE%\.azure\bin` contained a `bicep.exe` file but with a zero-length. The CLI would recognize that the file is there but fail on execution. 1. Verify that bicep is indeed failing: `az bicep version` 2. Delete `%USERPROFILE%\.azure\bin\bicep.exe`. @@ -234,7 +234,7 @@ Resource group 'name' could not be found 1. Create the infrastructure first by running the appropriate infrastructure deployment from the `/infrastructure/` folder 2. 
Verify the resource group name matches the expected pattern: ```python - rg_name = utils.get_infra_rg_name(deployment, index) + rg_name = get_infra_rg_name(deployment, index) ``` ### APIM Service Not Found diff --git a/samples/_TEMPLATE/create.ipynb b/samples/_TEMPLATE/create.ipynb index ac49f66..745be84 100644 --- a/samples/_TEMPLATE/create.ipynb +++ b/samples/_TEMPLATE/create.ipynb @@ -18,6 +18,7 @@ "import utils\n", "from apimtypes import *\n", "from console import print_error, print_ok\n", + "from azure_resources import get_infra_rg_name\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -37,7 +38,7 @@ "# ------------------------------\n", "\n", "sample_folder = '_TEMPLATE'\n", - "rg_name = utils.get_infra_rg_name(deployment, index)\n", + "rg_name = get_infra_rg_name(deployment, index)\n", "supported_infras = [INFRASTRUCTURE.AFD_APIM_PE, INFRASTRUCTURE.APIM_ACA, INFRASTRUCTURE.SIMPLE_APIM]\n", "nb_helper = utils.NotebookHelper(sample_folder, rg_name, rg_location, deployment, supported_infras, index = index, apim_sku = apim_sku)\n", "\n", diff --git a/samples/authX-pro/create.ipynb b/samples/authX-pro/create.ipynb index e52d033..8772fc6 100644 --- a/samples/authX-pro/create.ipynb +++ b/samples/authX-pro/create.ipynb @@ -18,6 +18,7 @@ "import utils\n", "from apimtypes import *\n", "from console import print_error, print_ok\n", + "from azure_resources import get_infra_rg_name\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -38,7 +39,7 @@ "\n", "# Create the notebook helper with JWT support\n", "sample_folder = 'authX-pro'\n", - "rg_name = utils.get_infra_rg_name(deployment, index)\n", + "rg_name = get_infra_rg_name(deployment, index)\n", "supported_infras = [INFRASTRUCTURE.AFD_APIM_PE, INFRASTRUCTURE.APIM_ACA, INFRASTRUCTURE.APPGW_APIM_PE, INFRASTRUCTURE.SIMPLE_APIM]\n", "nb_helper = utils.NotebookHelper(sample_folder, rg_name, rg_location, deployment, supported_infras, True, index = index, apim_sku = apim_sku)\n", "\n", diff --git a/samples/authX/create.ipynb b/samples/authX/create.ipynb index 966ae37..2a90469 100644 --- a/samples/authX/create.ipynb +++ b/samples/authX/create.ipynb @@ -18,6 +18,7 @@ "import utils\n", "from apimtypes import *\n", "from console import print_error, print_ok\n", + "from azure_resources import get_infra_rg_name\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -38,7 +39,7 @@ "\n", "# Create the notebook helper with JWT support\n", "sample_folder = 'authX'\n", - "rg_name = utils.get_infra_rg_name(deployment, index)\n", + "rg_name = get_infra_rg_name(deployment, index)\n", "supported_infras = [INFRASTRUCTURE.AFD_APIM_PE, INFRASTRUCTURE.APIM_ACA, INFRASTRUCTURE.APPGW_APIM_PE, INFRASTRUCTURE.SIMPLE_APIM]\n", "nb_helper = utils.NotebookHelper(sample_folder, rg_name, rg_location, deployment, supported_infras, True, index = index, apim_sku = apim_sku)\n", "\n", diff --git a/samples/azure-maps/create.ipynb b/samples/azure-maps/create.ipynb index ef328c1..91b4324 100644 --- a/samples/azure-maps/create.ipynb +++ b/samples/azure-maps/create.ipynb @@ -22,6 +22,7 @@ "import utils\n", "from apimtypes import *\n", "from console import print_error, print_info, print_ok\n", + "from azure_resources import get_infra_rg_name\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -41,7 +42,7 @@ "# ------------------------------\n", "\n", "sample_folder = 'azure-maps'\n", - "rg_name = utils.get_infra_rg_name(deployment, index)\n", + "rg_name = get_infra_rg_name(deployment, 
index)\n", "supported_infras = [INFRASTRUCTURE.AFD_APIM_PE, INFRASTRUCTURE.APIM_ACA, INFRASTRUCTURE.APPGW_APIM_PE, INFRASTRUCTURE.SIMPLE_APIM]\n", "nb_helper = utils.NotebookHelper(sample_folder, rg_name, rg_location, deployment, supported_infras, index = index, apim_sku = apim_sku)\n", "azure_maps_url = 'https://atlas.microsoft.com'\n", diff --git a/samples/general/create.ipynb b/samples/general/create.ipynb index a89ce8b..5325846 100644 --- a/samples/general/create.ipynb +++ b/samples/general/create.ipynb @@ -18,6 +18,7 @@ "import utils\n", "from apimtypes import *\n", "from console import print_error, print_ok\n", + "from azure_resources import get_infra_rg_name\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -37,7 +38,7 @@ "# ------------------------------\n", "\n", "sample_folder = 'general'\n", - "rg_name = utils.get_infra_rg_name(deployment, index)\n", + "rg_name = get_infra_rg_name(deployment, index)\n", "supported_infras = [INFRASTRUCTURE.AFD_APIM_PE, INFRASTRUCTURE.APIM_ACA, INFRASTRUCTURE.APPGW_APIM_PE, INFRASTRUCTURE.SIMPLE_APIM]\n", "nb_helper = utils.NotebookHelper(sample_folder, rg_name, rg_location, deployment, supported_infras, index = index, apim_sku = apim_sku)\n", "\n", diff --git a/samples/load-balancing/create.ipynb b/samples/load-balancing/create.ipynb index 5b12b0d..47c0159 100644 --- a/samples/load-balancing/create.ipynb +++ b/samples/load-balancing/create.ipynb @@ -18,6 +18,7 @@ "import utils\n", "from apimtypes import *\n", "from console import print_error, print_info, print_message, print_ok\n", + "from azure_resources import get_infra_rg_name\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -37,7 +38,7 @@ "# ------------------------------\n", "\n", "sample_folder = 'load-balancing'\n", - "rg_name = utils.get_infra_rg_name(deployment, index)\n", + "rg_name = get_infra_rg_name(deployment, index)\n", "supported_infras = [INFRASTRUCTURE.AFD_APIM_PE, INFRASTRUCTURE.APIM_ACA, INFRASTRUCTURE.APPGW_APIM_PE]\n", "nb_helper = utils.NotebookHelper(sample_folder, rg_name, rg_location, deployment, supported_infras, index = index, apim_sku = apim_sku)\n", "\n", diff --git a/samples/oauth-3rd-party/create.ipynb b/samples/oauth-3rd-party/create.ipynb index 3cc45ab..beac2f6 100644 --- a/samples/oauth-3rd-party/create.ipynb +++ b/samples/oauth-3rd-party/create.ipynb @@ -28,6 +28,7 @@ "from apimtypes import *\n", "import os\n", "from console import print_error, print_info, print_ok\n", + "from azure_resources import get_infra_rg_name\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -48,7 +49,7 @@ "\n", "# Create the notebook helper with JWT support\n", "sample_folder = 'oauth-3rd-party'\n", - "rg_name = utils.get_infra_rg_name(deployment, index)\n", + "rg_name = get_infra_rg_name(deployment, index)\n", "supported_infras = [INFRASTRUCTURE.AFD_APIM_PE, INFRASTRUCTURE.APIM_ACA, INFRASTRUCTURE.APPGW_APIM_PE, INFRASTRUCTURE.SIMPLE_APIM]\n", "nb_helper = utils.NotebookHelper(sample_folder, rg_name, rg_location, deployment, supported_infras, True, index = index, apim_sku = apim_sku)\n", "\n", diff --git a/samples/secure-blob-access/create.ipynb b/samples/secure-blob-access/create.ipynb index 8f510e0..631b4c0 100644 --- a/samples/secure-blob-access/create.ipynb +++ b/samples/secure-blob-access/create.ipynb @@ -20,6 +20,7 @@ "import utils\n", "from apimtypes import *\n", "from console import print_error, print_info, print_ok, print_val, print_warning\n", + "from azure_resources import 
get_infra_rg_name\n", "\n", "# ------------------------------\n", "# USER CONFIGURATION\n", @@ -40,7 +41,7 @@ "\n", "# Create the notebook helper with JWT support\n", "sample_folder = 'secure-blob-access'\n", - "rg_name = utils.get_infra_rg_name(deployment, index)\n", + "rg_name = get_infra_rg_name(deployment, index)\n", "supported_infras = [INFRASTRUCTURE.AFD_APIM_PE, INFRASTRUCTURE.APIM_ACA, INFRASTRUCTURE.APPGW_APIM_PE, INFRASTRUCTURE.SIMPLE_APIM]\n", "nb_helper = utils.NotebookHelper(sample_folder, rg_name, rg_location, deployment, supported_infras, True, index = index, apim_sku = apim_sku)\n", "\n", From 157fbd9b641be7236b7587c3a414c3dfb1857d32 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Fri, 12 Dec 2025 15:48:48 -0500 Subject: [PATCH 23/23] Format --- .devcontainer/README.md | 20 ++++++++++---------- .devcontainer/devcontainer.json | 2 +- .devcontainer/post-start-setup.sh | 2 +- requirements.txt | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.devcontainer/README.md b/.devcontainer/README.md index 85a8c86..95e94f5 100644 --- a/.devcontainer/README.md +++ b/.devcontainer/README.md @@ -160,22 +160,22 @@ Our devcontainer uses two key lifecycle commands optimized for prebuild: #### `onCreateCommand` (Container Creation) ```bash # Creates Python virtual environment and registers Jupyter kernel -echo '🚀 Creating Python virtual environment in workspace...' && -/usr/local/bin/python3.12 -m venv /workspaces/Apim-Samples/.venv --copies && -source /workspaces/Apim-Samples/.venv/bin/activate && -pip install --upgrade pip setuptools wheel ipykernel && +echo '🚀 Creating Python virtual environment in workspace...' && +/usr/local/bin/python3.12 -m venv /workspaces/Apim-Samples/.venv --copies && +source /workspaces/Apim-Samples/.venv/bin/activate && +pip install --upgrade pip setuptools wheel ipykernel && python -m ipykernel install --user --name=apim-samples --display-name='APIM Samples Python 3.12' ``` #### `updateContentCommand` (Content Updates) ```bash # Installs Python packages and configures environment -source /workspaces/Apim-Samples/.venv/bin/activate && -pip install -r requirements.txt && -pip install pytest pytest-cov coverage && -python setup/setup_python_path.py --generate-env && -az config set core.login_experience_v2=off && -az extension add --name containerapp --only-show-errors && +source /workspaces/Apim-Samples/.venv/bin/activate && +pip install -r requirements.txt && +pip install pytest pytest-cov coverage && +python setup/setup_python_path.py --generate-env && +az config set core.login_experience_v2=off && +az extension add --name containerapp --only-show-errors && az extension add --name front-door --only-show-errors ``` diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index d7e450a..2752dd2 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -157,4 +157,4 @@ "runArgs": [ "--init" ] -} \ No newline at end of file +} diff --git a/.devcontainer/post-start-setup.sh b/.devcontainer/post-start-setup.sh index f59de03..4921b3f 100644 --- a/.devcontainer/post-start-setup.sh +++ b/.devcontainer/post-start-setup.sh @@ -77,7 +77,7 @@ echo " - Wait for all extensions to install" echo " --> ✅ (.venv) prefix will appear when you open a new terminal" echo "" echo " 3. Start using the infrastructures and samples!" -echo " - You may initially need to select the kernel (top-right above the" +echo " - You may initially need to select the kernel (top-right above the" echo " Jupyter notebook). 
If so, select the '.venv' Python environment." echo "" echo "============================================================================" diff --git a/requirements.txt b/requirements.txt index 2a1352b..ce9f50b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,4 +16,4 @@ python-dotenv # Dev tools for linting, formatting, testing, etc. pylint pytest -pytest-cov \ No newline at end of file +pytest-cov
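
Reviewer note: taken together, patches 17 through 22 split the old `utils` surface into three modules — console output helpers in `console`, Azure CLI and resource helpers in `azure_resources` (conventionally imported as `az`), and the notebook helpers that remain in `utils`. Below is a condensed sketch of a sample setup cell under that layout; module and function names are taken from the diffs above, while the concrete argument values are illustrative, borrowed from the `general` sample:

```python
# Sketch only: the post-refactor import layout for a sample notebook setup cell.
import utils                                    # NotebookHelper and friends stay in utils
from apimtypes import APIM_SKU, INFRASTRUCTURE
from console import print_ok                    # print helpers now live in console
from azure_resources import get_infra_rg_name   # Azure helpers now live in azure_resources

deployment = INFRASTRUCTURE.SIMPLE_APIM
index = 1

# Resolves to 'apim-infra-simple-apim-1' (per the clean-up output earlier in the series)
rg_name = get_infra_rg_name(deployment, index)

nb_helper = utils.NotebookHelper('general', rg_name, 'eastus2', deployment,
                                 [INFRASTRUCTURE.SIMPLE_APIM],
                                 index=index, apim_sku=APIM_SKU.BASICV2)

print_ok('Notebook initialized')
```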