From a84c1446d1b7a4a3a297176f2c0617528ccf9761 Mon Sep 17 00:00:00 2001 From: Simon Kurtz Date: Thu, 7 Aug 2025 00:56:37 -0400 Subject: [PATCH] Add parallel cleanup --- shared/python/utils.py | 191 +++++++++++++++++- tests/python/test_infrastructures.py | 4 +- tests/python/test_utils.py | 282 ++++++++++++++++++++++++--- 3 files changed, 443 insertions(+), 34 deletions(-) diff --git a/shared/python/utils.py b/shared/python/utils.py index c02229c..abed46e 100644 --- a/shared/python/utils.py +++ b/shared/python/utils.py @@ -15,6 +15,8 @@ import secrets import base64 import inspect +import threading +from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path from typing import Any, Optional, Tuple @@ -31,10 +33,19 @@ BOLD_G = '\x1b[1;32m' # green BOLD_R = '\x1b[1;31m' # red BOLD_Y = '\x1b[1;33m' # yellow +BOLD_C = '\x1b[1;36m' # cyan +BOLD_M = '\x1b[1;35m' # magenta +BOLD_W = '\x1b[1;37m' # white RESET = '\x1b[0m' +# Thread colors for parallel operations +THREAD_COLORS = [BOLD_B, BOLD_G, BOLD_Y, BOLD_C, BOLD_M, BOLD_W] + CONSOLE_WIDTH = 175 +# Thread-safe print lock +_print_lock = threading.Lock() + # ------------------------------ # HELPER FUNCTIONS @@ -1212,10 +1223,111 @@ def read_policy_xml(policy_xml_filepath_or_filename: str, named_values: dict[str return policy_template_xml +def _cleanup_resources_thread_safe(deployment_name: str, rg_name: str, thread_prefix: str, thread_color: str) -> tuple[bool, str]: + """ + Thread-safe wrapper for _cleanup_resources with formatted output. + + Args: + deployment_name (str): The deployment name (string). + rg_name (str): The resource group name. + thread_prefix (str): The thread prefix for output formatting. + thread_color (str): ANSI color code for this thread. 
+ + Returns: + tuple[bool, str]: (success, error_message) + """ + try: + with _print_lock: + _print_log(f"{thread_prefix}Starting cleanup for resource group: {rg_name}", '👉đŸŊ ', thread_color) + + # Create a modified version of _cleanup_resources that uses thread-safe printing + _cleanup_resources_with_thread_safe_printing(deployment_name, rg_name, thread_prefix, thread_color) + + with _print_lock: + _print_log(f"{thread_prefix}Completed cleanup for resource group: {rg_name}", '👉đŸŊ ', thread_color) + + return True, "" + + except Exception as e: + error_msg = f'An error occurred during cleanup of {rg_name}: {str(e)}' + with _print_lock: + _print_log(f"{thread_prefix}{error_msg}", '⛔ ', BOLD_R, show_time=True) + traceback.print_exc() + return False, error_msg + + +def _cleanup_resources_with_thread_safe_printing(deployment_name: str, rg_name: str, thread_prefix: str, thread_color: str) -> None: + """ + Clean up resources with thread-safe printing (internal implementation for parallel execution). + This is a modified version of _cleanup_resources that uses thread-safe output. 
+ """ + if not deployment_name: + with _print_lock: + _print_log(f"{thread_prefix}Missing deployment name parameter.", '⛔ ', BOLD_R) + return + + if not rg_name: + with _print_lock: + _print_log(f"{thread_prefix}Missing resource group name parameter.", '⛔ ', BOLD_R) + return + + try: + with _print_lock: + _print_log(f"{thread_prefix}Resource group : {rg_name}", '👉đŸŊ ', thread_color) + + # Show the deployment details + output = run(f'az deployment group show --name {deployment_name} -g {rg_name} -o json', 'Deployment retrieved', 'Failed to retrieve the deployment', print_command_to_run = False) + + if output.success and output.json_data: + # Delete and purge CognitiveService accounts + output = run(f' az cognitiveservices account list -g {rg_name}', f'Listed CognitiveService accounts', f'Failed to list CognitiveService accounts', print_command_to_run = False) + + if output.success and output.json_data: + for resource in output.json_data: + with _print_lock: + _print_log(f"{thread_prefix}Deleting and purging Cognitive Service Account '{resource['name']}'...", '👉đŸŊ ', thread_color) + output = run(f"az cognitiveservices account delete -g {rg_name} -n {resource['name']}", f"Cognitive Services '{resource['name']}' deleted", f"Failed to delete Cognitive Services '{resource['name']}'", print_command_to_run = False) + output = run(f"az cognitiveservices account purge -g {rg_name} -n {resource['name']} --location \"{resource['location']}\"", f"Cognitive Services '{resource['name']}' purged", f"Failed to purge Cognitive Services '{resource['name']}'", print_command_to_run = False) + + # Delete and purge APIM resources + output = run(f' az apim list -g {rg_name}', f'Listed APIM resources', f'Failed to list APIM resources', print_command_to_run = False) + + if output.success and output.json_data: + for resource in output.json_data: + with _print_lock: + _print_log(f"{thread_prefix}Deleting and purging API Management '{resource['name']}'...", '👉đŸŊ ', thread_color) + output 
= run(f"az apim delete -n {resource['name']} -g {rg_name} -y", f"API Management '{resource['name']}' deleted", f"Failed to delete API Management '{resource['name']}'", print_command_to_run = False) + output = run(f"az apim deletedservice purge --service-name {resource['name']} --location \"{resource['location']}\"", f"API Management '{resource['name']}' purged", f"Failed to purge API Management '{resource['name']}'", print_command_to_run = False) + + # Delete and purge Key Vault resources + output = run(f' az keyvault list -g {rg_name}', f'Listed Key Vault resources', f'Failed to list Key Vault resources', print_command_to_run = False) + + if output.success and output.json_data: + for resource in output.json_data: + with _print_lock: + _print_log(f"{thread_prefix}Deleting and purging Key Vault '{resource['name']}'...", '👉đŸŊ ', thread_color) + output = run(f"az keyvault delete -n {resource['name']} -g {rg_name}", f"Key Vault '{resource['name']}' deleted", f"Failed to delete Key Vault '{resource['name']}'", print_command_to_run = False) + output = run(f"az keyvault purge -n {resource['name']} --location \"{resource['location']}\"", f"Key Vault '{resource['name']}' purged", f"Failed to purge Key Vault '{resource['name']}'", print_command_to_run = False) + + # Delete the resource group last + with _print_lock: + _print_log(f"{thread_prefix}Deleting resource group '{rg_name}'...", 'â„šī¸ ', thread_color, show_time=True) + output = run(f'az group delete --name {rg_name} -y', f"Resource group '{rg_name}' deleted", f"Failed to delete resource group '{rg_name}'", print_command_to_run = False) + + with _print_lock: + _print_log(f"{thread_prefix}Cleanup completed.", 'â„šī¸ ', thread_color, show_time=True) + + except Exception as e: + with _print_lock: + _print_log(f"{thread_prefix}An error occurred during cleanup: {e}", '⛔ ', BOLD_R) + traceback.print_exc() + + def cleanup_infra_deployments(deployment: INFRASTRUCTURE, indexes: int | list[int] | None = None) -> None: """
Clean up infrastructure deployments by deployment enum and index/indexes. Obtains the infra resource group name for each index and calls the private cleanup method. + For multiple indexes, runs cleanup operations in parallel for better performance. Args: deployment (INFRASTRUCTURE): The infrastructure deployment enum value. @@ -1229,13 +1341,84 @@ def cleanup_infra_deployments(deployment: INFRASTRUCTURE, indexes: int | list[in else: indexes_list = [indexes] - i = 1 - for idx in indexes_list: - print_info(f'{i}/{len(indexes_list)}: Cleaning up resources for {deployment} - {idx}', True) + # If only one index, run sequentially (no need for threading overhead) + if len(indexes_list) <= 1: + idx = indexes_list[0] if indexes_list else None + print_info(f'Cleaning up resources for {deployment.value} - {idx}', True) rg_name = get_infra_rg_name(deployment, idx) _cleanup_resources(deployment.value, rg_name) - i += 1 + print_ok('Cleanup completed!') + return + + # For multiple indexes, run in parallel + print_info(f'Starting parallel cleanup for {len(indexes_list)} infrastructure instances', True) + print_info(f'Infrastructure: {deployment.value}') + print_info(f'Indexes: {indexes_list}') + print() + + # Determine max workers (reasonable limit to avoid overwhelming the system) + max_workers = min(len(indexes_list), 4) # Cap at 4 concurrent threads + + cleanup_tasks = [] + for i, idx in enumerate(indexes_list): + rg_name = get_infra_rg_name(deployment, idx) + thread_color = THREAD_COLORS[i % len(THREAD_COLORS)] + thread_prefix = f"{thread_color}[{deployment.value}-{idx}]{RESET}: " + + cleanup_tasks.append({ + 'deployment_name': deployment.value, + 'rg_name': rg_name, + 'thread_prefix': thread_prefix, + 'thread_color': thread_color, + 'index': idx + }) + # Execute cleanup tasks in parallel + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Submit all tasks + future_to_task = { + executor.submit( + _cleanup_resources_thread_safe, + task['deployment_name'], + 
task['rg_name'], + task['thread_prefix'], + task['thread_color'] + ): task for task in cleanup_tasks + } + + # Track results + completed_count = 0 + failed_count = 0 + + # Wait for completion and handle results + for future in as_completed(future_to_task): + task = future_to_task[future] + try: + success, error_msg = future.result() + completed_count += 1 + + if success: + with _print_lock: + print_ok(f"Completed cleanup for {deployment.value}-{task['index']} ({completed_count}/{len(indexes_list)})") + else: + failed_count += 1 + with _print_lock: + print_error(f"❌ Failed cleanup for {deployment.value}-{task['index']}: {error_msg}") + + except Exception as e: + failed_count += 1 + with _print_lock: + print_error(f"❌ Exception during cleanup for {deployment.value}-{task['index']}: {str(e)}") + + # Final summary + print() + if failed_count == 0: + print_ok(f'All {len(indexes_list)} infrastructure cleanups completed successfully!') + else: + print_warning(f'Completed with {failed_count} failures out of {len(indexes_list)} total cleanups.') + if completed_count > 0: + print_info(f'{completed_count} cleanups succeeded.') + print_ok('All done!') def extract_json(text: str) -> Any: diff --git a/tests/python/test_infrastructures.py b/tests/python/test_infrastructures.py index e1e1639..7c30c8e 100644 --- a/tests/python/test_infrastructures.py +++ b/tests/python/test_infrastructures.py @@ -170,7 +170,7 @@ def test_infrastructure_base_policy_fragments_creation(mock_utils): ) # Initialize policy fragments - pfs = infra._define_policy_fragments() + infra._define_policy_fragments() # Check that all base policy fragments are created expected_fragment_names = [ @@ -195,7 +195,7 @@ def test_infrastructure_base_apis_creation(mock_utils): ) # Initialize APIs - apis = infra._define_apis() + infra._define_apis() # Check that hello-world API is created assert len(infra.base_apis) == 1 diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index dbb512e..44a7194 100644 --- 
a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -406,7 +406,199 @@ def test_cleanup_infra_deployment_single(monkeypatch): monkeypatch.setattr(utils, '_cleanup_resources', lambda deployment_name, rg_name: None) utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, None) utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 1) + utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1]) # Single item in list should use sequential mode + + +def test_cleanup_infra_deployments_parallel_mode(monkeypatch): + """Test cleanup_infra_deployments with multiple indexes using parallel execution.""" + cleanup_calls = [] + + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + cleanup_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) + return True, "" # Return success + + def mock_get_infra_rg_name(deployment, index): + return f'apim-infra-{deployment.value}-{index}' if index else f'apim-infra-{deployment.value}' + + monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) + + # Test with multiple indexes (should use parallel mode) + utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3]) + + # Verify all cleanup calls were made + assert len(cleanup_calls) == 3 + + # Check that the correct resource groups were targeted + expected_rgs = [ + 'apim-infra-simple-apim-1', + 'apim-infra-simple-apim-2', + 'apim-infra-simple-apim-3' + ] + actual_rgs = [call[1] for call in cleanup_calls] + assert set(actual_rgs) == set(expected_rgs) + + # Check that thread prefixes contain the correct infrastructure and index info + for deployment_name, rg_name, thread_prefix, thread_color in cleanup_calls: + assert deployment_name == 'simple-apim' + 
assert 'simple-apim' in thread_prefix + assert thread_color in utils.THREAD_COLORS + + +def test_cleanup_infra_deployments_parallel_with_failures(monkeypatch): + """Test parallel cleanup handling when some threads fail.""" + cleanup_calls = [] + + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + cleanup_calls.append((deployment_name, rg_name)) + # Simulate failure for index 2 + if 'simple-apim-2' in rg_name: + return False, "Simulated failure for testing" + return True, "" + + def mock_get_infra_rg_name(deployment, index): + return f'apim-infra-{deployment.value}-{index}' + + monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(utils, 'print_error', lambda *a, **kw: None) + monkeypatch.setattr(utils, 'print_warning', lambda *a, **kw: None) + + # Test with multiple indexes where one fails + utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3]) + + # Verify all cleanup attempts were made despite failure + assert len(cleanup_calls) == 3 + + +def test_cleanup_resources_thread_safe_success(monkeypatch): + """Test the thread-safe cleanup wrapper with successful execution.""" + original_calls = [] + + def mock_cleanup_resources_with_thread_safe_printing(deployment_name, rg_name, thread_prefix, thread_color): + original_calls.append((deployment_name, rg_name)) + + monkeypatch.setattr(utils, '_cleanup_resources_with_thread_safe_printing', mock_cleanup_resources_with_thread_safe_printing) + + # Test successful cleanup + success, error_msg = utils._cleanup_resources_thread_safe( + 'test-deployment', 'test-rg', '[TEST]: ', utils.BOLD_G + ) + + assert success is True + assert error_msg == "" + assert len(original_calls) == 1 + assert original_calls[0] == ('test-deployment', 'test-rg') + + +def 
test_cleanup_resources_thread_safe_failure(monkeypatch): + """Test the thread-safe cleanup wrapper with exception handling.""" + def mock_cleanup_resources_with_thread_safe_printing(deployment_name, rg_name, thread_prefix, thread_color): + raise Exception("Simulated cleanup failure") + + monkeypatch.setattr(utils, '_cleanup_resources_with_thread_safe_printing', mock_cleanup_resources_with_thread_safe_printing) + + # Test failed cleanup + success, error_msg = utils._cleanup_resources_thread_safe( + 'test-deployment', 'test-rg', '[TEST]: ', utils.BOLD_G + ) + + assert success is False + assert "Simulated cleanup failure" in error_msg + + +def test_cleanup_infra_deployments_max_workers_limit(monkeypatch): + """Test that parallel cleanup properly handles different numbers of indexes.""" + cleanup_calls = [] + + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + cleanup_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) + return True, "" + + def mock_get_infra_rg_name(deployment, index): + return f'rg-{deployment.value}-{index}' + + # Mock Azure CLI calls to avoid real execution + def mock_run(*args, **kwargs): + return utils.Output(success=True, text='{}') + + monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls + monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) + + # Test with 6 indexes (should use parallel mode and handle all indexes) + utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3, 4, 5, 6]) + + # Verify all 6 cleanup calls were made + assert len(cleanup_calls) == 6, f"Expected 6 cleanup calls, got {len(cleanup_calls)}" + + # Check that the correct resource groups were targeted + expected_rgs = [f'rg-simple-apim-{i}' 
for i in range(1, 7)] + actual_rgs = [call[1] for call in cleanup_calls] + assert set(actual_rgs) == set(expected_rgs), f"Expected RGs {expected_rgs}, got {actual_rgs}" + + # Test with 2 indexes (should use parallel mode) + cleanup_calls.clear() utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2]) + + assert len(cleanup_calls) == 2, f"Expected 2 cleanup calls, got {len(cleanup_calls)}" + + # Test that thread prefixes and colors are assigned properly + for call in cleanup_calls: + deployment_name, rg_name, thread_prefix, thread_color = call + assert deployment_name == 'simple-apim' + assert 'simple-apim' in thread_prefix + assert thread_color in utils.THREAD_COLORS + + +def test_cleanup_infra_deployments_thread_color_assignment(monkeypatch): + """Test that thread colors are assigned correctly and cycle through available colors.""" + cleanup_calls = [] + + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + cleanup_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) + return True, "" + + def mock_get_infra_rg_name(deployment, index): + return f'apim-infra-{deployment.value}-{index}' + + # Mock Azure CLI calls to avoid real execution + def mock_run(*args, **kwargs): + return utils.Output(success=True, text='{}') + + monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) + monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls + monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) + monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) + + # Test with more indexes than available colors to verify cycling + num_colors = len(utils.THREAD_COLORS) + test_indexes = list(range(1, num_colors + 3)) # More than available colors + + utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, test_indexes) + + # Verify colors were assigned and cycled correctly 
+ assigned_colors = [call[3] for call in cleanup_calls] + + # Sort the calls by the index extracted from the rg_name to check in deterministic order + cleanup_calls_sorted = sorted(cleanup_calls, key=lambda x: int(x[1].split('-')[-1])) + assigned_colors_sorted = [call[3] for call in cleanup_calls_sorted] + + # First num_colors should use each color once + for i in range(num_colors): + expected_color = utils.THREAD_COLORS[i % num_colors] + assert assigned_colors_sorted[i] == expected_color + + # Additional colors should cycle back to the beginning + if len(assigned_colors_sorted) > num_colors: + assert assigned_colors_sorted[num_colors] == utils.THREAD_COLORS[0] + assert assigned_colors_sorted[num_colors + 1] == utils.THREAD_COLORS[1] def test_cleanup_infra_deployments_all_infrastructure_types(monkeypatch): @@ -437,45 +629,69 @@ def mock_get_infra_rg_name(deployment, index): def test_cleanup_infra_deployments_index_scenarios(monkeypatch): """Test cleanup_infra_deployments with various index scenarios.""" cleanup_calls = [] + thread_safe_calls = [] def mock_cleanup_resources(deployment_name, rg_name): cleanup_calls.append((deployment_name, rg_name)) + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + thread_safe_calls.append((deployment_name, rg_name, thread_prefix, thread_color)) + return True, "" + def mock_get_infra_rg_name(deployment, index): return f'apim-infra-{deployment.value}-{index}' if index else f'apim-infra-{deployment.value}' + # Mock Azure CLI calls to avoid real execution + def mock_run(*args, **kwargs): + return utils.Output(success=True, text='{}') + monkeypatch.setattr(utils, '_cleanup_resources', mock_cleanup_resources) + monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls monkeypatch.setattr(utils, 'print_info', 
lambda *a, **kw: None) + monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) - # Test None index + # Test None index (sequential) utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, None) - # Test single integer index + # Test single integer index (sequential) utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 5) - # Test list of integers - utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3]) + # Test single item list (sequential) + utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1]) + + # Test list of integers (parallel) + utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [2, 3]) - # Test tuple of integers + # Test tuple of integers (parallel) utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, (4, 5)) - # Test empty list + # Test empty list (sequential, with no index) utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, []) - # Verify correct calls were made - expected_calls = [ + # Verify sequential calls + expected_sequential_calls = [ ('simple-apim', 'apim-infra-simple-apim'), # None index ('simple-apim', 'apim-infra-simple-apim-5'), # Single index 5 - ('simple-apim', 'apim-infra-simple-apim-1'), # List [1, 2, 3] - first - ('simple-apim', 'apim-infra-simple-apim-2'), # List [1, 2, 3] - second - ('simple-apim', 'apim-infra-simple-apim-3'), # List [1, 2, 3] - third + ('simple-apim', 'apim-infra-simple-apim-1'), # Single item list [1] + ('simple-apim', 'apim-infra-simple-apim'), # Empty list (None index) + ] + + for expected_call in expected_sequential_calls: + assert expected_call in cleanup_calls, f"Expected sequential call {expected_call} not found in {cleanup_calls}" + + # Verify parallel calls (extract just the deployment and rg_name parts) + parallel_calls = [(call[0], call[1]) for call in thread_safe_calls] + expected_parallel_calls = [ + ('simple-apim', 'apim-infra-simple-apim-2'), # List [2, 3] - first + ('simple-apim', 'apim-infra-simple-apim-3'), # List [2, 3] - second 
('simple-apim', 'apim-infra-simple-apim-4'), # Tuple (4, 5) - first ('simple-apim', 'apim-infra-simple-apim-5'), # Tuple (4, 5) - second ] - for expected_call in expected_calls: - assert expected_call in cleanup_calls + for expected_call in expected_parallel_calls: + assert expected_call in parallel_calls, f"Expected parallel call {expected_call} not found in {parallel_calls}" # ------------------------------ @@ -1210,33 +1426,45 @@ def mock_get_infra_rg_name(deployment, index): def test_cleanup_edge_cases_comprehensive(monkeypatch): """Test cleanup functions with edge cases and error conditions.""" - + # Test with different index types cleanup_calls = [] - + def mock_cleanup_resources(deployment_name, rg_name): cleanup_calls.append((deployment_name, rg_name)) - + return True, "" + + def mock_cleanup_resources_thread_safe(deployment_name, rg_name, thread_prefix, thread_color): + cleanup_calls.append((deployment_name, rg_name)) + return True, "" + def mock_get_infra_rg_name(deployment, index): return f'rg-{deployment.value}-{index}' if index is not None else f'rg-{deployment.value}' - + + # Mock Azure CLI calls to avoid real execution + def mock_run(*args, **kwargs): + return utils.Output(success=True, text='{}') + monkeypatch.setattr(utils, '_cleanup_resources', mock_cleanup_resources) + monkeypatch.setattr(utils, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) monkeypatch.setattr(utils, 'get_infra_rg_name', mock_get_infra_rg_name) + monkeypatch.setattr(utils, 'run', mock_run) # Mock Azure CLI calls monkeypatch.setattr(utils, 'print_info', lambda *a, **kw: None) - - # Test with zero index + monkeypatch.setattr(utils, 'print_ok', lambda *a, **kw: None) + + # Test with zero index (single index, uses sequential path) utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 0) assert ('simple-apim', 'rg-simple-apim-0') in cleanup_calls - - # Test with negative index + + # Test with negative index (single index, uses sequential path) 
utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, -1) assert ('simple-apim', 'rg-simple-apim--1') in cleanup_calls - - # Test with large index + + # Test with large index (single index, uses sequential path) utils.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 9999) assert ('simple-apim', 'rg-simple-apim-9999') in cleanup_calls - - # Test with mixed positive and negative indexes in list + + # Test with mixed positive and negative indexes in list (multiple indexes, uses parallel path) cleanup_calls.clear() utils.cleanup_infra_deployments(INFRASTRUCTURE.APIM_ACA, [-1, 0, 1]) expected = [ @@ -1245,9 +1473,7 @@ def mock_get_infra_rg_name(deployment, index): ('apim-aca', 'rg-apim-aca-1') ] for call in expected: - assert call in cleanup_calls - - # Test with single-item list + assert call in cleanup_calls # Test with single-item list cleanup_calls.clear() utils.cleanup_infra_deployments(INFRASTRUCTURE.AFD_APIM_PE, [42]) assert ('afd-apim-pe', 'rg-afd-apim-pe-42') in cleanup_calls