diff --git a/.github/python.instructions.md b/.github/python.instructions.md index af9c534..d8f1d7d 100644 --- a/.github/python.instructions.md +++ b/.github/python.instructions.md @@ -48,14 +48,21 @@ applyTo: '**/*.py' 2. Specific type/constant imports (e.g., `from apimtypes import INFRASTRUCTURE`) 3. Specific function imports (e.g., `from console import print_error`) -## Linting (pylint) +## Code Quality Checklist -- Respect the repository pylint configuration at `tests/python/.pylintrc`. -- When changing Python code, run pylint and ensure changes do not worsen the pylint rating unexpectedly. -- Prefer fixing root causes (e.g., import structure, error handling) over suppressions. +Before completing any Python code changes, verify: + +- [ ] All pylint warnings and errors are resolved (`pylint --rcfile=tests/python/.pylintrc `) +- [ ] Code follows PEP 8 and the style guidelines in this file +- [ ] Import statements for modules within this repo are placed last in the imports and are grouped with the `# APIM Samples imports` header +- [ ] Type hints are present where appropriate +- [ ] No unnecessary comments; docstrings are present for functions and classes +- [ ] Edge cases and error handling are implemented +- [ ] Prefer fixing root causes (e.g., import structure, error handling) over suppressions. ## Testing +- Aim for 90+% code coverage for each file. - Add or update pytest unit tests when changing behavior. - Prefer focused tests for the code being changed. - Avoid tests that require live Azure access; mock Azure CLI interactions and `azure_resources` helpers. diff --git a/.gitignore b/.gitignore index 99651e3..31a157c 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,10 @@ labs-in-progress/ # Coverage data and reports .coverage +.coverage.* +coverage.xml +coverage.json +htmlcov/ tests/python/htmlcov/ # Pylint reports @@ -40,3 +44,5 @@ Test-Matrix.html $JsonReport $TextReport +$JsonReportRelative +$TextReportRelative diff --git a/shared/python/infrastructures.py b/shared/python/infrastructures.py index 82b6036..03de939 100644 --- a/shared/python/infrastructures.py +++ b/shared/python/infrastructures.py @@ -145,7 +145,7 @@ def _create_keyvault(self, key_vault_name: str) -> bool: print_error('Failed to assign Key Vault Certificates Officer role to current user.\nThis is an RBAC permission issue - verify your account has sufficient permissions.') return False - print_ok(' Assigned Key Vault Certificates Officer role to current user') + print_ok('Assigned Key Vault Certificates Officer role to current user') # Brief wait for role assignment propagation print_plain('⏳ Waiting for role assignment propagation (15 seconds)...') diff --git a/shared/python/utils.py b/shared/python/utils.py index 956139d..f332e4e 100644 --- a/shared/python/utils.py +++ b/shared/python/utils.py @@ -117,7 +117,6 @@ def __init__(self, rg_location: str, deployment: INFRASTRUCTURE, index: int, api print_val('Infrastructure', self.deployment.value) print_val('Index', self.index) print_val('APIM SKU', self.apim_sku.value) - print_plain('') # ------------------------------ # PUBLIC METHODS diff --git a/tests/README.md b/tests/README.md index f69794a..690ee38 100644 --- a/tests/README.md +++ b/tests/README.md @@ -80,12 +80,27 @@ Run tests separately when you only need test execution: Both scripts: - Run all tests in `tests/python` using pytest -- Generate a code coverage report (HTML output in `tests/python/htmlcov`) -- Store the raw coverage data in `tests/python/.coverage` +- Generate code coverage reports: + - 
HTML: `htmlcov/index.html` (at repository root) + - XML: `coverage.xml` (for VS Code integration) + - JSON: `coverage.json` +- Store the raw coverage data in `.coverage` (at repository root) #### Viewing Coverage Reports -After running tests, open `tests/python/htmlcov/index.html` in your browser to view detailed coverage information. +**In VS Code (no extra extensions):** +- Open the Testing view (beaker icon in the Activity Bar). +- Click the "Toggle Code Coverage" shield button in the Testing toolbar. +- Run tests from the Testing view (Run All or individual test runs). +- The Explorer will decorate Python files with coverage percentages, and the editor will show covered/uncovered lines. +- Make sure the Python extension is enabled and `coverage`/`pytest-cov` are available in your venv. If needed: + ```powershell + pip install coverage pytest-cov + ``` +- Note: Running pytest only from the terminal won’t decorate the Explorer. Use the Testing UI to see coverage overlays. + +**In Browser:** +- Open `htmlcov/index.html` in your browser for detailed coverage information ## Test Infrastructure diff --git a/tests/Test-Matrix.md b/tests/Test-Matrix.md index e2fe8aa..aee65be 100644 --- a/tests/Test-Matrix.md +++ b/tests/Test-Matrix.md @@ -2,14 +2,14 @@ **Date / time**: __________________ -| Sample / Infrastructure | SIMPLE APIM | APIM ACA | AFD APIM PE | App Gateway APIM ACA | -|:----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------| -| **INFRASTRUCTURE** | ▢ Local
▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
-| **authX**                   | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
-| **authX-pro**               | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
-| **azure-maps**              | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
-| **general**                 | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
-| **load-balancing**          | **N/A**                     | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
-| **oauth-3rd-party**         | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
-| **secure-blob-access**      | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
-| **INFRASTRUCTURE clean-up** | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
+| Sample / Infrastructure     | SIMPLE APIM                 | APIM ACA                    | AFD APIM PE                 | App Gateway APIM ACA        | App Gateway APIM PE         |
+|:----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------|
+| **INFRASTRUCTURE**          | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
+| **authX**                   | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
+| **authX-pro**               | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
+| **azure-maps**              | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
+| **general**                 | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
+| **load-balancing**          | **N/A**                     | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
+| **oauth-3rd-party**         | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
+| **secure-blob-access**      | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container |
+| **INFRASTRUCTURE clean-up** | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local<br/>▢ Dev Container | ▢ Local
▢ Dev Container | diff --git a/tests/python/.pylintrc b/tests/python/.pylintrc index 994e913..2d0a6a6 100644 --- a/tests/python/.pylintrc +++ b/tests/python/.pylintrc @@ -20,6 +20,7 @@ disable = R0801, # Duplicate code R0902, # Too many instance attributes R0903, # Too few public methods + R0904, # Too many public methods R0911, # Too many return statements R0912, # Too many branches R0913, # Too many arguments diff --git a/tests/python/check_python.ps1 b/tests/python/check_python.ps1 index 1caea03..87dd9b4 100644 --- a/tests/python/check_python.ps1 +++ b/tests/python/check_python.ps1 @@ -77,9 +77,29 @@ Write-Host " Step 2/2: Running Tests " -ForegroundColor Yellow Write-Host "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -ForegroundColor Yellow Write-Host "" -& "$ScriptDir\run_tests.ps1" +# Capture test output and pass it through to console while also capturing it +$TestOutput = @() +& "$ScriptDir\run_tests.ps1" 2>&1 | Tee-Object -Variable TestOutput | Write-Host $TestExitCode = $LASTEXITCODE +# Parse test results from captured output +$TotalTests = 0 +$PassedTests = 0 +$FailedTests = 0 + +foreach ($Line in $TestOutput) { + $LineStr = $Line.ToString() + # Look for pytest summary line like "908 passed, 9 failed in 26.76s" + if ($LineStr -match '(\d+)\s+passed') { + $PassedTests = [int]::Parse($matches[1]) + } + if ($LineStr -match '(\d+)\s+failed') { + $FailedTests = [int]::Parse($matches[1]) + } +} + +$TotalTests = $PassedTests + $FailedTests + Write-Host "" @@ -92,9 +112,11 @@ Write-Host "║ Final Results ║" -ForegroundColor Write-Host "╚════════════════════════════════════════════╝" -ForegroundColor Cyan Write-Host "" +# Determine statuses $LintStatus = if ($LintExitCode -eq 0) { "✅ PASSED" } else { "⚠️ ISSUES FOUND" } $TestStatus = if ($TestExitCode -eq 0) { "✅ PASSED" } else { "❌ FAILED" } +# Get pylint score $PylintScore = $null $LatestPylintText = Join-Path $ScriptDir "pylint/reports/latest.txt" if (Test-Path $LatestPylintText) { @@ -104,17 +126,39 @@ if (Test-Path $LatestPylintText) { } } -if ($PylintScore) { - $LintStatus = "$LintStatus ($PylintScore)" -} - +# Set colors $LintColor = if ($LintExitCode -eq 0) { "Green" } else { "Yellow" } $TestColor = if ($TestExitCode -eq 0) { "Green" } else { "Red" } -Write-Host " Pylint: " -NoNewline -Write-Host $LintStatus -ForegroundColor $LintColor -Write-Host " Tests: " -NoNewline +# Calculate column widths for alignment +$LabelWidth = "Pylint :".Length # 7 +$Padding = " " * ($LabelWidth - 1) + +# Display Pylint status with score +Write-Host "Pylint : " -NoNewline +Write-Host $LintStatus -ForegroundColor $LintColor -NoNewline +if ($PylintScore) { + Write-Host " ($PylintScore)" -ForegroundColor Gray +} else { + Write-Host "" +} + +# Display Test status with counts +Write-Host "Tests : " -NoNewline Write-Host $TestStatus -ForegroundColor $TestColor + +# Display test counts with right-aligned numbers +if ($TotalTests -gt 0) { + # Calculate padding for right-alignment (max 5 digits) + $TotalPadded = "{0,5}" -f $TotalTests + $PassedPadded = "{0,5}" -f $PassedTests + $FailedPadded = "{0,5}" -f $FailedTests + + Write-Host " • Total : $TotalPadded" -ForegroundColor Gray + Write-Host " • Passed : $PassedPadded" -ForegroundColor Gray + Write-Host " • Failed : $FailedPadded" -ForegroundColor Gray +} + Write-Host "" # Determine overall exit code @@ -127,7 +171,7 @@ if ($TestExitCode -ne 0) { } if ($OverallExitCode -eq 0) { - Write-Host "🎉 All checks passed! Code is ready for commit." -ForegroundColor Green + Write-Host "🎉 All checks passed! Code is ready to commit." 
-ForegroundColor Green } else { Write-Host "⚠️ Some checks did not pass. Please review and fix issues." -ForegroundColor Yellow } diff --git a/tests/python/check_python.sh b/tests/python/check_python.sh index a3f8886..0e3093f 100644 --- a/tests/python/check_python.sh +++ b/tests/python/check_python.sh @@ -67,10 +67,18 @@ echo "━━━━━━━━━━━━━━━━━━━━━━━━ echo "" set +e -"$SCRIPT_DIR/run_tests.sh" +TEST_OUTPUT=$("$SCRIPT_DIR/run_tests.sh" 2>&1) TEST_EXIT_CODE=$? set -e +# Print the test output +echo "$TEST_OUTPUT" + +# Parse test results from output +PASSED_TESTS=$(echo "$TEST_OUTPUT" | grep -oE '[0-9]+ passed' | head -1 | grep -oE '[0-9]+' || echo "0") +FAILED_TESTS=$(echo "$TEST_OUTPUT" | grep -oE '[0-9]+ failed' | head -1 | grep -oE '[0-9]+' || echo "0") +TOTAL_TESTS=$((PASSED_TESTS + FAILED_TESTS)) + echo "" @@ -83,24 +91,32 @@ echo "║ Final Results ║" echo "╚═══════════════════════════════════════════════════════════╝" echo "" +# Determine Pylint status if [ $LINT_EXIT_CODE -eq 0 ]; then - if [ -n "$PYLINT_SCORE" ]; then - echo " Pylint: ✅ PASSED ($PYLINT_SCORE)" - else - echo " Pylint: ✅ PASSED" - fi + LINT_STATUS="✅ PASSED" else - if [ -n "$PYLINT_SCORE" ]; then - echo " Pylint: ⚠️ ISSUES FOUND ($PYLINT_SCORE)" - else - echo " Pylint: ⚠️ ISSUES FOUND" - fi + LINT_STATUS="⚠️ ISSUES FOUND" fi +# Determine Test status if [ $TEST_EXIT_CODE -eq 0 ]; then - echo " Tests: ✅ PASSED" + TEST_STATUS="✅ PASSED" else - echo " Tests: ❌ FAILED" + TEST_STATUS="❌ FAILED" +fi + +# Display results with proper alignment +echo "Pylint : $LINT_STATUS" +if [ -n "$PYLINT_SCORE" ]; then + echo " ($PYLINT_SCORE)" +fi + +echo "Tests : $TEST_STATUS" +if [ $TOTAL_TESTS -gt 0 ]; then + # Right-align numbers with padding + printf " • Total : %5d\n" "$TOTAL_TESTS" + printf " • Passed : %5d\n" "$PASSED_TESTS" + printf " • Failed : %5d\n" "$FAILED_TESTS" fi echo "" @@ -115,7 +131,7 @@ if [ $TEST_EXIT_CODE -ne 0 ]; then fi if [ $OVERALL_EXIT_CODE -eq 0 ]; then - echo "🎉 All checks passed! Code is ready for commit." + echo "🎉 All checks passed! Code is ready to commit." else echo "⚠️ Some checks did not pass. Please review and fix issues." 
fi diff --git a/tests/python/conftest.py b/tests/python/conftest.py index 84d0376..5780c83 100644 --- a/tests/python/conftest.py +++ b/tests/python/conftest.py @@ -10,6 +10,21 @@ # Add the shared/python directory to the Python path for all tests sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../shared/python'))) +# Add the tests/python directory to import test_helpers +sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) + +# APIM Samples imports +# pylint: disable=wrong-import-position +from test_helpers import ( + create_mock_http_response, + create_mock_output, + create_sample_apis, + create_sample_policy_fragments, + get_sample_infrastructure_params, + MockApimRequestsPatches, + MockInfrastructuresPatches +) + # ------------------------------ # SHARED FIXTURES @@ -34,3 +49,81 @@ def sample_test_data() -> dict[str, Any]: 'test_resource_group': 'rg-test-apim-01', 'test_location': 'eastus2' } + + +# ------------------------------ +# MOCK FIXTURES +# ------------------------------ + +@pytest.fixture(autouse=True) +def infrastructures_patches(): + """Automatically patch infrastructures dependencies for tests.""" + with MockInfrastructuresPatches() as patches: + yield patches + + +@pytest.fixture +def mock_utils(infrastructures_patches): + """Return the patched utils module for infrastructures tests.""" + return infrastructures_patches.utils + + +@pytest.fixture +def mock_az(infrastructures_patches): + """Return the patched azure_resources module for infrastructures tests.""" + return infrastructures_patches.az + + +@pytest.fixture +def mock_az_success(): + """Pre-configured successful Azure CLI output.""" + return create_mock_output(success=True, json_data={'result': 'success'}) + + +@pytest.fixture +def mock_az_failure(): + """Pre-configured failed Azure CLI output.""" + return create_mock_output(success=False, text='Error message') + + +@pytest.fixture +def sample_policy_fragments(): + """Provide sample policy fragments for testing.""" + return create_sample_policy_fragments(count=2) + + +@pytest.fixture +def sample_apis(): + """Provide sample APIs for testing.""" + return create_sample_apis(count=2) + + +@pytest.fixture +def sample_infrastructure_params() -> dict[str, Any]: + """Provide common infrastructure parameters.""" + return get_sample_infrastructure_params() + + +@pytest.fixture +def mock_http_response_200(): + """Pre-configured successful HTTP response.""" + return create_mock_http_response( + status_code=200, + json_data={'result': 'ok'} + ) + + +@pytest.fixture +def mock_http_response_error(): + """Pre-configured error HTTP response.""" + return create_mock_http_response( + status_code=500, + text='Internal Server Error' + ) + + +@pytest.fixture +def apimrequests_patches(): + """Provide common apimrequests patches for HTTP tests.""" + with MockApimRequestsPatches() as patches: + yield patches diff --git a/tests/python/run_pylint.ps1 b/tests/python/run_pylint.ps1 index 30c7e79..a018bd4 100644 --- a/tests/python/run_pylint.ps1 +++ b/tests/python/run_pylint.ps1 @@ -53,18 +53,31 @@ Write-Host " Working Directory : $RepoRoot" -ForegroundColor Gray Write-Host " Pylint Config : $PylintRc`n" -ForegroundColor Gray # Run pylint with multiple output formats -$JsonReport = "$ReportDir/pylint_${Timestamp}.json" -$TextReport = "$ReportDir/pylint_${Timestamp}.txt" -$LatestJson = "$ReportDir/latest.json" -$LatestText = "$ReportDir/latest.txt" +$JsonReport = Join-Path $ReportDir "pylint_${Timestamp}.json" +$TextReport = Join-Path $ReportDir 
"pylint_${Timestamp}.txt" +$LatestJson = Join-Path $ReportDir "latest.json" +$LatestText = Join-Path $ReportDir "latest.txt" + +$ReportDirRelative = [IO.Path]::GetRelativePath($RepoRoot, $ReportDir) -replace "\\", "/" +$JsonReportRelative = "$ReportDirRelative/pylint_${Timestamp}.json" +$TextReportRelative = "$ReportDirRelative/pylint_${Timestamp}.txt" # Change to repository root and execute pylint Push-Location $RepoRoot try { pylint --rcfile "$PylintRc" ` - --output-format=json:$JsonReport,colorized,text:$TextReport ` - $Target.Split(' ') - $ExitCode = $LASTEXITCODE + --output-format=json ` + $Target.Split(' ') ` + | Tee-Object -FilePath $JsonReport | Out-Null + $JsonExitCode = $LASTEXITCODE + + pylint --rcfile "$PylintRc" ` + --output-format=text ` + $Target.Split(' ') ` + | Tee-Object -FilePath $TextReport + $TextExitCode = $LASTEXITCODE + + $ExitCode = if ($JsonExitCode -ne 0) { $JsonExitCode } else { $TextExitCode } } finally { Pop-Location } diff --git a/tests/python/run_tests.ps1 b/tests/python/run_tests.ps1 index 3e88dde..0fd5d65 100644 --- a/tests/python/run_tests.ps1 +++ b/tests/python/run_tests.ps1 @@ -18,8 +18,17 @@ $env:PYTHONUNBUFFERED = "1" Push-Location $RepoRoot try { - $env:COVERAGE_FILE = (Join-Path $RepoRoot "tests/python/.coverage") - pytest -v --color=yes --cov=shared/python --cov-config=tests/python/.coveragerc --cov-report=html:tests/python/htmlcov tests/python/ + $env:COVERAGE_FILE = (Join-Path $RepoRoot ".coverage") + pytest -v --color=yes --cov=shared/python --cov-config=tests/python/.coveragerc --cov-report=html:tests/python/htmlcov --cov-report=xml:coverage.xml --cov-report=json:coverage.json tests/python/ + + # Display coverage summary + Write-Host "`nCoverage Summary:" -ForegroundColor Green + coverage report --skip-covered + + Write-Host "`n✅ Coverage reports generated:" -ForegroundColor Green + Write-Host " - HTML: tests/python/htmlcov/index.html" -ForegroundColor Cyan + Write-Host " - XML: coverage.xml (for VS Code)" -ForegroundColor Cyan + Write-Host " - JSON: coverage.json" -ForegroundColor Cyan } finally { Pop-Location diff --git a/tests/python/run_tests.sh b/tests/python/run_tests.sh index ddbe0cc..69e4dc2 100644 --- a/tests/python/run_tests.sh +++ b/tests/python/run_tests.sh @@ -16,5 +16,16 @@ REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." 
&& pwd)" cd "${REPO_ROOT}" -export COVERAGE_FILE="tests/python/.coverage" -pytest -v --color=yes --cov=shared/python --cov-config=tests/python/.coveragerc --cov-report=html:tests/python/htmlcov tests/python/ +export COVERAGE_FILE=".coverage" +pytest -v --color=yes --cov=shared/python --cov-config=tests/python/.coveragerc --cov-report=html:htmlcov --cov-report=xml:coverage.xml --cov-report=json:coverage.json tests/python/ + +# Display coverage summary +echo "" +echo "Coverage Summary:" +coverage report --skip-covered + +echo "" +echo "✅ Coverage reports generated:" +echo " - HTML: htmlcov/index.html" +echo " - XML: coverage.xml (for VS Code)" +echo " - JSON: coverage.json" diff --git a/tests/python/test_apimrequests.py b/tests/python/test_apimrequests.py index 2d91cf4..5e2066e 100644 --- a/tests/python/test_apimrequests.py +++ b/tests/python/test_apimrequests.py @@ -5,6 +5,7 @@ # APIM Samples imports from apimrequests import ApimRequests from apimtypes import SUBSCRIPTION_KEY_PARAMETER_NAME, HTTP_VERB +from test_helpers import create_mock_http_response, create_mock_session_with_response # Sample values for tests DEFAULT_URL = 'https://example.com/apim/' @@ -37,92 +38,64 @@ def test_init_no_key(): assert apim.headers['Accept'] == 'application/json' @pytest.mark.http -@patch('apimrequests.requests.request') -@patch('apimrequests.print_message') -@patch('apimrequests.print_info') -@patch('apimrequests.print_error') -def test_single_get_success(mock_print_error, mock_print_info, mock_print_message, mock_request, apim): - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = {'result': 'ok'} - mock_response.text = '{"result": "ok"}' - mock_response.raise_for_status.return_value = None - mock_request.return_value = mock_response +def test_single_get_success(apim, apimrequests_patches, mock_http_response_200): + apimrequests_patches.request.return_value = mock_http_response_200 with patch.object(apim, '_print_response') as mock_print_response: result = apim.singleGet(DEFAULT_PATH, printResponse=True) - assert result == '{\n "result": "ok"\n}' - mock_print_response.assert_called_once_with(mock_response) - mock_print_error.assert_not_called() + + assert result == '{\n "result": "ok"\n}' + mock_print_response.assert_called_once_with(mock_http_response_200) + apimrequests_patches.print_error.assert_not_called() @pytest.mark.http -@patch('apimrequests.requests.request') -@patch('apimrequests.print_message') -@patch('apimrequests.print_info') -@patch('apimrequests.print_error') -def test_single_get_error(mock_print_error, mock_print_info, mock_print_message, mock_request, apim): - mock_request.side_effect = requests.exceptions.RequestException('fail') +def test_single_get_error(apim, apimrequests_patches): + apimrequests_patches.request.side_effect = requests.exceptions.RequestException('fail') result = apim.singleGet(DEFAULT_PATH, printResponse=True) assert result is None - mock_print_error.assert_called_once() + apimrequests_patches.print_error.assert_called_once() @pytest.mark.http -@patch('apimrequests.requests.request') -@patch('apimrequests.print_message') -@patch('apimrequests.print_info') -@patch('apimrequests.print_error') -def test_single_post_success(mock_print_error, mock_print_info, mock_print_message, mock_request, apim): - mock_response = MagicMock() - mock_response.status_code = 201 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = 
{'created': True} - mock_response.text = '{"created": true}' - mock_response.raise_for_status.return_value = None - mock_request.return_value = mock_response +def test_single_post_success(apim, apimrequests_patches): + response = create_mock_http_response( + status_code=201, + json_data={'created': True} + ) + apimrequests_patches.request.return_value = response with patch.object(apim, '_print_response') as mock_print_response: result = apim.singlePost(DEFAULT_PATH, data=DEFAULT_DATA, printResponse=True) - assert result == '{\n "created": true\n}' - mock_print_response.assert_called_once_with(mock_response) - mock_print_error.assert_not_called() + + assert result == '{\n "created": true\n}' + mock_print_response.assert_called_once_with(response) + apimrequests_patches.print_error.assert_not_called() @pytest.mark.http -@patch('apimrequests.requests.Session') -@patch('apimrequests.print_message') -@patch('apimrequests.print_info') -def test_multi_get_success(mock_print_info, mock_print_message, mock_session, apim): - mock_sess = MagicMock() - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = {'result': 'ok'} - mock_response.text = '{"result": "ok"}' - mock_response.raise_for_status.return_value = None - mock_sess.request.return_value = mock_response - mock_session.return_value = mock_sess - - with patch.object(apim, '_print_response_code') as mock_print_code: - result = apim.multiGet(DEFAULT_PATH, runs=2, printResponse=True) - assert len(result) == 2 - for run in result: - assert run['status_code'] == 200 - assert run['response'] == '{\n "result": "ok"\n}' - assert mock_sess.request.call_count == 2 - mock_print_code.assert_called() +def test_multi_get_success(apim, apimrequests_patches, mock_http_response_200): + with patch('apimrequests.requests.Session') as session_cls: + session = create_mock_session_with_response(mock_http_response_200) + session_cls.return_value = session + + with patch.object(apim, '_print_response_code') as mock_print_code: + result = apim.multiGet(DEFAULT_PATH, runs=2, printResponse=True) + + assert len(result) == 2 + for run in result: + assert run['status_code'] == 200 + assert run['response'] == '{\n "result": "ok"\n}' + assert session.request.call_count == 2 + mock_print_code.assert_called() @pytest.mark.http -@patch('apimrequests.requests.Session') -@patch('apimrequests.print_message') -@patch('apimrequests.print_info') -def test_multi_get_error(mock_print_info, mock_print_message, mock_session, apim): - mock_sess = MagicMock() - mock_sess.request.side_effect = requests.exceptions.RequestException('fail') - mock_session.return_value = mock_sess - with patch.object(apim, '_print_response_code'): - # Should raise inside the loop and propagate the exception, ensuring the session is closed - with pytest.raises(requests.exceptions.RequestException): - apim.multiGet(DEFAULT_PATH, runs=1, printResponse=True) +def test_multi_get_error(apim, apimrequests_patches): + with patch('apimrequests.requests.Session') as session_cls: + session = MagicMock() + session.request.side_effect = requests.exceptions.RequestException('fail') + session_cls.return_value = session + + with patch.object(apim, '_print_response_code'): + with pytest.raises(requests.exceptions.RequestException): + apim.multiGet(DEFAULT_PATH, runs=1, printResponse=True) # Sample values for tests @@ -224,53 +197,48 @@ def test_subscription_key_setter_updates_and_clears_header(): # 
------------------------------ @pytest.mark.unit -@patch('apimrequests.requests.request') -def test_request_with_custom_headers(mock_request, apim): +def test_request_with_custom_headers(apim, apimrequests_patches): """Test request with custom headers merged with default headers.""" - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.json.return_value = {'result': 'ok'} - mock_response.raise_for_status.return_value = None - mock_request.return_value = mock_response + apimrequests_patches.request.return_value = create_mock_http_response( + status_code=200, + json_data={'result': 'ok'} + ) custom_headers = {'Custom': 'value'} apim.singleGet(DEFAULT_PATH, headers=custom_headers) # Verify custom headers were merged with default headers - call_kwargs = mock_request.call_args[1] + call_kwargs = apimrequests_patches.request.call_args[1] assert 'Custom' in call_kwargs['headers'] assert SUBSCRIPTION_KEY_PARAMETER_NAME in call_kwargs['headers'] @pytest.mark.unit -@patch('apimrequests.requests.request') -def test_request_timeout_error(mock_request, apim): +def test_request_timeout_error(apim, apimrequests_patches): """Test request with timeout error.""" - mock_request.side_effect = requests.exceptions.Timeout() + apimrequests_patches.request.side_effect = requests.exceptions.Timeout() result = apim.singleGet(DEFAULT_PATH) assert result is None @pytest.mark.unit -@patch('apimrequests.requests.request') -def test_request_connection_error(mock_request, apim): +def test_request_connection_error(apim, apimrequests_patches): """Test request with connection error.""" - mock_request.side_effect = requests.exceptions.ConnectionError() + apimrequests_patches.request.side_effect = requests.exceptions.ConnectionError() result = apim.singleGet(DEFAULT_PATH) assert result is None @pytest.mark.unit -@patch('apimrequests.requests.request') -def test_request_http_error(mock_request, apim): +def test_request_http_error(apim, apimrequests_patches): """Test request with HTTP error response.""" - mock_response = MagicMock() - mock_response.status_code = 404 - mock_response.reason = 'Not Found' - mock_response.headers = {'Content-Type': 'text/plain'} - mock_response.text = 'Resource not found' - mock_request.return_value = mock_response + response = create_mock_http_response( + status_code=404, + headers={'Content-Type': 'text/plain'}, + text='Resource not found' + ) + apimrequests_patches.request.return_value = response result = apim.singleGet(DEFAULT_PATH) @@ -278,15 +246,15 @@ def test_request_http_error(mock_request, apim): assert result == 'Resource not found' @pytest.mark.unit -@patch('apimrequests.requests.request') -def test_request_non_json_response(mock_request, apim): +def test_request_non_json_response(apim, apimrequests_patches): """Test request with non-JSON response.""" - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'text/plain'} - mock_response.json.side_effect = ValueError('Not JSON') - mock_response.text = 'Plain text response' - mock_request.return_value = mock_response + response = create_mock_http_response( + status_code=200, + headers={'Content-Type': 'text/plain'}, + text='Plain text response' + ) + response.json.side_effect = ValueError('Not JSON') + apimrequests_patches.request.return_value = response result = apim.singleGet(DEFAULT_PATH) @@ -294,21 +262,18 @@ def test_request_non_json_response(mock_request, apim): assert result == 'Plain text response' @pytest.mark.unit -@patch('apimrequests.requests.request') 
-def test_request_with_data(mock_request, apim): +def test_request_with_data(apim, apimrequests_patches): """Test POST request with data.""" - mock_response = MagicMock() - mock_response.status_code = 201 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = {'created': True} - mock_response.text = '{"created": true}' - mock_request.return_value = mock_response + apimrequests_patches.request.return_value = create_mock_http_response( + status_code=201, + json_data={'created': True} + ) data = {'name': 'test', 'value': 'data'} result = apim.singlePost(DEFAULT_PATH, data=data) # Verify data was passed correctly - call_kwargs = mock_request.call_args[1] + call_kwargs = apimrequests_patches.request.call_args[1] assert call_kwargs['json'] == data # The method returns JSON-formatted string for application/json content assert result == '{\n "created": true\n}' @@ -333,86 +298,61 @@ def test_headers_setter(apim): @pytest.mark.unit -@patch('apimrequests.requests.request') -@patch('apimrequests.print_message') -@patch('apimrequests.print_info') -def test_request_with_message(mock_print_info, mock_print_message, mock_request, apim): +def test_request_with_message(apim, apimrequests_patches): """Test _request method with message parameter.""" - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = {'result': 'ok'} - mock_response.text = '{"result": "ok"}' - mock_request.return_value = mock_response + apimrequests_patches.request.return_value = create_mock_http_response( + status_code=200, + json_data={'result': 'ok'} + ) with patch.object(apim, '_print_response'): apim._request(HTTP_VERB.GET, '/test', msg='Test message') - mock_print_message.assert_called_once_with('Test message', blank_above=True) + apimrequests_patches.print_message.assert_called_once_with('Test message', blank_above=True) @pytest.mark.unit -@patch('apimrequests.requests.request') -@patch('apimrequests.print_info') -def test_request_path_without_leading_slash(mock_print_info, mock_request, apim): +def test_request_path_without_leading_slash(apim, apimrequests_patches): """Test _request method with PATH without leading slash.""" - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = {'result': 'ok'} - mock_response.text = '{"result": "ok"}' - mock_request.return_value = mock_response + apimrequests_patches.request.return_value = create_mock_http_response( + status_code=200, + json_data={'result': 'ok'} + ) with patch.object(apim, '_print_response'): apim._request(HTTP_VERB.GET, 'test') # Should call with the corrected URL expected_url = DEFAULT_URL + '/test' - mock_request.assert_called_once() - args, _kwargs = mock_request.call_args + apimrequests_patches.request.assert_called_once() + args, _kwargs = apimrequests_patches.request.call_args assert args[1] == expected_url - @pytest.mark.unit -@patch('apimrequests.requests.Session') -@patch('apimrequests.print_message') -@patch('apimrequests.print_info') -def test_multi_request_with_message(mock_print_info, mock_print_message, mock_session_class, apim): - """Test _multiRequest method with message parameter.""" - mock_session = MagicMock() - mock_session_class.return_value = mock_session +def test_multi_request_with_message(apim, apimrequests_patches): + """Test _multiRequest supports optional message output.""" + response = 
create_mock_http_response(json_data={'result': 'ok'}) + with patch('apimrequests.requests.Session') as mock_session_cls: + mock_session = create_mock_session_with_response(response) + mock_session_cls.return_value = mock_session - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = {'result': 'ok'} - mock_response.text = '{"result": "ok"}' - mock_session.request.return_value = mock_response - - with patch.object(apim, '_print_response_code'): - result = apim._multiRequest(HTTP_VERB.GET, '/test', 1, msg='Multi-request message') + with patch.object(apim, '_print_response_code'): + result = apim._multiRequest(HTTP_VERB.GET, '/test', 1, msg='Multi-request message') - mock_print_message.assert_called_once_with('Multi-request message', blank_above=True) + apimrequests_patches.print_message.assert_called_once_with('Multi-request message', blank_above=True) assert len(result) == 1 @pytest.mark.unit -@patch('apimrequests.requests.Session') -@patch('apimrequests.print_info') -def test_multi_request_path_without_leading_slash(mock_print_info, mock_session_class, apim): +def test_multi_request_path_without_leading_slash(apim, apimrequests_patches): """Test _multiRequest method with PATH without leading slash.""" - mock_session = MagicMock() - mock_session_class.return_value = mock_session + response = create_mock_http_response(json_data={'result': 'ok'}) + with patch('apimrequests.requests.Session') as mock_session_cls: + mock_session = create_mock_session_with_response(response) + mock_session_cls.return_value = mock_session - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = {'result': 'ok'} - mock_response.text = '{"result": "ok"}' - mock_session.request.return_value = mock_response - - with patch.object(apim, '_print_response_code'): - apim._multiRequest(HTTP_VERB.GET, 'test', 1) + with patch.object(apim, '_print_response_code'): + apim._multiRequest(HTTP_VERB.GET, 'test', 1) # Should call with the corrected URL expected_url = DEFAULT_URL + '/test' @@ -422,121 +362,108 @@ def test_multi_request_path_without_leading_slash(mock_print_info, mock_session_ @pytest.mark.unit -@patch('apimrequests.requests.Session') -@patch('apimrequests.print_info') -def test_multi_request_non_json_response(mock_print_info, mock_session_class, apim): +def test_multi_request_non_json_response(apim): """Test _multiRequest method with non-JSON response.""" - mock_session = MagicMock() - mock_session_class.return_value = mock_session + response = create_mock_http_response( + status_code=200, + headers={'Content-Type': 'text/plain'}, + text='Plain text response' + ) - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'text/plain'} - mock_response.text = 'Plain text response' - mock_session.request.return_value = mock_response + with patch('apimrequests.requests.Session') as mock_session_cls: + mock_session = create_mock_session_with_response(response) + mock_session_cls.return_value = mock_session - with patch.object(apim, '_print_response_code'): - result = apim._multiRequest(HTTP_VERB.GET, '/test', 1) + with patch.object(apim, '_print_response_code'): + result = apim._multiRequest(HTTP_VERB.GET, '/test', 1) assert len(result) == 1 assert result[0]['response'] == 'Plain text response' @pytest.mark.unit -@patch('apimrequests.time.sleep') 
-@patch('apimrequests.requests.Session') -def test_multi_request_sleep_zero(mock_session_class, mock_sleep, apim): +def test_multi_request_sleep_zero(apim): """Test _multiRequest respects sleepMs=0 without sleeping.""" - mock_session = MagicMock() - mock_session_class.return_value = mock_session + response = create_mock_http_response(json_data={'ok': True}) - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = {'ok': True} - mock_response.text = '{"ok": true}' - mock_session.request.return_value = mock_response + with patch('apimrequests.requests.Session') as mock_session_cls, \ + patch('apimrequests.time.sleep') as mock_sleep: + mock_session = create_mock_session_with_response(response) + mock_session_cls.return_value = mock_session - with patch.object(apim, '_print_response_code'): - result = apim._multiRequest(HTTP_VERB.GET, '/sleep', 1, sleepMs=0) + with patch.object(apim, '_print_response_code'): + result = apim._multiRequest(HTTP_VERB.GET, '/sleep', 1, sleepMs=0) assert result[0]['status_code'] == 200 mock_sleep.assert_not_called() @pytest.mark.unit -@patch('apimrequests.time.sleep') -@patch('apimrequests.requests.Session') -def test_multi_request_sleep_positive(mock_session_class, mock_sleep, apim): +def test_multi_request_sleep_positive(apim): """Test _multiRequest sleeps when sleepMs is positive.""" - mock_session = MagicMock() - mock_session_class.return_value = mock_session + response = create_mock_http_response(json_data={'ok': True}) - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = {'ok': True} - mock_response.text = '{"ok": true}' - mock_session.request.return_value = mock_response + with patch('apimrequests.requests.Session') as mock_session_cls, \ + patch('apimrequests.time.sleep') as mock_sleep: + mock_session = create_mock_session_with_response(response) + mock_session_cls.return_value = mock_session - with patch.object(apim, '_print_response_code'): - apim._multiRequest(HTTP_VERB.GET, '/sleep', 2, sleepMs = 150) + with patch.object(apim, '_print_response_code'): + apim._multiRequest(HTTP_VERB.GET, '/sleep', 2, sleepMs=150) mock_sleep.assert_called_once_with(0.15) @pytest.mark.unit -@patch('apimrequests.print_val') -def test_print_response_non_200_status(mock_print_val, apim): +def test_print_response_non_200_status(apim, apimrequests_patches): """Test _print_response method with non-200 status code.""" - mock_response = MagicMock() - mock_response.status_code = 404 + mock_response = create_mock_http_response( + status_code=404, + headers={'Content-Type': 'application/json'}, + text='{"error": "not found"}' + ) mock_response.reason = 'Not Found' - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.text = '{"error": "not found"}' with patch.object(apim, '_print_response_code'): apim._print_response(mock_response) # Should print response body directly for non-200 status - mock_print_val.assert_any_call('Response body', '{"error": "not found"}', True) + apimrequests_patches.print_val.assert_any_call('Response body', '{"error": "not found"}', True) @pytest.mark.unit -@patch('apimrequests.print_val') -def test_print_response_200_invalid_json(mock_print_val, apim): +def test_print_response_200_invalid_json(apim, apimrequests_patches): """Test _print_response handles invalid JSON body for 200 responses.""" - mock_response = 
MagicMock() - mock_response.status_code = 200 + mock_response = create_mock_http_response( + status_code=200, + headers={'Content-Type': 'application/json'}, + text='not valid json' + ) mock_response.reason = 'OK' - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.text = 'not valid json' with patch.object(apim, '_print_response_code'): apim._print_response(mock_response) - mock_print_val.assert_any_call('Response body', 'not valid json', True) + apimrequests_patches.print_val.assert_any_call('Response body', 'not valid json', True) @pytest.mark.unit -@patch('apimrequests.print_val') -def test_print_response_200_valid_json(mock_print_val, apim): +def test_print_response_200_valid_json(apim, apimrequests_patches): """Test _print_response prints formatted JSON when parse succeeds.""" - mock_response = MagicMock() - mock_response.status_code = 200 + mock_response = create_mock_http_response( + status_code=200, + json_data={'alpha': 1} + ) mock_response.reason = 'OK' - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.text = '{"alpha": 1}' with patch.object(apim, '_print_response_code'): apim._print_response(mock_response) - mock_print_val.assert_any_call('Response body', '{\n "alpha": 1\n}', True) + apimrequests_patches.print_val.assert_any_call('Response body', '{\n "alpha": 1\n}', True) @pytest.mark.unit -@patch('apimrequests.print_val') -def test_print_response_code_success_and_error(mock_print_val, apim): +def test_print_response_code_success_and_error(apim, apimrequests_patches): """Test _print_response_code color formatting for success and error codes.""" class DummyResponse: status_code = 200 @@ -550,44 +477,35 @@ class ErrorResponse: apim._print_response_code(ErrorResponse()) - messages = [record.args[1] for record in mock_print_val.call_args_list] + messages = [record.args[1] for record in apimrequests_patches.print_val.call_args_list] assert any('200 - OK' in msg for msg in messages) assert any('500 - Server Error' in msg for msg in messages) @pytest.mark.unit -@patch('apimrequests.requests.get') -@patch('apimrequests.print_info') -@patch('apimrequests.print_ok') -@patch('apimrequests.time.sleep') -def test_poll_async_operation_success(mock_sleep, mock_print_ok, mock_print_info, mock_get, apim): +def test_poll_async_operation_success(apim, apimrequests_patches): """Test _poll_async_operation method with successful completion.""" - mock_response = MagicMock() - mock_response.status_code = 200 - mock_get.return_value = mock_response - - result = apim._poll_async_operation('http://example.com/operation/123') + mock_response = create_mock_http_response(status_code=200) + with patch('apimrequests.requests.get', return_value=mock_response): + with patch('apimrequests.time.sleep'): + result = apim._poll_async_operation('http://example.com/operation/123') assert result == mock_response - mock_print_ok.assert_called_once_with('Async operation completed successfully!') + apimrequests_patches.print_ok.assert_called_once_with('Async operation completed successfully!') @pytest.mark.unit -@patch('apimrequests.requests.get') -@patch('apimrequests.print_info') -@patch('apimrequests.print_error') -@patch('apimrequests.time.sleep') -def test_poll_async_operation_in_progress_then_success(mock_sleep, mock_print_error, mock_print_info, mock_get, apim): +def test_poll_async_operation_in_progress_then_success(apim, apimrequests_patches): """Test _poll_async_operation method with in-progress then success.""" # First call returns 202 (in progress), 
second call returns 200 (complete) responses = [ MagicMock(status_code=202), MagicMock(status_code=200) ] - mock_get.side_effect = responses - - result = apim._poll_async_operation('http://example.com/operation/123', poll_interval=1) + with patch('apimrequests.requests.get', side_effect=responses) as mock_get, \ + patch('apimrequests.time.sleep') as mock_sleep: + result = apim._poll_async_operation('http://example.com/operation/123', poll_interval=1) assert result == responses[1] # Should return the final success response assert mock_get.call_count == 2 @@ -595,39 +513,28 @@ def test_poll_async_operation_in_progress_then_success(mock_sleep, mock_print_er @pytest.mark.unit -@patch('apimrequests.requests.get') -@patch('apimrequests.print_error') -def test_poll_async_operation_unexpected_status(mock_print_error, mock_get, apim): +def test_poll_async_operation_unexpected_status(apim, apimrequests_patches): """Test _poll_async_operation method with unexpected status code.""" - mock_response = MagicMock() - mock_response.status_code = 500 - mock_get.return_value = mock_response - - result = apim._poll_async_operation('http://example.com/operation/123') + mock_response = MagicMock(status_code=500) + with patch('apimrequests.requests.get', return_value=mock_response): + result = apim._poll_async_operation('http://example.com/operation/123') assert result == mock_response # Should return the error response - mock_print_error.assert_called_with('Unexpected status code during polling: 500') + apimrequests_patches.print_error.assert_called_with('Unexpected status code during polling: 500') @pytest.mark.unit -@patch('apimrequests.requests.get') -@patch('apimrequests.print_error') -def test_poll_async_operation_request_exception(mock_print_error, mock_get, apim): +def test_poll_async_operation_request_exception(apim, apimrequests_patches): """Test _poll_async_operation method with request exception.""" - mock_get.side_effect = requests.exceptions.RequestException('Connection error') - - result = apim._poll_async_operation('http://example.com/operation/123') + with patch('apimrequests.requests.get', side_effect=requests.exceptions.RequestException('Connection error')): + result = apim._poll_async_operation('http://example.com/operation/123') assert result is None - mock_print_error.assert_called_with('Error polling operation: Connection error') + apimrequests_patches.print_error.assert_called_with('Error polling operation: Connection error') @pytest.mark.unit -@patch('apimrequests.requests.get') -@patch('apimrequests.print_error') -@patch('apimrequests.time.time') -@patch('apimrequests.time.sleep') -def test_poll_async_operation_timeout(mock_sleep, mock_time, mock_print_error, mock_get, apim): +def test_poll_async_operation_timeout(apim, apimrequests_patches): """Test _poll_async_operation method with timeout.""" # Mock time to simulate timeout. 
# Note: patching `time.time` affects the shared `time` module, which is also @@ -639,23 +546,19 @@ def time_side_effect(): return times.pop(0) return times[0] - mock_time.side_effect = time_side_effect + mock_response = MagicMock(status_code=202) - mock_response = MagicMock() - mock_response.status_code = 202 - mock_get.return_value = mock_response - - result = apim._poll_async_operation('http://example.com/operation/123', timeout=60) + with patch('apimrequests.requests.get', return_value=mock_response), \ + patch('apimrequests.time.sleep'), \ + patch('apimrequests.time.time', side_effect=time_side_effect): + result = apim._poll_async_operation('http://example.com/operation/123', timeout=60) assert result is None - mock_print_error.assert_called_with('Async operation timeout reached after 60 seconds') + apimrequests_patches.print_error.assert_called_with('Async operation timeout reached after 60 seconds') @pytest.mark.unit -@patch('apimrequests.requests.request') -@patch('apimrequests.print_message') -@patch('apimrequests.print_info') -def test_single_post_async_success_with_location(mock_print_info, mock_print_message, mock_request, apim): +def test_single_post_async_success_with_location(apim, apimrequests_patches): """Test singlePostAsync method with successful async operation.""" # Mock initial 202 response with Location header initial_response = MagicMock() @@ -663,54 +566,47 @@ def test_single_post_async_success_with_location(mock_print_info, mock_print_mes initial_response.headers = {'Location': 'http://example.com/operation/123'} # Mock final 200 response - final_response = MagicMock() - final_response.status_code = 200 - final_response.headers = {'Content-Type': 'application/json'} - final_response.json.return_value = {'result': 'completed'} - final_response.text = '{"result": "completed"}' + final_response = create_mock_http_response( + status_code=200, + json_data={'result': 'completed'} + ) - mock_request.return_value = initial_response + apimrequests_patches.request.return_value = initial_response with patch.object(apim, '_poll_async_operation', return_value=final_response) as mock_poll: with patch.object(apim, '_print_response') as mock_print_response: result = apim.singlePostAsync('/test', data={'test': 'data'}, msg='Async test') - mock_print_message.assert_called_once_with('Async test', blank_above=True) + apimrequests_patches.print_message.assert_called_once_with('Async test', blank_above=True) mock_poll.assert_called_once() mock_print_response.assert_called_once_with(final_response) assert result == '{\n "result": "completed"\n}' @pytest.mark.unit -@patch('apimrequests.requests.request') -@patch('apimrequests.print_info') -@patch('apimrequests.print_error') -def test_single_post_async_no_location_header(mock_print_error, mock_print_info, mock_request, apim): +def test_single_post_async_no_location_header(apim, apimrequests_patches): """Test singlePostAsync method with 202 response but no Location header.""" mock_response = MagicMock() mock_response.status_code = 202 mock_response.headers = {} # No Location header - mock_request.return_value = mock_response + apimrequests_patches.request.return_value = mock_response with patch.object(apim, '_print_response') as mock_print_response: result = apim.singlePostAsync('/test') - mock_print_error.assert_called_once_with('No Location header found in 202 response') + apimrequests_patches.print_error.assert_called_once_with('No Location header found in 202 response') mock_print_response.assert_called_once_with(mock_response) assert 
result is None @pytest.mark.unit -@patch('apimrequests.requests.request') -@patch('apimrequests.print_info') -def test_single_post_async_non_async_response(mock_print_info, mock_request, apim): +def test_single_post_async_non_async_response(apim, apimrequests_patches): """Test singlePostAsync method with non-async (immediate) response.""" - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = {'result': 'immediate'} - mock_response.text = '{"result": "immediate"}' - mock_request.return_value = mock_response + mock_response = create_mock_http_response( + status_code=200, + json_data={'result': 'immediate'} + ) + apimrequests_patches.request.return_value = mock_response with patch.object(apim, '_print_response') as mock_print_response: result = apim.singlePostAsync('/test') @@ -720,68 +616,60 @@ def test_single_post_async_non_async_response(mock_print_info, mock_request, api @pytest.mark.unit -@patch('apimrequests.requests.request') -@patch('apimrequests.print_error') -def test_single_post_async_request_exception(mock_print_error, mock_request, apim): +def test_single_post_async_request_exception(apim, apimrequests_patches): """Test singlePostAsync method with request exception.""" - mock_request.side_effect = requests.exceptions.RequestException('Connection error') + apimrequests_patches.request.side_effect = requests.exceptions.RequestException('Connection error') result = apim.singlePostAsync('/test') assert result is None - mock_print_error.assert_called_once_with('Error making request: Connection error') + apimrequests_patches.print_error.assert_called_once_with('Error making request: Connection error') @pytest.mark.unit -@patch('apimrequests.requests.request') -@patch('apimrequests.print_error') -def test_single_post_async_failed_polling(mock_print_error, mock_request, apim): +def test_single_post_async_failed_polling(apim, apimrequests_patches): """Test singlePostAsync method with failed async operation polling.""" initial_response = MagicMock() initial_response.status_code = 202 initial_response.headers = {'Location': 'http://example.com/operation/123'} - mock_request.return_value = initial_response + apimrequests_patches.request.return_value = initial_response with patch.object(apim, '_poll_async_operation', return_value=None) as mock_poll: result = apim.singlePostAsync('/test') mock_poll.assert_called_once() - mock_print_error.assert_called_once_with('Async operation failed or timed out') + apimrequests_patches.print_error.assert_called_once_with('Async operation failed or timed out') assert result is None @pytest.mark.unit -@patch('apimrequests.requests.request') -@patch('apimrequests.print_info') -def test_single_post_async_path_without_leading_slash(mock_print_info, mock_request, apim): +def test_single_post_async_path_without_leading_slash(apim, apimrequests_patches): """Test singlePostAsync method with PATH without leading slash.""" - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'application/json'} - mock_response.json.return_value = {'result': 'ok'} - mock_response.text = '{"result": "ok"}' - mock_request.return_value = mock_response + mock_response = create_mock_http_response( + status_code=200, + json_data={'result': 'ok'} + ) + apimrequests_patches.request.return_value = mock_response with patch.object(apim, '_print_response'): apim.singlePostAsync('test') # Should call with the corrected URL expected_url = 
DEFAULT_URL + '/test' - mock_request.assert_called_once() - args, _kwargs = mock_request.call_args + apimrequests_patches.request.assert_called_once() + args, _kwargs = apimrequests_patches.request.call_args assert args[1] == expected_url @pytest.mark.unit -@patch('apimrequests.requests.request') -@patch('apimrequests.print_info') -def test_single_post_async_non_json_response(mock_print_info, mock_request, apim): +def test_single_post_async_non_json_response(apim, apimrequests_patches): """Test singlePostAsync method with non-JSON response.""" - mock_response = MagicMock() - mock_response.status_code = 200 - mock_response.headers = {'Content-Type': 'text/plain'} - mock_response.text = 'Plain text result' - mock_request.return_value = mock_response + mock_response = create_mock_http_response( + status_code=200, + headers={'Content-Type': 'text/plain'}, + text='Plain text result' + ) + apimrequests_patches.request.return_value = mock_response with patch.object(apim, '_print_response'): result = apim.singlePostAsync('/test') diff --git a/tests/python/test_apimtypes.py b/tests/python/test_apimtypes.py index 1ff157c..767ad65 100644 --- a/tests/python/test_apimtypes.py +++ b/tests/python/test_apimtypes.py @@ -1,1239 +1,800 @@ """ -Unit tests for apimtypes.py. +Unit tests for apimtypes.py """ +import importlib from pathlib import Path -import json +from unittest.mock import MagicMock, patch import pytest +import apimtypes # APIM Samples imports from apimtypes import API, APIMNetworkMode, APIM_SKU, APIOperation, BACKEND_XML_POLICY_PATH, DEFAULT_XML_POLICY_PATH, GET_APIOperation, \ GET_APIOperation2, get_project_root, HELLO_WORLD_XML_POLICY_PATH, HTTP_VERB, INFRASTRUCTURE, NamedValue, Output, PolicyFragment, \ POST_APIOperation, Product, REQUEST_HEADERS_XML_POLICY_PATH, Role, SUBSCRIPTION_KEY_PARAMETER_NAME, SLEEP_TIME_BETWEEN_REQUESTS_MS +from test_helpers import assert_policy_fragment_structure # ------------------------------ -# CONSTANTS +# BASE TEST CLASS FOR API # ------------------------------ -EXAMPLE_NAME = 'test-api' -EXAMPLE_DISPLAY_NAME = 'Test API' -EXAMPLE_PATH = '/test' -EXAMPLE_DESCRIPTION = 'A test API.' 
-EXAMPLE_POLICY_XML = '' -EXAMPLE_PRODUCT_NAMES = ['product1', 'product2'] +class TestAPICreation: + """Test suite for API object creation and attributes.""" + + @pytest.fixture + def base_api_params(self): + """Common API parameters.""" + return { + 'name': 'test-api', + 'displayName': 'Test API', + 'path': '/test', + 'description': 'A test API.', + 'policyXml': '', + 'operations': None + } + + def test_basic_creation(self, base_api_params): + """Test creation of API object with required fields.""" + api = API(**base_api_params) + + assert api.name == base_api_params['name'] + assert api.displayName == base_api_params['displayName'] + assert api.path == base_api_params['path'] + assert api.description == base_api_params['description'] + assert api.policyXml == base_api_params['policyXml'] + assert api.operations == [] + assert api.tags == [] + assert api.productNames == [] + + @pytest.mark.parametrize('tags', [ + ['tag1', 'tag2'], + ['single-tag'], + ['foo', 'bar', 'baz'] + ]) + def test_creation_with_tags(self, base_api_params, tags): + """Test creation of API object with tags.""" + api = API(**base_api_params, tags=tags) + assert api.tags == tags + + @pytest.mark.parametrize('product_names', [ + ['product1', 'product2'], + ['single-product'], + ['p1', 'p2', 'p3'] + ]) + def test_creation_with_product_names(self, base_api_params, product_names): + """Test creation of API object with product names.""" + api = API(**base_api_params, productNames=product_names) + assert api.productNames == product_names + + def test_creation_with_both_tags_and_products(self, base_api_params): + """Test creation of API object with both tags and product names.""" + tags = ['tag1', 'tag2'] + product_names = ['product1', 'product2'] + + api = API(**base_api_params, tags=tags, productNames=product_names) + + assert api.tags == tags + assert api.productNames == product_names + + @pytest.mark.parametrize('missing_field', [ + 'name', + 'displayName', + 'path', + 'description' + ]) + def test_missing_required_fields(self, base_api_params, missing_field): + """Test that missing required fields raise TypeError.""" + params = base_api_params.copy() + del params[missing_field] + + with pytest.raises(TypeError): + API(**params) + + +class TestAPIToDictSerialization: + """Test suite for API.to_dict() method.""" + + @pytest.fixture + def base_api(self): + """Create a basic API instance.""" + return API( + name='test-api', + displayName='Test API', + path='/test', + description='A test API.', + policyXml='' + ) + + def test_includes_tags_when_present(self, base_api): + """Test that to_dict includes tags when present.""" + base_api.tags = ['foo', 'bar'] + d = base_api.to_dict() + assert 'tags' in d + assert d['tags'] == ['foo', 'bar'] + + def test_omits_tags_when_empty(self, base_api): + """Test that to_dict omits tags when not set or empty.""" + d = base_api.to_dict() + assert 'tags' not in d or d['tags'] == [] + + def test_includes_product_names_when_present(self, base_api): + """Test that to_dict includes productNames when present.""" + base_api.productNames = ['product1', 'product2'] + d = base_api.to_dict() + assert 'productNames' in d + assert d['productNames'] == ['product1', 'product2'] + + def test_omits_product_names_when_empty(self, base_api): + """Test that to_dict omits productNames when not set or empty.""" + d = base_api.to_dict() + assert 'productNames' not in d or d['productNames'] == [] + + +class TestAPIComparisons: + """Test suite for API equality and inequality.""" + + @pytest.fixture + def 
sample_api(self): + """Create a sample API for comparison tests.""" + return API( + name='test-api', + displayName='Test API', + path='/test', + description='A test API.', + policyXml='', + tags=['a', 'b'] + ) + + def test_equality_same_attributes(self, sample_api): + """Test equality comparison for identical API objects.""" + api2 = API( + name='test-api', + displayName='Test API', + path='/test', + description='A test API.', + policyXml='', + tags=['a', 'b'] + ) + assert sample_api == api2 + + @pytest.mark.parametrize('changed_attr,new_value', [ + ('name', 'other-api'), + ('tags', ['x']), + ('productNames', ['different-product']) + ]) + def test_inequality_different_attributes(self, sample_api, changed_attr, new_value): + """Test inequality for API objects with different attributes.""" + params = { + 'name': 'test-api', + 'displayName': 'Test API', + 'path': '/test', + 'description': 'A test API.', + 'policyXml': '', + 'tags': ['a', 'b'] + } + params[changed_attr] = new_value + + api2 = API(**params) + assert sample_api != api2 + + def test_repr(self, sample_api): + """Test __repr__ method of API.""" + result = repr(sample_api) + assert 'API' in result + assert sample_api.name in result + assert sample_api.displayName in result # ------------------------------ -# TEST METHODS +# ENUM TESTS # ------------------------------ -@pytest.mark.unit -def test_api_creation(): - """Test creation of API object and its attributes.""" - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None - ) - - assert api.name == EXAMPLE_NAME - assert api.displayName == EXAMPLE_DISPLAY_NAME - assert api.path == EXAMPLE_PATH - assert api.description == EXAMPLE_DESCRIPTION - assert api.policyXml == EXAMPLE_POLICY_XML - assert api.operations == [] - assert api.tags == [] - assert api.productNames == [] - -@pytest.mark.unit -def test_api_creation_with_tags(): - """Test creation of API object with tags.""" - tags = ['tag1', 'tag2'] - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - tags = tags - ) - assert api.tags == tags - -@pytest.mark.unit -def test_api_creation_with_product_names(): - """Test creation of API object with product names.""" - product_names = ['product1', 'product2'] - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - productNames = product_names - ) - assert api.productNames == product_names - -@pytest.mark.unit -def test_api_to_dict_includes_tags(): - """Test that to_dict includes tags when present.""" - tags = ['foo', 'bar'] - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - tags = tags - ) - d = api.to_dict() - assert 'tags' in d - assert d['tags'] == tags - -@pytest.mark.unit -def test_api_to_dict_omits_tags_when_empty(): - """Test that to_dict omits tags when not set or empty.""" - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None - ) - d = api.to_dict() - assert 'tags' not in d or d['tags'] == [] - -@pytest.mark.unit -def 
test_api_to_dict_includes_product_names(): - """Test that to_dict includes productNames when present.""" - product_names = ['product1', 'product2'] - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - productNames = product_names - ) - d = api.to_dict() - assert 'productNames' in d - assert d['productNames'] == product_names - -@pytest.mark.unit -def test_api_to_dict_omits_product_names_when_empty(): - """Test that to_dict omits productNames when not set or empty.""" - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None - ) - d = api.to_dict() - assert 'productNames' not in d or d['productNames'] == [] - -@pytest.mark.unit -def test_api_with_both_tags_and_product_names(): - """Test creation of API object with both tags and product names.""" - tags = ['tag1', 'tag2'] - product_names = ['product1', 'product2'] - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - tags = tags, - productNames = product_names - ) - assert api.tags == tags - assert api.productNames == product_names - - d = api.to_dict() - assert d['tags'] == tags - assert d['productNames'] == product_names - -@pytest.mark.unit -def test_api_repr(): - """Test __repr__ method of API.""" - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None - ) - result = repr(api) - assert 'API' in result - assert EXAMPLE_NAME in result - assert EXAMPLE_DISPLAY_NAME in result - -@pytest.mark.unit -def test_api_equality(): - """Test equality comparison for API objects. - """ - api1 = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - tags = ['a', 'b'] - ) - api2 = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - tags = ['a', 'b'] - ) - assert api1 == api2 - - # Different tags should not be equal - api3 = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - tags = ['x'] - ) - assert api1 != api3 - - # Different product names should not be equal - api4 = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - tags = ['a', 'b'], - productNames = ['different-product'] - ) - assert api1 != api4 - -def test_api_inequality(): - """ - Test inequality for API objects with different attributes. 
- """ - api1 = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None - ) - api2 = API( - name = 'other-api', - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None - ) - assert api1 != api2 - -def test_api_missing_fields(): - """ - Test that missing required fields raise TypeError. - """ - with pytest.raises(TypeError): - API( - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML +class TestEnums: + """Test suite for all enum types.""" + + @pytest.mark.parametrize('enum_value,expected', [ + (APIMNetworkMode.PUBLIC, 'Public'), + (APIMNetworkMode.EXTERNAL_VNET, 'External'), + (APIMNetworkMode.INTERNAL_VNET, 'Internal'), + (APIMNetworkMode.NONE, 'None') + ]) + def test_apim_network_mode(self, enum_value, expected): + """Test APIMNetworkMode enum values.""" + assert enum_value == expected + + @pytest.mark.parametrize('enum_value,expected', [ + (APIM_SKU.DEVELOPER, 'Developer'), + (APIM_SKU.BASIC, 'Basic'), + (APIM_SKU.STANDARD, 'Standard'), + (APIM_SKU.PREMIUM, 'Premium'), + (APIM_SKU.BASICV2, 'Basicv2'), + (APIM_SKU.STANDARDV2, 'Standardv2'), + (APIM_SKU.PREMIUMV2, 'Premiumv2') + ]) + def test_apim_sku(self, enum_value, expected): + """Test APIM_SKU enum values.""" + assert enum_value == expected + + @pytest.mark.parametrize('sku', [ + APIM_SKU.DEVELOPER, + APIM_SKU.BASIC, + APIM_SKU.STANDARD, + APIM_SKU.PREMIUM + ]) + def test_apim_sku_is_v1(self, sku): + """Test APIM_SKU.is_v1() method for v1 SKUs.""" + assert sku.is_v1() is True + assert sku.is_v2() is False + + @pytest.mark.parametrize('sku', [ + APIM_SKU.BASICV2, + APIM_SKU.STANDARDV2, + APIM_SKU.PREMIUMV2 + ]) + def test_apim_sku_is_v2(self, sku): + """Test APIM_SKU.is_v2() method for v2 SKUs.""" + assert sku.is_v2() is True + assert sku.is_v1() is False + + @pytest.mark.parametrize('enum_value,expected', [ + (HTTP_VERB.GET, 'GET'), + (HTTP_VERB.POST, 'POST'), + (HTTP_VERB.PUT, 'PUT'), + (HTTP_VERB.DELETE, 'DELETE'), + (HTTP_VERB.PATCH, 'PATCH'), + (HTTP_VERB.OPTIONS, 'OPTIONS'), + (HTTP_VERB.HEAD, 'HEAD') + ]) + def test_http_verb(self, enum_value, expected): + """Test HTTP_VERB enum values.""" + assert enum_value == expected + + @pytest.mark.parametrize('enum_value,expected', [ + (INFRASTRUCTURE.SIMPLE_APIM, 'simple-apim'), + (INFRASTRUCTURE.APIM_ACA, 'apim-aca'), + (INFRASTRUCTURE.AFD_APIM_PE, 'afd-apim-pe') + ]) + def test_infrastructure(self, enum_value, expected): + """Test INFRASTRUCTURE enum values.""" + assert enum_value == expected + + @pytest.mark.parametrize('enum_class,invalid_value', [ + (APIMNetworkMode, 'invalid'), + (APIM_SKU, 'invalid'), + (HTTP_VERB, 'FOO'), + (INFRASTRUCTURE, 'bad') + ]) + def test_invalid_enum_values(self, enum_class, invalid_value): + """Test that invalid enum values raise ValueError.""" + with pytest.raises(ValueError): + enum_class(invalid_value) + + +# ------------------------------ +# API OPERATION TESTS +# ------------------------------ + +class TestAPIOperation: + """Test suite for APIOperation and related classes.""" + + def test_basic_operation_to_dict(self): + """Test APIOperation to_dict method.""" + op = APIOperation( + name='op1', + displayName='Operation 1', + urlTemplate='/foo', + method=HTTP_VERB.GET, + description='desc', + policyXml='' ) + d = op.to_dict() + + assert d['name'] == 'op1' + 
assert d['displayName'] == 'Operation 1' + assert d['urlTemplate'] == '/foo' + assert d['method'] == HTTP_VERB.GET + assert d['description'] == 'desc' + assert d['policyXml'] == '' + + def test_get_operation(self): + """Test GET_APIOperation convenience class.""" + op = GET_APIOperation(description='desc', policyXml='') + + assert op.name == 'GET' + assert op.method == HTTP_VERB.GET + assert op.urlTemplate == '/' + assert op.description == 'desc' + assert op.policyXml == '' + assert op.to_dict()['method'] == HTTP_VERB.GET + + def test_get_operation2(self): + """Test GET_APIOperation2 class with custom parameters.""" + op = GET_APIOperation2( + name='get-users', + displayName='Get Users', + urlTemplate='/users', + description='Get all users', + policyXml='' + ) + + assert op.name == 'get-users' + assert op.displayName == 'Get Users' + assert op.urlTemplate == '/users' + assert op.method == HTTP_VERB.GET + assert op.description == 'Get all users' + assert op.policyXml == '' + assert op.to_dict()['method'] == HTTP_VERB.GET + + def test_post_operation(self): + """Test POST_APIOperation convenience class.""" + op = POST_APIOperation(description='desc', policyXml='') + + assert op.name == 'POST' + assert op.method == HTTP_VERB.POST + assert op.urlTemplate == '/' + assert op.description == 'desc' + assert op.policyXml == '' + assert op.to_dict()['method'] == HTTP_VERB.POST + + def test_invalid_method(self): + """Test that invalid HTTP method raises ValueError.""" + with pytest.raises(ValueError): + APIOperation( + name='bad', + displayName='Bad', + urlTemplate='/bad', + method='INVALID', + description='desc', + policyXml='' + ) + - with pytest.raises(TypeError): - API( - name = EXAMPLE_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML +# ------------------------------ +# PRODUCT TESTS +# ------------------------------ + +class TestProductCreation: + """Test suite for Product object creation.""" + + @pytest.fixture + def base_product_params(self): + """Common Product parameters.""" + return { + 'name': 'hr', + 'displayName': 'Human Resources', + 'description': 'HR product description' + } + + def test_basic_creation(self, base_product_params): + """Test creation of Product object with defaults.""" + product = Product(**base_product_params) + + assert product.name == 'hr' + assert product.displayName == 'Human Resources' + assert product.description == 'HR product description' + assert product.state == 'published' + assert product.subscriptionRequired is True + assert product.policyXml is not None + + @pytest.mark.parametrize('state,subscription_req,approval_req', [ + ('published', True, False), + ('notPublished', False, False), + ('published', True, True) + ]) + def test_creation_with_custom_values(self, base_product_params, state, subscription_req, approval_req): + """Test creation of Product with various custom values.""" + custom_policy = '' + product = Product( + **base_product_params, + state=state, + subscriptionRequired=subscription_req, + approvalRequired=approval_req, + policyXml=custom_policy ) - with pytest.raises(TypeError): - API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML + assert product.state == state + assert product.subscriptionRequired is subscription_req + assert product.approvalRequired is approval_req + assert product.policyXml == custom_policy + + def test_approval_required_default(self, base_product_params): + """Test that approvalRequired 
defaults to False.""" + product = Product(**base_product_params) + assert product.approvalRequired is False + + def test_product_fallback_policy_when_file_not_found(self, monkeypatch, base_product_params): + """Test Product uses fallback policy when default policy file is not found.""" + def mock_read_policy_xml_raise(path): + raise FileNotFoundError(f'Policy file not found: {path}') + + monkeypatch.setattr(apimtypes, '_read_policy_xml', mock_read_policy_xml_raise) + + product = Product(**base_product_params) + assert product.policyXml is not None + assert '' in product.policyXml + assert '' in product.policyXml + + +class TestProductSerialization: + """Test suite for Product.to_dict() method.""" + + def test_to_dict_all_fields(self): + """Test that to_dict includes all required fields.""" + custom_policy = '' + product = Product( + name='hr', + displayName='Human Resources', + description='HR product', + state='published', + subscriptionRequired=True, + approvalRequired=True, + policyXml=custom_policy ) + d = product.to_dict() + + assert d['name'] == 'hr' + assert d['displayName'] == 'Human Resources' + assert d['description'] == 'HR product' + assert d['state'] == 'published' + assert d['subscriptionRequired'] is True + assert d['approvalRequired'] is True + assert d['policyXml'] == custom_policy + + +# ------------------------------ +# POLICY FRAGMENT TESTS +# ------------------------------ + +class TestPolicyFragment: + """Test suite for PolicyFragment objects.""" - with pytest.raises(TypeError): - API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - policyXml = EXAMPLE_POLICY_XML + def test_basic_creation(self): + """Test creation of PolicyFragment object.""" + pf = PolicyFragment( + name='Test-Fragment', + policyXml='test', + description='Test fragment' ) + assert pf.name == 'Test-Fragment' + assert pf.policyXml == 'test' + assert pf.description == 'Test fragment' + assert_policy_fragment_structure(pf) + + def test_to_dict(self): + """Test PolicyFragment to_dict method.""" + pf = PolicyFragment( + name='Test-Fragment', + policyXml='test', + description='Test fragment' + ) + d = pf.to_dict() + + assert d['name'] == 'Test-Fragment' + assert d['policyXml'] == 'test' + assert d['description'] == 'Test fragment' + # ------------------------------ -# ENUMS +# OUTPUT CLASS TESTS # ------------------------------ -def test_apimnetworkmode_enum(): - assert APIMNetworkMode.PUBLIC == 'Public' - assert APIMNetworkMode.EXTERNAL_VNET == 'External' - assert APIMNetworkMode.INTERNAL_VNET == 'Internal' - assert APIMNetworkMode.NONE == 'None' - with pytest.raises(ValueError): - APIMNetworkMode('invalid') - -def test_apim_sku_enum(): - assert APIM_SKU.DEVELOPER == 'Developer' - assert APIM_SKU.BASIC == 'Basic' - assert APIM_SKU.STANDARD == 'Standard' - assert APIM_SKU.PREMIUM == 'Premium' - assert APIM_SKU.BASICV2 == 'Basicv2' - assert APIM_SKU.STANDARDV2 == 'Standardv2' - assert APIM_SKU.PREMIUMV2 == 'Premiumv2' - with pytest.raises(ValueError): - APIM_SKU('invalid') - -def test_http_verb_enum(): - assert HTTP_VERB.GET == 'GET' - assert HTTP_VERB.POST == 'POST' - assert HTTP_VERB.PUT == 'PUT' - assert HTTP_VERB.DELETE == 'DELETE' - assert HTTP_VERB.PATCH == 'PATCH' - assert HTTP_VERB.OPTIONS == 'OPTIONS' - assert HTTP_VERB.HEAD == 'HEAD' - with pytest.raises(ValueError): - HTTP_VERB('FOO') - -def test_infrastructure_enum(): - assert INFRASTRUCTURE.SIMPLE_APIM == 'simple-apim' - assert INFRASTRUCTURE.APIM_ACA == 'apim-aca' - assert INFRASTRUCTURE.AFD_APIM_PE == 
'afd-apim-pe' - with pytest.raises(ValueError): - INFRASTRUCTURE('bad') +class TestOutput: + """Test suite for Output class.""" + + def test_basic_creation(self): + """Test Output creation with text.""" + output = Output(success=True, text='test output') + + assert output.success is True + assert output.text == 'test output' + assert output.json_data is None + + def test_json_parsing_valid(self): + """Test Output correctly parses valid JSON.""" + json_str = '{"key": "value", "number": 42}' + output = Output(success=True, text=json_str) + + assert output.json_data is not None + assert output.json_data['key'] == 'value' + assert output.json_data['number'] == 42 + + def test_json_parsing_invalid(self): + """Test Output handles invalid JSON gracefully.""" + output = Output(success=True, text='not json') + assert output.json_data is None + + def test_get_method_with_properties_structure(self): + """Test Output.get() with standard deployment output structure.""" + json_text = '''{"properties": {"outputs": {"endpoint": {"value": "https://test.com"}}}}''' + output = Output(success=True, text=json_text) + + result = output.get('endpoint', suppress_logging=True) + assert result == 'https://test.com' + + def test_get_method_with_simple_structure(self): + """Test Output.get() with simple output structure.""" + json_text = '''{"endpoint": {"value": "https://simple.com"}}''' + output = Output(success=True, text=json_text) + + result = output.get('endpoint', suppress_logging=True) + assert result == 'https://simple.com' + + def test_get_method_key_not_found(self): + """Test Output.get() when key is not found.""" + json_text = '''{"properties": {"outputs": {"other": {"value": "val"}}}}''' + output = Output(success=True, text=json_text) + + result = output.get('missing', suppress_logging=True) + assert result is None + + def test_get_method_key_not_found_with_label_raises(self): + """Test Output.get() raises when key not found and label provided.""" + json_text = '''{"properties": {"outputs": {"other": {"value": "val"}}}}''' + output = Output(success=True, text=json_text) + + with pytest.raises(Exception): + output.get('missing', label='Test Label', suppress_logging=True) + + def test_get_method_with_label_and_secure_masking(self): + """Test Output.get() with label and secure masking.""" + json_text = '''{"properties": {"outputs": {"secret": {"value": "supersecretvalue"}}}}''' + output = Output(success=True, text=json_text) + + result = output.get('secret', label='Secret', secure=True) + assert result == 'supersecretvalue' + + def test_get_method_json_data_not_dict(self): + """Test Output.get() when json_data is not a dict.""" + output = Output(success=True, text='["array", "data"]') + + result = output.get('key', suppress_logging=True) + assert result is None + + def test_get_method_properties_not_dict(self): + """Test Output.get() when properties is not a dict.""" + json_text = '''{"properties": "not a dict"}''' + output = Output(success=True, text=json_text) + + result = output.get('key', suppress_logging=True) + assert result is None + + def test_get_method_outputs_not_dict(self): + """Test Output.get() when outputs is not a dict.""" + json_text = '''{"properties": {"outputs": "not a dict"}}''' + output = Output(success=True, text=json_text) + + result = output.get('key', suppress_logging=True) + assert result is None + + def test_get_method_output_entry_invalid(self): + """Test Output.get() when output entry is invalid.""" + json_text = '''{"properties": {"outputs": {"key": "no value field"}}}''' + 
output = Output(success=True, text=json_text) + + result = output.get('key', suppress_logging=True) + assert result is None + + def test_getjson_method_with_dict_value(self): + """Test Output.getJson() with dictionary value.""" + json_text = '''{"properties": {"outputs": {"config": {"value": {"key": "val"}}}}}''' + output = Output(success=True, text=json_text) + + result = output.getJson('config', suppress_logging=True) + assert result == {'key': 'val'} + + def test_getjson_method_with_string_json(self): + """Test Output.getJson() parsing string as JSON.""" + json_text = '''{"properties": {"outputs": {"data": {"value": "{\\"nested\\": \\"value\\"}"}}}}''' + output = Output(success=True, text=json_text) + + result = output.getJson('data', suppress_logging=True) + assert result == {'nested': 'value'} + + def test_getjson_method_with_python_literal(self): + """Test Output.getJson() parsing Python literal.""" + json_text = '''{"properties": {"outputs": {"data": {"value": "{'key': 'value'}"}}}}''' + output = Output(success=True, text=json_text) + + result = output.getJson('data', suppress_logging=True) + assert result == {'key': 'value'} + + def test_getjson_method_unparseable_string(self): + """Test Output.getJson() with unparseable string returns original value.""" + json_text = '''{"properties": {"outputs": {"data": {"value": "not valid json or literal"}}}}''' + output = Output(success=True, text=json_text) + + result = output.getJson('data') + assert result == 'not valid json or literal' + + def test_getjson_method_key_not_found(self): + """Test Output.getJson() when key not found.""" + json_text = '''{"properties": {"outputs": {"other": {"value": "val"}}}}''' + output = Output(success=True, text=json_text) + + result = output.getJson('missing', suppress_logging=True) + assert result is None + + def test_getjson_method_raises_with_label(self): + """Test Output.getJson() raises when key not found and label provided.""" + json_text = '''{"properties": {"outputs": {}}}''' + output = Output(success=True, text=json_text) + + with pytest.raises(Exception): + output.getJson('missing', label='Test') + + def test_getjson_method_json_data_not_dict(self): + """Test Output.getJson() when json_data is not a dict.""" + output = Output(success=True, text='[1, 2, 3]') + + result = output.getJson('key', suppress_logging=True) + assert result is None + + def test_getjson_method_properties_not_dict(self): + """Test Output.getJson() when properties is not a dict.""" + json_text = '''{"properties": ["not", "a", "dict"]}''' + output = Output(success=True, text=json_text) + + result = output.getJson('key', suppress_logging=True) + assert result is None + + def test_getjson_method_outputs_not_dict(self): + """Test Output.getJson() when outputs is not a dict.""" + json_text = '''{"properties": {"outputs": ["not", "dict"]}}''' + output = Output(success=True, text=json_text) + + result = output.getJson('key', suppress_logging=True) + assert result is None + + def test_getjson_method_output_entry_invalid(self): + """Test Output.getJson() when output entry is missing value field.""" + json_text = '''{"properties": {"outputs": {"key": {"no_value": "here"}}}}''' + output = Output(success=True, text=json_text) + + result = output.getJson('key', suppress_logging=True) + assert result is None + + def test_output_with_simple_structure_getjson(self): + """Test Output.getJson() with simple structure (no properties wrapper).""" + json_text = '''{"data": {"value": {"nested": "obj"}}}''' + output = Output(success=True, 
text=json_text) + + result = output.getJson('data', suppress_logging=True) + assert result == {'nested': 'obj'} # ------------------------------ -# OPERATION CLASSES +# NAMED VALUE TESTS # ------------------------------ -def test_apioperation_to_dict(): - op = APIOperation( - name='op1', - displayName='Operation 1', - urlTemplate='/foo', - method=HTTP_VERB.GET, - description='desc', - policyXml='' - ) - d = op.to_dict() - assert d['name'] == 'op1' - assert d['displayName'] == 'Operation 1' - assert d['urlTemplate'] == '/foo' - assert d['method'] == HTTP_VERB.GET - assert d['description'] == 'desc' - assert d['policyXml'] == '' - -def test_get_apioperation(): - op = GET_APIOperation(description='desc', policyXml='') - assert op.name == 'GET' - assert op.method == HTTP_VERB.GET - assert op.urlTemplate == '/' - assert op.description == 'desc' - assert op.policyXml == '' - d = op.to_dict() - assert d['method'] == HTTP_VERB.GET - -def test_post_apioperation(): - op = POST_APIOperation(description='desc', policyXml='') - assert op.name == 'POST' - assert op.method == HTTP_VERB.POST - assert op.urlTemplate == '/' - assert op.description == 'desc' - assert op.policyXml == '' - d = op.to_dict() - assert d['method'] == HTTP_VERB.POST - -def test_apioperation_invalid_method(): - # Negative: method must be a valid HTTP_VERB - with pytest.raises(ValueError): - APIOperation( - name='bad', - displayName='Bad', - urlTemplate='/bad', - method='INVALID', - description='desc', - policyXml='' +class TestNamedValue: + """Test suite for NamedValue objects.""" + + @pytest.mark.parametrize('is_secret', [True, False]) + def test_creation(self, is_secret): + """Test NamedValue creation with secret flag.""" + nv = NamedValue( + name='test-key', + value='test-value', + isSecret=is_secret ) + assert nv.name == 'test-key' + assert nv.value == 'test-value' + assert nv.isSecret is is_secret + + def test_to_dict(self): + """Test NamedValue to_dict method.""" + nv = NamedValue(name='key', value='val', isSecret=True) + d = nv.to_dict() + + assert d['name'] == 'key' + assert d['value'] == 'val' + assert d['isSecret'] is True + + # ------------------------------ -# PRODUCT TESTS +# ROLE TESTS # ------------------------------ -@pytest.mark.unit -def test_product_creation(): - """Test creation of Product object and its attributes.""" - product = Product( - name = 'hr', - displayName = 'Human Resources', - description = 'HR product description' - ) - - assert product.name == 'hr' - assert product.displayName == 'Human Resources' - assert product.description == 'HR product description' - assert product.state == 'published' # default value - assert product.subscriptionRequired is True # default value - assert product.policyXml is not None # should have default policy - - -@pytest.mark.unit -def test_product_creation_with_custom_values(): - """Test creation of Product object with custom values.""" - custom_policy = '' - product = Product( - name = 'test-product', - displayName = 'Test Product', - description = 'Test description', - state = 'notPublished', - subscriptionRequired = False, - policyXml = custom_policy - ) - - assert product.name == 'test-product' - assert product.displayName == 'Test Product' - assert product.description == 'Test description' - assert product.state == 'notPublished' - assert product.subscriptionRequired is False - assert product.policyXml == custom_policy - - -@pytest.mark.unit -def test_product_creation_with_approval_required(): - """Test creation of Product object with approvalRequired set to 
True.""" - product = Product( - name = 'premium-hr', - displayName = 'Premium Human Resources', - description = 'Premium HR product requiring approval', - subscriptionRequired = True, - approvalRequired = True - ) - - assert product.name == 'premium-hr' - assert product.displayName == 'Premium Human Resources' - assert product.description == 'Premium HR product requiring approval' - assert product.state == 'published' # default value - assert product.subscriptionRequired is True - assert product.approvalRequired is True - assert product.policyXml is not None # should have default policy - - -@pytest.mark.unit -def test_product_to_dict(): - """Test that to_dict includes all required fields.""" - custom_policy = '' - product = Product( - name = 'hr', - displayName = 'Human Resources', - description = 'HR product', - state = 'published', - subscriptionRequired = True, - policyXml = custom_policy - ) - d = product.to_dict() - - assert d['name'] == 'hr' - assert d['displayName'] == 'Human Resources' - assert d['description'] == 'HR product' - assert d['state'] == 'published' - assert d['subscriptionRequired'] is True - assert d['policyXml'] == custom_policy - - -@pytest.mark.unit -def test_product_to_dict_includes_approval_required(): - """Test that to_dict includes approvalRequired field.""" - product = Product( - name = 'premium-hr', - displayName = 'Premium Human Resources', - description = 'Premium HR product', - subscriptionRequired = True, - approvalRequired = True - ) - d = product.to_dict() - - assert d['name'] == 'premium-hr' - assert d['displayName'] == 'Premium Human Resources' - assert d['description'] == 'Premium HR product' - assert d['state'] == 'published' - assert d['subscriptionRequired'] is True - assert d['approvalRequired'] is True - assert 'policyXml' in d - - -@pytest.mark.unit -def test_product_approval_required_default_false(): - """Test that approvalRequired defaults to False when not specified.""" - product = Product( - name = 'basic-hr', - displayName = 'Basic Human Resources', - description = 'Basic HR product' - ) - - assert product.approvalRequired is False - d = product.to_dict() - assert d['approvalRequired'] is False - - -@pytest.mark.unit -def test_product_equality(): - """Test equality comparison for Product objects.""" - product1 = Product( - name = 'hr', - displayName = 'Human Resources', - description = 'HR product' - ) - product2 = Product( - name = 'hr', - displayName = 'Human Resources', - description = 'HR product' - ) - assert product1 == product2 - - # Different names should not be equal - product3 = Product( - name = 'finance', - displayName = 'Human Resources', - description = 'HR product' - ) - assert product1 != product3 - - -@pytest.mark.unit -def test_product_repr(): - """Test __repr__ method of Product.""" - product = Product( - name = 'hr', - displayName = 'Human Resources', - description = 'HR product' - ) - result = repr(product) - assert 'Product' in result - assert 'hr' in result - assert 'Human Resources' in result - -@pytest.mark.unit -def test_api_subscription_required_default(): - """Test that API object has subscriptionRequired defaulting to True.""" - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None - ) - assert api.subscriptionRequired is True - -@pytest.mark.unit -def test_api_subscription_required_explicit_false(): - """Test creation of API object with explicit subscriptionRequired=False.""" - 
api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - subscriptionRequired = False - ) - assert api.subscriptionRequired is False - -@pytest.mark.unit -def test_api_subscription_required_explicit_true(): - """Test creation of API object with explicit subscriptionRequired=True.""" - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - subscriptionRequired = True - ) - assert api.subscriptionRequired is True - -@pytest.mark.unit -def test_api_to_dict_includes_subscription_required_when_true(): - """Test that to_dict includes subscriptionRequired when True.""" - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - subscriptionRequired = True - ) - d = api.to_dict() - assert 'subscriptionRequired' in d - assert d['subscriptionRequired'] is True - -@pytest.mark.unit -def test_api_to_dict_includes_subscription_required_when_false(): - """Test that to_dict includes subscriptionRequired when explicitly False.""" - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - subscriptionRequired = False - ) - d = api.to_dict() - assert 'subscriptionRequired' in d - assert d['subscriptionRequired'] is False - -@pytest.mark.unit -def test_api_equality_with_subscription_required(): - """Test equality comparison for API objects with different subscriptionRequired values.""" - api1 = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - subscriptionRequired = True - ) - api2 = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - subscriptionRequired = True - ) - api3 = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - subscriptionRequired = False - ) - - # Same subscriptionRequired values should be equal - assert api1 == api2 - - # Different subscriptionRequired values should not be equal - assert api1 != api3 - -@pytest.mark.unit -def test_api_with_all_properties(): - """Test creation of API object with all properties including subscriptionRequired.""" - tags = ['tag1', 'tag2'] - product_names = ['product1', 'product2'] - api = API( - name = EXAMPLE_NAME, - displayName = EXAMPLE_DISPLAY_NAME, - path = EXAMPLE_PATH, - description = EXAMPLE_DESCRIPTION, - policyXml = EXAMPLE_POLICY_XML, - operations = None, - tags = tags, - productNames = product_names, - subscriptionRequired = True - ) - - assert api.name == EXAMPLE_NAME - assert api.displayName == EXAMPLE_DISPLAY_NAME - assert api.path == EXAMPLE_PATH - assert api.description == EXAMPLE_DESCRIPTION - assert api.policyXml == EXAMPLE_POLICY_XML - assert api.operations == [] - assert api.tags == tags - assert api.productNames == product_names - assert api.subscriptionRequired is True - - d = api.to_dict() - assert d['name'] == EXAMPLE_NAME - 
assert d['displayName'] == EXAMPLE_DISPLAY_NAME - assert d['path'] == EXAMPLE_PATH - assert d['description'] == EXAMPLE_DESCRIPTION - assert d['policyXml'] == EXAMPLE_POLICY_XML - assert d['tags'] == tags - assert d['productNames'] == product_names - assert d['subscriptionRequired'] is True +class TestRole: + """Test suite for Role class (mock GUIDs).""" + + def test_role_constants(self): + """Test Role has expected constants.""" + assert hasattr(Role, 'NONE') + assert hasattr(Role, 'HR_MEMBER') + assert hasattr(Role, 'HR_ASSOCIATE') + assert hasattr(Role, 'HR_ADMINISTRATOR') + assert hasattr(Role, 'MARKETING_MEMBER') # ------------------------------ -# MISSING COVERAGE TESTS FOR APIMTYPES +# CONSTANTS TESTS # ------------------------------ -def test_named_value_creation(): - """Test NamedValue creation and methods.""" - nv = NamedValue( - name='test-nv', - value='test-value', - isSecret=True - ) - assert nv.name == 'test-nv' - assert nv.value == 'test-value' - assert nv.isSecret is True - - # Test to_dict method - d = nv.to_dict() - assert d['name'] == 'test-nv' - assert d['isSecret'] is True - -def test_named_value_defaults(): - """Test NamedValue default values.""" - nv = NamedValue(name='test', value='value') - assert nv.isSecret is False # default value - -def test_policy_fragment_creation(): - """Test PolicyFragment creation and methods.""" - pf = PolicyFragment( - name='test-fragment', - description='Test fragment', - policyXml='' - ) - assert pf.name == 'test-fragment' - assert pf.description == 'Test fragment' - assert pf.policyXml == '' - - # Test to_dict method - d = pf.to_dict() - assert d['name'] == 'test-fragment' - assert d['policyXml'] == '' - -def test_policy_fragment_defaults(): - """Test PolicyFragment default values.""" - pf = PolicyFragment(name='test', policyXml='') - assert not pf.description # default value - -def test_product_defaults(): - """Test Product default values.""" - product = Product(name='test', displayName='Test', description='Test description') - assert product.state == 'published' # default value - assert product.subscriptionRequired is True # default value - -def test_get_apioperation2(): - """Test GET_APIOperation2 class.""" - op = GET_APIOperation2( - name='test-op', - displayName='Test Operation', - urlTemplate='/test', - description='test', - policyXml='' - ) - assert op.name == 'test-op' - assert op.displayName == 'Test Operation' - assert op.urlTemplate == '/test' - assert op.method == HTTP_VERB.GET - assert op.description == 'test' - assert op.policyXml == '' - -def test_api_operation_equality(): - """Test APIOperation equality comparison.""" - op1 = APIOperation( - name='test', - displayName='Test', - urlTemplate='/test', - method=HTTP_VERB.GET, - description='Test op', - policyXml='' - ) - op2 = APIOperation( - name='test', - displayName='Test', - urlTemplate='/test', - method=HTTP_VERB.GET, - description='Test op', - policyXml='' - ) - op3 = APIOperation( - name='different', - displayName='Test', - urlTemplate='/test', - method=HTTP_VERB.GET, - description='Test op', - policyXml='' - ) - - assert op1 == op2 - assert op1 != op3 - -def test_api_operation_repr(): - """Test APIOperation __repr__ method.""" - op = APIOperation( - name='test', - displayName='Test', - urlTemplate='/test', - method=HTTP_VERB.GET, - description='Test op', - policyXml='' - ) - result = repr(op) - assert 'APIOperation' in result - assert 'test' in result - -def test_named_value_repr(): - """Test NamedValue __repr__ method.""" - nv = NamedValue(name='test-nv', 
value='value') - result = repr(nv) - assert 'NamedValue' in result - assert 'test-nv' in result - -def test_policy_fragment_repr(): - """Test PolicyFragment __repr__ method.""" - pf = PolicyFragment(name='test-fragment', policyXml='') - result = repr(pf) - assert 'PolicyFragment' in result - assert 'test-fragment' in result +class TestConstants: + """Test suite for module-level constants.""" + + def test_policy_paths_exist(self): + """Test that policy path constants are defined.""" + assert BACKEND_XML_POLICY_PATH is not None + assert DEFAULT_XML_POLICY_PATH is not None + assert HELLO_WORLD_XML_POLICY_PATH is not None + assert REQUEST_HEADERS_XML_POLICY_PATH is not None + + def test_subscription_key_parameter(self): + """Test subscription key parameter name.""" + assert SUBSCRIPTION_KEY_PARAMETER_NAME == 'api-key' + + def test_sleep_time_constant(self): + """Test sleep time constant is defined.""" + assert isinstance(SLEEP_TIME_BETWEEN_REQUESTS_MS, int) + assert SLEEP_TIME_BETWEEN_REQUESTS_MS > 0 # ------------------------------ -# ADDITIONAL COVERAGE TESTS +# PROJECT ROOT TESTS # ------------------------------ -def testget_project_root_functionality(): - """Test get_project_root function comprehensively.""" - - # This function should return the project root - root = get_project_root() - assert isinstance(root, Path) - assert root.exists() - -def test_output_class_basic(): - """Test Output class initialization and properties.""" - # Test successful output with JSON - output = Output(True, '{"key": "value"}') - assert output.success is True - assert output.text == '{"key": "value"}' - assert output.json_data == {"key": "value"} - assert output.is_json is True - -def test_output_class_non_json(): - """Test Output class with non-JSON text.""" - output = Output(True, 'some plain text') - assert output.success is True - assert output.text == 'some plain text' - assert output.json_data is None - assert output.is_json is False - -def test_output_get_with_properties_structure(): - """Test Output.get() with deployment output structure.""" - json_text = json.dumps({ - 'properties': { - 'outputs': { - 'apimName': {'value': 'my-apim'} - } - } - }) - output = Output(True, json_text) - result = output.get('apimName', suppress_logging=True) - assert result == 'my-apim' - -def test_output_get_missing_key(): - """Test Output.get() with missing key.""" - json_text = json.dumps({ - 'properties': { - 'outputs': { - 'apimName': {'value': 'my-apim'} - } - } - }) - output = Output(True, json_text) - result = output.get('nonExistent', suppress_logging=True) - assert result is None - -def test_output_get_non_dict_json(): - """Test Output.get() when json_data is not a dict.""" - output = Output(True, '[1, 2, 3]') - result = output.get('key', suppress_logging=True) - assert result is None - -def test_output_get_missing_properties(): - """Test Output.get() when 'properties' key is missing.""" - json_text = json.dumps({ - 'data': { - 'outputs': { - 'apimName': {'value': 'my-apim'} - } - } - }) - output = Output(True, json_text) - # Should look for key at root level - result = output.get('apimName', suppress_logging=True) - assert result is None - -def test_output_getJson_with_nested_structure(): - """Test Output.getJson() returns parsed JSON from nested value.""" - nested_json = '{"nested": "data"}' - json_text = json.dumps({ - 'properties': { - 'outputs': { - 'config': {'value': nested_json} - } - } - }) - output = Output(True, json_text) - result = output.getJson('config', suppress_logging=True) - assert result == 
{"nested": "data"} - -def test_output_getJson_with_dict_value(): - """Test Output.getJson() with dict value.""" - json_text = json.dumps({ - 'properties': { - 'outputs': { - 'config': {'value': {"nested": "dict"}} - } - } - }) - output = Output(True, json_text) - result = output.getJson('config', suppress_logging=True) - assert result == {"nested": "dict"} - -def test_output_getJson_with_missing_key(): - """Test Output.getJson() with missing key.""" - json_text = json.dumps({ - 'properties': { - 'outputs': { - 'apimName': {'value': 'my-apim'} - } - } - }) - output = Output(True, json_text) - result = output.getJson('nonExistent', suppress_logging=True) - assert result is None - -def test_output_get_with_direct_key(): - """Test Output.get() when output key is at root level.""" - json_text = json.dumps({ - 'apimName': {'value': 'my-apim'}, - 'location': {'value': 'eastus'} - }) - output = Output(True, json_text) - result = output.get('apimName', suppress_logging=True) - assert result == 'my-apim' - -def test_output_get_with_label_and_secure(): - """Test Output.get() with label and secure masking.""" - json_text = json.dumps({ - 'properties': { - 'outputs': { - 'secretKey': {'value': 'very-secret-key-12345'} - } - } - }) - output = Output(True, json_text) - # Should not raise even with label; we suppress logging in test - result = output.get('secretKey', label='Secret', secure=True, suppress_logging=True) - assert result == 'very-secret-key-12345' - -def test_output_getJson_with_list_value(): - """Test Output.getJson() with array value.""" - json_text = json.dumps({ - 'properties': { - 'outputs': { - 'items': {'value': [1, 2, 3, 4, 5]} - } - } - }) - output = Output(True, json_text) - result = output.getJson('items', suppress_logging=True) - assert result == [1, 2, 3, 4, 5] - -def test_output_getJson_with_string_json_value(): - """Test Output.getJson() when value is JSON-formatted string.""" - nested_json = '{"nested": "object"}' - json_text = json.dumps({ - 'properties': { - 'outputs': { - 'config': {'value': nested_json} - } - } - }) - output = Output(True, json_text) - result = output.getJson('config', suppress_logging=True) - assert result == {"nested": "object"} - -def test_output_get_empty_string_value(): - """Test Output.get() with empty string value.""" - json_text = json.dumps({ - 'properties': { - 'outputs': { - 'empty': {'value': ''} - } - } - }) - output = Output(True, json_text) - result = output.get('empty', suppress_logging=True) - assert not result - -def test_output_getJson_empty_object(): - """Test Output.getJson() with empty JSON object.""" - json_text = json.dumps({ - 'properties': { - 'outputs': { - 'emptyObj': {'value': {}} - } - } - }) - output = Output(True, json_text) - result = output.getJson('emptyObj', suppress_logging=True) - assert result == {} - -def test_output_parse_error_handling(): - """Test Output class handles JSON parse errors gracefully.""" - # JSON that doesn't parse but has structure - output = Output(True, '{invalid json here}') - # Should still initialize without crashing - assert output.text == '{invalid json here}' - assert output.success is True - - -def test_api_edge_cases(): - """Test API class with edge cases and full coverage.""" - # Test with all None/empty values - api = API('', '', '', '', '', operations=None, tags=None, productNames=None) - assert not api.name - assert api.operations == [] - assert api.tags == [] - assert api.productNames == [] - - # Test subscription required variations - api_sub_true = API('test', 'Test', '/test', 
'desc', 'policy', subscriptionRequired=True) - assert api_sub_true.subscriptionRequired is True - - api_sub_false = API('test', 'Test', '/test', 'desc', 'policy', subscriptionRequired=False) - assert api_sub_false.subscriptionRequired is False - - -def test_product_edge_cases(): - """Test Product class with edge cases.""" - # Test with minimal parameters - product = Product('test', 'Test Product', 'Test Description') - assert product.name == 'test' - assert product.displayName == 'Test Product' - assert product.description == 'Test Description' - assert product.state == 'published' - assert product.subscriptionRequired is True # Default is True - assert product.approvalRequired is False - # Policy XML should contain some content, not be empty - assert product.policyXml is not None and len(product.policyXml) > 0 - - # Test with all parameters - product_full = Product( - 'full', 'Full Product', 'Description', 'notPublished', - True, True, '' - ) - assert product_full.state == 'notPublished' - assert product_full.subscriptionRequired is True - assert product_full.approvalRequired is True - assert product_full.policyXml == '' - - -def test_named_value_edge_cases(): - """Test NamedValue class edge cases.""" - # Test with minimal parameters - nv = NamedValue('key', 'value') - assert nv.name == 'key' - assert nv.value == 'value' - assert nv.isSecret is False # Use correct attribute name - - # Test with secret - nv_secret = NamedValue('secret-key', 'secret-value', True) - assert nv_secret.isSecret is True # Use correct attribute name - - -def test_policy_fragment_edge_cases(): - """Test PolicyFragment class edge cases.""" - # Test with minimal parameters - pf = PolicyFragment('frag', '') - assert pf.name == 'frag' - assert pf.policyXml == '' # Use correct attribute name - assert not pf.description - - # Test with description - pf_desc = PolicyFragment('frag', '', 'Test fragment') - assert pf_desc.description == 'Test fragment' - - -def test_api_operation_comprehensive(): - """Test APIOperation class comprehensively.""" - # Test invalid HTTP method - with pytest.raises(ValueError, match='Invalid HTTP_VERB'): - APIOperation('test', 'Test', '/test', 'INVALID', 'Test description', '') - - # Test all valid methods - for method in ['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'HEAD', 'OPTIONS']: - # Get HTTP_VERB enum value - http_verb = HTTP_VERB(method) - op = APIOperation(f'test-{method.lower()}', f'Test {method}', f'/test-{method.lower()}', http_verb, f'Test {method} description', '') - assert op.method == http_verb - assert op.displayName == f'Test {method}' - assert op.policyXml == '' - - -def test_convenience_functions(): - """Test convenience functions for API operations.""" - get_op = GET_APIOperation('Get data', '') - assert get_op.method == HTTP_VERB.GET - assert get_op.displayName == 'GET' # displayName is set to 'GET', not the description - assert get_op.description == 'Get data' # description parameter goes to description field - - post_op = POST_APIOperation('Post data', '') - assert post_op.method == HTTP_VERB.POST - assert post_op.displayName == 'POST' # displayName is set to 'POST', not the description - assert post_op.description == 'Post data' # description parameter goes to description field - - -def test_enum_edge_cases(): - """Test enum edge cases and completeness.""" - # Test all enum values exist - assert hasattr(INFRASTRUCTURE, 'SIMPLE_APIM') - assert hasattr(INFRASTRUCTURE, 'AFD_APIM_PE') - assert hasattr(INFRASTRUCTURE, 'APIM_ACA') - - assert hasattr(APIM_SKU, 'DEVELOPER') - 
assert hasattr(APIM_SKU, 'BASIC') - assert hasattr(APIM_SKU, 'STANDARD') - assert hasattr(APIM_SKU, 'PREMIUM') - - assert hasattr(APIMNetworkMode, 'EXTERNAL_VNET') # Correct enum name - assert hasattr(APIMNetworkMode, 'INTERNAL_VNET') # Correct enum name - - assert hasattr(HTTP_VERB, 'GET') - assert hasattr(HTTP_VERB, 'POST') - - -def test_role_enum_comprehensive(): - """Test Role enum comprehensively.""" - # Test all role values (these are GUIDs, not string names) - assert Role.HR_MEMBER == '316790bc-fbd3-4a14-8867-d1388ffbc195' - assert Role.HR_ASSOCIATE == 'd3c1b0f2-4a5e-4c8b-9f6d-7c8e1f2a3b4c' - assert Role.HR_ADMINISTRATOR == 'a1b2c3d4-e5f6-7g8h-9i0j-k1l2m3n4o5p6' - - -def test_to_dict_comprehensive(): - """Test to_dict methods comprehensively.""" - # Test API with all properties - op = GET_APIOperation('Get', '') - api = API( - 'test-api', 'Test API', '/test', 'Test desc', '', - operations=[op], tags=['tag1', 'tag2'], productNames=['prod1'], - subscriptionRequired=True - ) - - api_dict = api.to_dict() - assert api_dict['name'] == 'test-api' - assert api_dict['displayName'] == 'Test API' - assert api_dict['path'] == '/test' - assert api_dict['description'] == 'Test desc' - assert api_dict['policyXml'] == '' - assert len(api_dict['operations']) == 1 - assert api_dict['tags'] == ['tag1', 'tag2'] - assert api_dict['productNames'] == ['prod1'] - assert api_dict['subscriptionRequired'] is True - - # Test Product to_dict - product = Product('prod', 'Product', 'Desc', 'published', True, True, '') - prod_dict = product.to_dict() - assert prod_dict['name'] == 'prod' - assert prod_dict['displayName'] == 'Product' - assert prod_dict['description'] == 'Desc' - assert prod_dict['state'] == 'published' - assert prod_dict['subscriptionRequired'] is True - assert prod_dict['approvalRequired'] is True - assert prod_dict['policyXml'] == '' - - # Test NamedValue to_dict - nv = NamedValue('key', 'value', True) - nv_dict = nv.to_dict() - assert nv_dict['name'] == 'key' - assert nv_dict['value'] == 'value' - assert nv_dict['isSecret'] is True # Use correct key name - - # Test PolicyFragment to_dict - pf = PolicyFragment('frag', '', 'Fragment desc') - pf_dict = pf.to_dict() - assert pf_dict['name'] == 'frag' - assert pf_dict['policyXml'] == '' # Use correct key name - assert pf_dict['description'] == 'Fragment desc' - - -def test_equality_and_repr_comprehensive(): - """Test equality and repr methods comprehensively.""" - api1 = API('test', 'Test', '/test', 'desc', 'policy') - api2 = API('test', 'Test', '/test', 'desc', 'policy') - api3 = API('different', 'Different', '/diff', 'desc', 'policy') - - assert api1 == api2 - assert api1 != api3 - assert api1 != 'not an api' - - # Test repr - repr_str = repr(api1) - assert 'API' in repr_str - assert 'test' in repr_str - - # Test Product equality and repr - prod1 = Product('prod', 'Product', 'Product description') - prod2 = Product('prod', 'Product', 'Product description') - prod3 = Product('other', 'Other', 'Other description') - - assert prod1 == prod2 - assert prod1 != prod3 - assert prod1 != 'not a product' - - repr_str = repr(prod1) - assert 'Product' in repr_str - assert 'prod' in repr_str - - # Test APIOperation equality and repr - op1 = GET_APIOperation('Get', '') - op2 = GET_APIOperation('Get', '') - op3 = POST_APIOperation('Post', '') - - assert op1 == op2 - assert op1 != op3 - assert op1 != 'not an operation' - - repr_str = repr(op1) - assert 'APIOperation' in repr_str - assert 'GET' in repr_str - - -def test_constants_accessibility(): - """Test that all 
constants are accessible.""" - # Test policy file paths - assert isinstance(DEFAULT_XML_POLICY_PATH, str) - assert isinstance(HELLO_WORLD_XML_POLICY_PATH, str) - assert isinstance(REQUEST_HEADERS_XML_POLICY_PATH, str) - assert isinstance(BACKEND_XML_POLICY_PATH, str) - - # Test other constants - assert isinstance(SUBSCRIPTION_KEY_PARAMETER_NAME, str) - assert isinstance(SLEEP_TIME_BETWEEN_REQUESTS_MS, int) +class TestProjectRoot: + """Test suite for get_project_root function.""" + + def test_get_project_root(self): + """Test that get_project_root returns a valid Path.""" + root = get_project_root() + + assert isinstance(root, Path) + assert root.exists() + assert root.is_dir() + + def test_get_project_root_from_env_var(self, monkeypatch): + """Test get_project_root uses PROJECT_ROOT environment variable.""" + test_path = Path('c:/test/project') + monkeypatch.setenv('PROJECT_ROOT', str(test_path)) + + # Need to reimport to pick up new env var + importlib.reload(apimtypes) + + root = apimtypes.get_project_root() + assert root == test_path + + def test_get_project_root_returns_path_with_indicators(self, tmp_path, monkeypatch): + """Test get_project_root finds correct directory with indicators.""" + # Create directory structure + project_dir = tmp_path / 'project' + project_dir.mkdir() + (project_dir / 'README.md').write_text('test') + (project_dir / 'requirements.txt').write_text('test') + (project_dir / 'bicepconfig.json').write_text('test') + + # Mock __file__ to point into a subdirectory + shared_dir = project_dir / 'shared' / 'python' + shared_dir.mkdir(parents=True) + test_file = shared_dir / 'apimtypes.py' + test_file.write_text('test') + + # Remove env var to force detection logic + monkeypatch.delenv('PROJECT_ROOT', raising=False) + + with patch('apimtypes.Path') as mock_path_class: + mock_path_instance = MagicMock() + mock_path_instance.resolve.return_value = test_file.resolve() + mock_path_class.return_value = mock_path_instance + mock_path_class.side_effect = lambda x: Path(x) if isinstance(x, str) else mock_path_instance + + # Call using patched Path directly on already-imported module + root = apimtypes.get_project_root() + + # Should find the project directory + assert root == project_dir or root.exists() + + def test_get_project_root_contains_required_files(self): + """Test that detected project root contains required indicator files.""" + root = get_project_root() + + # Verify it has the expected files + assert (root / 'README.md').exists() + assert (root / 'requirements.txt').exists() + assert (root / 'bicepconfig.json').exists() diff --git a/tests/python/test_azure_resources.py b/tests/python/test_azure_resources.py index 65e27fe..ca245ad 100644 --- a/tests/python/test_azure_resources.py +++ b/tests/python/test_azure_resources.py @@ -3,12 +3,14 @@ """ import json -from unittest.mock import Mock, patch, mock_open, call, MagicMock +import time +from unittest.mock import Mock, patch, mock_open, call import pytest # APIM Samples imports import azure_resources as az from apimtypes import INFRASTRUCTURE, Endpoints, Output +from test_helpers import suppress_module_functions # ------------------------------ @@ -255,10 +257,7 @@ def fake_run(cmd: str, *args, **kwargs): return Output(False, 'unexpected command') monkeypatch.setattr(az, 'run', fake_run) - monkeypatch.setattr(az, 'print_message', lambda *a, **k: None) - monkeypatch.setattr(az, 'print_info', lambda *a, **k: None) - monkeypatch.setattr(az, 'print_ok', lambda *a, **k: None) - monkeypatch.setattr(az, 'print_error', lambda 
*a, **k: None)
+    suppress_module_functions(monkeypatch, az, ['print_message', 'print_info', 'print_ok', 'print_error'])

     result = az.cleanup_old_jwt_signing_keys('apim', 'rg', 'JwtSigningKey-sample-456')

@@ -273,9 +272,7 @@ def test_cleanup_old_jwt_signing_keys_invalid_pattern(monkeypatch):
     """Test cleanup when current key name does not match expected pattern."""

     monkeypatch.setattr(az, 'run', lambda *a, **k: pytest.fail('run should not be called'))
-    monkeypatch.setattr(az, 'print_message', lambda *a, **k: None)
-    monkeypatch.setattr(az, 'print_info', lambda *a, **k: None)
-    monkeypatch.setattr(az, 'print_ok', lambda *a, **k: None)
+    suppress_module_functions(monkeypatch, az, ['print_message', 'print_info', 'print_ok'])

     result = az.cleanup_old_jwt_signing_keys('apim', 'rg', 'invalid-key-name')

@@ -290,10 +287,7 @@ def test_check_apim_blob_permissions_success(monkeypatch):
     """Test blob permission check succeeds when role assignment and access test succeed."""

     monkeypatch.setattr(az, 'get_azure_role_guid', lambda *_: 'role-guid')
-    monkeypatch.setattr(az, 'print_info', lambda *a, **k: None)
-    monkeypatch.setattr(az, 'print_ok', lambda *a, **k: None)
-    monkeypatch.setattr(az, 'print_warning', lambda *a, **k: None)
-    monkeypatch.setattr(az, 'print_error', lambda *a, **k: None)
+    suppress_module_functions(monkeypatch, az, ['print_info', 'print_ok', 'print_warning', 'print_error'])

     run_calls: list[str] = []

@@ -328,10 +322,7 @@ def test_check_apim_blob_permissions_missing_resource_id(monkeypatch):
     """Test blob permission check fails when storage account ID cannot be parsed."""

     monkeypatch.setattr(az, 'get_azure_role_guid', lambda *_: 'role-guid')
-    monkeypatch.setattr(az, 'print_info', lambda *a, **k: None)
-    monkeypatch.setattr(az, 'print_ok', lambda *a, **k: None)
-    monkeypatch.setattr(az, 'print_warning', lambda *a, **k: None)
-    monkeypatch.setattr(az, 'print_error', lambda *a, **k: None)
+    suppress_module_functions(monkeypatch, az, ['print_info', 'print_ok', 'print_warning', 'print_error'])

     def fake_run(cmd: str, *args, **kwargs):
         if 'apim show' in cmd:
@@ -703,9 +694,7 @@ def test_get_account_info_missing_user_id(monkeypatch):


 def test_cleanup_old_jwt_signing_keys_no_matching_pattern(monkeypatch):
     """Test cleanup_old_jwt_signing_keys with non-matching key pattern."""
-    monkeypatch.setattr('azure_resources.print_message', MagicMock())
-    monkeypatch.setattr('azure_resources.print_info', MagicMock())
-    monkeypatch.setattr('azure_resources.print_ok', MagicMock())
+    suppress_module_functions(monkeypatch, az, ['print_message', 'print_info', 'print_ok'])

     result = az.cleanup_old_jwt_signing_keys('apim', 'rg', 'InvalidKeyPattern-123')
     assert result is False
@@ -724,10 +713,7 @@ def fake_run(cmd, *args, **kwargs):
             return Output(False, 'Unknown')

     monkeypatch.setattr('azure_resources.run', fake_run)
-    monkeypatch.setattr('azure_resources.print_message', MagicMock())
-    monkeypatch.setattr('azure_resources.print_info', MagicMock())
-    monkeypatch.setattr('azure_resources.print_ok', MagicMock())
-    monkeypatch.setattr('azure_resources.print_error', MagicMock())
+    suppress_module_functions(monkeypatch, az, ['print_message', 'print_info', 'print_ok', 'print_error'])

     result = az.cleanup_old_jwt_signing_keys('apim', 'rg', 'JwtSigningKey-sample-99999')
     assert result is True
@@ -948,15 +934,99 @@ def fake_run(cmd, *args, **kwargs):
             return Output(False, 'Error')

     monkeypatch.setattr('azure_resources.run', fake_run)
-    monkeypatch.setattr('azure_resources.print_info', MagicMock())
-
monkeypatch.setattr('azure_resources.print_ok', MagicMock()) - monkeypatch.setattr('azure_resources.print_warning', MagicMock()) - monkeypatch.setattr('azure_resources.print_error', MagicMock()) + suppress_module_functions(monkeypatch, az, ['print_info', 'print_ok', 'print_warning', 'print_error']) result = az.check_apim_blob_permissions('apim', 'storage', 'rg') assert result is False +def test_check_apim_blob_permissions_timeout_waiting_for_propagation(monkeypatch): + """Test blob permission check times out when waiting for role assignment propagation.""" + def fake_run(cmd, *args, **kwargs): + if 'apim show' in cmd: + return Output(True, 'principal-id\n') + if 'storage account show' in cmd: + return Output(True, '/subscriptions/123/resourceGroups/rg/providers/Microsoft.Storage/storageAccounts/storage\n') + if 'role assignment list' in cmd: + # Never return a role assignment (timeout scenario) + return Output(True, '') + return Output(False, 'unexpected') + + monkeypatch.setattr(az, 'run', fake_run) + monkeypatch.setattr(az, 'get_azure_role_guid', lambda *_: 'role-guid') + monkeypatch.setattr(az.time, 'sleep', lambda *a, **k: None) + suppress_module_functions(monkeypatch, az, ['print_info', 'print_ok', 'print_warning', 'print_error']) + + result = az.check_apim_blob_permissions('apim', 'storage', 'rg', max_wait_minutes=1) + assert result is False + + +def test_check_apim_blob_permissions_storage_account_retrieval_fails(monkeypatch): + """Test blob permission check fails when storage account retrieval fails.""" + def fake_run(cmd, *args, **kwargs): + if 'apim show' in cmd: + return Output(True, 'principal-id\n') + if 'storage account show' in cmd: + return Output(False, 'Error retrieving account') + return Output(False, 'unexpected') + + monkeypatch.setattr(az, 'run', fake_run) + monkeypatch.setattr(az, 'get_azure_role_guid', lambda *_: 'role-guid') + suppress_module_functions(monkeypatch, az, ['print_info', 'print_ok', 'print_warning', 'print_error']) + + result = az.check_apim_blob_permissions('apim', 'storage', 'rg') + assert result is False + + +def test_check_apim_blob_permissions_role_assignment_exists_but_blob_access_fails(monkeypatch): + """Test when role assignment exists but blob access test fails.""" + def fake_run(cmd, *args, **kwargs): + if 'apim show' in cmd: + return Output(True, 'principal-id\n') + if 'storage account show' in cmd: + return Output(True, '/subscriptions/123/resourceGroups/rg/providers/Microsoft.Storage/storageAccounts/storage\n') + if 'role assignment list' in cmd: + return Output(True, 'assignment-id\n') + if 'storage blob list' in cmd: + return Output(True, 'access-test-failed') + return Output(False, 'unexpected') + + monkeypatch.setattr(az, 'run', fake_run) + monkeypatch.setattr(az, 'get_azure_role_guid', lambda *_: 'role-guid') + monkeypatch.setattr(az.time, 'sleep', lambda *a, **k: None) + suppress_module_functions(monkeypatch, az, ['print_info', 'print_ok', 'print_warning', 'print_error']) + + result = az.check_apim_blob_permissions('apim', 'storage', 'rg', max_wait_minutes=1) + assert result is False + + +def test_check_apim_blob_permissions_custom_wait_time(monkeypatch): + """Test blob permission check with custom max_wait_minutes parameter.""" + call_times = [] + + def fake_sleep(seconds): + call_times.append(seconds) + + def fake_run(cmd, *args, **kwargs): + if 'apim show' in cmd: + return Output(True, 'principal-id\n') + if 'storage account show' in cmd: + return Output(True, 
'/subscriptions/123/resourceGroups/rg/providers/Microsoft.Storage/storageAccounts/storage\n') + if 'role assignment list' in cmd: + return Output(True, '') # Never find it, trigger timeout + return Output(False, 'unexpected') + + monkeypatch.setattr(az, 'run', fake_run) + monkeypatch.setattr(az, 'get_azure_role_guid', lambda *_: 'role-guid') + monkeypatch.setattr(az.time, 'sleep', fake_sleep) + suppress_module_functions(monkeypatch, az, ['print_info', 'print_ok', 'print_warning', 'print_error']) + + result = az.check_apim_blob_permissions('apim', 'storage', 'rg', max_wait_minutes=2) + assert result is False + # Verify sleep was called with correct interval + assert all(seconds == 30 for seconds in call_times) + + def test_get_account_info_all_fields_present(monkeypatch): """Test get_account_info successfully retrieves all account information.""" with patch('azure_resources.run') as mock_run: @@ -978,3 +1048,918 @@ def test_get_account_info_all_fields_present(monkeypatch): assert user_id == 'user-id-xyz' assert tenant_id == 'tenant-abcde' assert subscription_id == 'sub-12345' + + +# ------------------------------ +# UTILITY FUNCTION TESTS +# ------------------------------ + +def test_redact_secrets_with_access_token(): + """Test _redact_secrets redacts accessToken in JSON.""" + text = '{"accessToken": "secretToken123"}' + result = az._redact_secrets(text) + assert 'secretToken123' not in result + assert '***REDACTED***' in result + + +def test_redact_secrets_with_refresh_token(): + """Test _redact_secrets redacts refreshToken in JSON.""" + text = '{"refreshToken": "refreshSecret456"}' + result = az._redact_secrets(text) + assert 'refreshSecret456' not in result + assert '***REDACTED***' in result + + +def test_redact_secrets_with_client_secret(): + """Test _redact_secrets redacts client_secret in JSON.""" + text = '{"client_secret": "clientSecret789"}' + result = az._redact_secrets(text) + assert 'clientSecret789' not in result + assert '***REDACTED***' in result + + +def test_redact_secrets_with_bearer_token(): + """Test _redact_secrets redacts Authorization: Bearer tokens.""" + text = 'Authorization: Bearer myBearerToken123' + result = az._redact_secrets(text) + assert 'myBearerToken123' not in result + assert '***REDACTED***' in result + + +def test_redact_secrets_with_empty_string(): + """Test _redact_secrets handles empty string.""" + assert not az._redact_secrets('') + assert az._redact_secrets(None) is None + + +def test_maybe_add_az_debug_flag_when_debug_enabled(): + """Test _maybe_add_az_debug_flag adds --debug when logging is DEBUG.""" + with patch('azure_resources.is_debug_enabled', return_value=True): + result = az._maybe_add_az_debug_flag('az group list') + assert '--debug' in result + + +def test_maybe_add_az_debug_flag_when_debug_disabled(): + """Test _maybe_add_az_debug_flag doesn't add --debug when logging is not DEBUG.""" + with patch('azure_resources.is_debug_enabled', return_value=False): + result = az._maybe_add_az_debug_flag('az group list') + assert result == 'az group list' + + +def test_maybe_add_az_debug_flag_with_pipe(): + """Test _maybe_add_az_debug_flag handles commands with pipes.""" + with patch('azure_resources.is_debug_enabled', return_value=True): + result = az._maybe_add_az_debug_flag('az group list | jq .') + assert '--debug' in result + assert result.index('--debug') < result.index('|') + + +def test_maybe_add_az_debug_flag_with_redirect(): + """Test _maybe_add_az_debug_flag handles commands with output redirection.""" + with 
patch('azure_resources.is_debug_enabled', return_value=True): + result = az._maybe_add_az_debug_flag('az group list > output.txt') + assert '--debug' in result + assert result.index('--debug') < result.index('>') + + +def test_maybe_add_az_debug_flag_already_has_debug(): + """Test _maybe_add_az_debug_flag doesn't duplicate --debug flag.""" + with patch('azure_resources.is_debug_enabled', return_value=True): + result = az._maybe_add_az_debug_flag('az group list --debug') + assert result.count('--debug') == 1 + + +def test_maybe_add_az_debug_flag_non_az_command(): + """Test _maybe_add_az_debug_flag doesn't modify non-az commands.""" + with patch('azure_resources.is_debug_enabled', return_value=True): + result = az._maybe_add_az_debug_flag('echo hello') + assert result == 'echo hello' + + +def test_extract_az_cli_error_message_with_json_error(): + """Test _extract_az_cli_error_message extracts from JSON error payload.""" + output = '{"error": {"code": "NotFound", "message": "Resource not found"}}' + result = az._extract_az_cli_error_message(output) + assert result == 'Resource not found' + + +def test_extract_az_cli_error_message_with_json_message(): + """Test _extract_az_cli_error_message extracts from JSON message field.""" + output = '{"message": "Deployment failed"}' + result = az._extract_az_cli_error_message(output) + assert result == 'Deployment failed' + + +def test_extract_az_cli_error_message_with_error_prefix(): + """Test _extract_az_cli_error_message extracts from ERROR: line.""" + output = 'ERROR: Resource group not found' + result = az._extract_az_cli_error_message(output) + assert result == 'Resource group not found' + + +def test_extract_az_cli_error_message_with_az_error_prefix(): + """Test _extract_az_cli_error_message extracts from az: error: line.""" + output = 'az: error: argument --name is required' + result = az._extract_az_cli_error_message(output) + assert result == 'argument --name is required' + + +def test_extract_az_cli_error_message_with_code_and_message(): + """Test _extract_az_cli_error_message combines Code: and Message: lines.""" + output = 'Code: ResourceNotFound\nMessage: The resource was not found' + result = az._extract_az_cli_error_message(output) + assert 'ResourceNotFound' in result + assert 'The resource was not found' in result + + +def test_extract_az_cli_error_message_with_empty_string(): + """Test _extract_az_cli_error_message handles empty string.""" + assert not az._extract_az_cli_error_message('') + + +def test_extract_az_cli_error_message_skips_traceback(): + """Test _extract_az_cli_error_message skips traceback lines.""" + output = 'Some error\nTraceback (most recent call last):\n File "test.py"' + result = az._extract_az_cli_error_message(output) + assert result == 'Some error' + + +def test_extract_az_cli_error_message_skips_warnings(): + """Test _extract_az_cli_error_message skips warning lines.""" + output = 'WARNING: This is deprecated\nERROR: Real error here' + result = az._extract_az_cli_error_message(output) + assert result == 'Real error here' + + +def test_extract_az_cli_error_message_with_ansi_codes(): + """Test _extract_az_cli_error_message strips ANSI codes.""" + output = '\x1b[31mERROR: Resource failed\x1b[0m' + result = az._extract_az_cli_error_message(output) + assert result == 'Resource failed' + + +def test_extract_az_cli_error_message_finds_first_non_empty_line(): + """Test _extract_az_cli_error_message returns first meaningful line.""" + output = '\n\n\nSome error occurred\nMore details' + result = 
az._extract_az_cli_error_message(output) + assert result == 'Some error occurred' + + +def test_looks_like_json_with_valid_json(): + """Test _looks_like_json identifies JSON strings.""" + assert az._looks_like_json('{"key": "value"}') is True + assert az._looks_like_json('[1, 2, 3]') is True + + +def test_looks_like_json_with_non_json(): + """Test _looks_like_json rejects non-JSON strings.""" + assert az._looks_like_json('plain text') is False + assert az._looks_like_json('') is False + + +def test_strip_ansi_removes_codes(): + """Test _strip_ansi removes ANSI escape codes.""" + text = '\x1b[31mRed text\x1b[0m normal' + result = az._strip_ansi(text) + assert '\x1b' not in result + assert 'Red text' in result + assert 'normal' in result + + +def test_is_az_command_recognizes_az_commands(): + """Test _is_az_command identifies az CLI commands.""" + assert az._is_az_command('az group list') is True + assert az._is_az_command(' az account show ') is True + assert az._is_az_command('az') is True + + +def test_is_az_command_rejects_non_az_commands(): + """Test _is_az_command rejects non-az commands.""" + assert az._is_az_command('echo hello') is False + assert az._is_az_command('python script.py') is False + assert az._is_az_command('azurecli') is False + + +def test_run_with_exception_in_subprocess(): + """Test run() handles subprocess exceptions gracefully.""" + with patch('azure_resources.subprocess.run') as mock_subprocess: + mock_subprocess.side_effect = Exception('Subprocess failed') + + result = az.run('az group list') + + assert result.success is False + assert 'Subprocess failed' in result.text + + +def test_run_with_stderr_only(): + """Test run() handles commands that only output to stderr.""" + with patch('azure_resources.subprocess.run') as mock_subprocess: + mock_process = Mock() + mock_process.returncode = 0 + mock_process.stdout = '' + mock_process.stderr = 'Some warning message' + mock_subprocess.return_value = mock_process + + result = az.run('az group list') + + assert result.success is True + + +def test_run_with_az_debug_flag_already_present(): + """Test run() doesn't duplicate --debug flag.""" + with patch('azure_resources.is_debug_enabled', return_value=True): + with patch('azure_resources.subprocess.run') as mock_subprocess: + mock_process = Mock() + mock_process.returncode = 0 + mock_process.stdout = '[]' + mock_process.stderr = '' + mock_subprocess.return_value = mock_process + + az.run('az group list --debug') + + # Check that --debug appears only once in the command + called_command = mock_subprocess.call_args[0][0] + assert called_command.count('--debug') == 1 + + +def test_run_with_json_output_success(): + """Test run() with successful JSON output.""" + with patch('azure_resources.subprocess.run') as mock_subprocess: + mock_process = Mock() + mock_process.returncode = 0 + mock_process.stdout = '{"result": "success"}' + mock_process.stderr = '' + mock_subprocess.return_value = mock_process + + result = az.run('az group show --name test-rg') + + assert result.success is True + assert '{"result": "success"}' in result.text + + +def test_run_with_complex_shell_expression(): + """Test run() handles complex shell expressions with operators.""" + with patch('azure_resources.is_debug_enabled', return_value=True): + with patch('azure_resources.subprocess.run') as mock_subprocess: + mock_process = Mock() + mock_process.returncode = 0 + mock_process.stdout = 'output' + mock_process.stderr = '' + mock_subprocess.return_value = mock_process + + az.run('az group list || echo 
"failed"') + + # --debug should be inserted before the || + called_command = mock_subprocess.call_args[0][0] + debug_pos = called_command.find('--debug') + pipe_pos = called_command.find('||') + assert debug_pos < pipe_pos + +# ======================================== +# ADDITIONAL COVERAGE TESTS (MIGRATED) +# ======================================== + + +class TestStripAnsi: + """Test ANSI escape sequence removal.""" + + def test_strip_ansi_with_color_codes(self): + text = '\x1b[1;32mSuccess\x1b[0m' + result = az._strip_ansi(text) + assert result == 'Success' + + def test_strip_ansi_with_multiple_codes(self): + text = '\x1b[31mError\x1b[0m \x1b[1;33mWarning\x1b[0m' + result = az._strip_ansi(text) + assert result == 'Error Warning' + + def test_strip_ansi_with_no_codes(self): + text = 'Plain text' + result = az._strip_ansi(text) + assert result == 'Plain text' + + def test_strip_ansi_empty_string(self): + result = az._strip_ansi('') + assert not result + + +class TestRedactSecrets: + """Test secret redaction in output.""" + + def test_redact_access_token(self): + text = '{"accessToken": "secret-token-value"}' + result = az._redact_secrets(text) + assert 'secret-token-value' not in result + assert '***REDACTED***' in result + + def test_redact_refresh_token(self): + text = '{"refreshToken": "my-refresh-token"}' + result = az._redact_secrets(text) + assert 'my-refresh-token' not in result + assert '***REDACTED***' in result + + def test_redact_client_secret(self): + text = '{"client_secret": "super-secret"}' + result = az._redact_secrets(text) + assert 'super-secret' not in result + assert '***REDACTED***' in result + + def test_redact_bearer_token(self): + text = 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9' + result = az._redact_secrets(text) + assert 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9' not in result + assert '***REDACTED***' in result + + def test_redact_empty_string(self): + result = az._redact_secrets('') + assert not result + + def test_redact_none_value(self): + result = az._redact_secrets(None) + assert result is None + + def test_redact_case_insensitive(self): + text = '{"AccessToken": "secret"}' + result = az._redact_secrets(text) + assert 'secret' not in result + + +class TestIsAzCommand: + """Test Azure CLI command detection.""" + + def test_is_az_command_with_whitespace(self): + assert az._is_az_command(' az group list') is True + assert az._is_az_command('az account show ') is True + + def test_is_az_command_with_arguments(self): + assert az._is_az_command('az group list -g test') is True + assert az._is_az_command('az account show -o json') is True + assert az._is_az_command('az apim list --query') is True + + def test_is_az_command_just_az(self): + assert az._is_az_command('az') is True + + def test_is_not_az_command(self): + assert az._is_az_command('echo hello') is False + assert az._is_az_command('python script.py') is False + assert az._is_az_command('azurecli list') is False + assert az._is_az_command('') is False + + +class TestMaybeAddAzDebugFlag: + """Test adding --debug flag to az commands.""" + + def test_add_debug_flag_disabled_logging(self, monkeypatch): + monkeypatch.setattr('azure_resources.is_debug_enabled', lambda: False) + + command = 'az group list' + result = az._maybe_add_az_debug_flag(command) + assert '--debug' not in result + assert result == command + + def test_add_debug_flag_non_az_command(self, monkeypatch): + monkeypatch.setattr('azure_resources.is_debug_enabled', lambda: True) + + command = 'python script.py' + result = 
az._maybe_add_az_debug_flag(command) + assert '--debug' not in result + + def test_add_debug_flag_already_present(self, monkeypatch): + monkeypatch.setattr('azure_resources.is_debug_enabled', lambda: True) + + command = 'az group list --debug' + result = az._maybe_add_az_debug_flag(command) + assert result.count('--debug') == 1 + + def test_add_debug_flag_before_pipe(self, monkeypatch): + monkeypatch.setattr('azure_resources.is_debug_enabled', lambda: True) + + command = 'az group list | grep test' + result = az._maybe_add_az_debug_flag(command) + assert '--debug' in result + assert result.index('--debug') < result.index('|') + + def test_add_debug_flag_before_redirect(self, monkeypatch): + monkeypatch.setattr('azure_resources.is_debug_enabled', lambda: True) + + command = 'az group list > output.txt' + result = az._maybe_add_az_debug_flag(command) + assert '--debug' in result + assert result.index('--debug') < result.index('>') + + def test_add_debug_flag_before_or_operator(self, monkeypatch): + monkeypatch.setattr('azure_resources.is_debug_enabled', lambda: True) + + command = 'az group list || echo failed' + result = az._maybe_add_az_debug_flag(command) + assert '--debug' in result + + def test_add_debug_flag_before_and_operator(self, monkeypatch): + monkeypatch.setattr('azure_resources.is_debug_enabled', lambda: True) + + command = 'az group list && az account show' + result = az._maybe_add_az_debug_flag(command) + assert '--debug' in result + + +class TestExtractAzCliErrorMessage: + """Test Azure CLI error message extraction.""" + + def test_extract_json_error_with_error_object(self): + output = '{"error": {"message": "Resource not found"}}' + result = az._extract_az_cli_error_message(output) + assert result == 'Resource not found' + + def test_extract_json_error_with_message_field(self): + output = '{"message": "Operation failed"}' + result = az._extract_az_cli_error_message(output) + assert result == 'Operation failed' + + def test_extract_error_prefix(self): + output = 'ERROR: Resource group not found' + result = az._extract_az_cli_error_message(output) + assert result == 'Resource group not found' + + def test_extract_az_error_prefix(self): + output = 'az: error: Invalid argument' + result = az._extract_az_cli_error_message(output) + assert result == 'Invalid argument' + + def test_extract_code_and_message(self): + output = 'Code: AuthenticationFailed\nMessage: Token expired' + result = az._extract_az_cli_error_message(output) + assert result == 'AuthenticationFailed: Token expired' + + def test_extract_message_only(self): + output = 'Some other line\nMessage: Parameter is required' + result = az._extract_az_cli_error_message(output) + assert 'Parameter is required' in result or result == 'Message: Parameter is required' + + def test_extract_empty_output(self): + result = az._extract_az_cli_error_message('') + assert not result + + def test_extract_none_output(self): + result = az._extract_az_cli_error_message(None) + assert not result + + def test_extract_with_ansi_codes(self): + output = '\x1b[31mERROR: \x1b[0mOperation failed' + result = az._extract_az_cli_error_message(output) + assert 'Operation failed' in result + + def test_extract_json_in_middle_of_text(self): + output = 'Some output\n{"error": {"message": "Actual error"}}\nMore text' + result = az._extract_az_cli_error_message(output) + assert result == 'Actual error' + + def test_extract_with_traceback(self): + output = 'Traceback (most recent call last):\n File "test.py"\nError: Something failed' + result = 
az._extract_az_cli_error_message(output) + assert 'Traceback' not in result + + def test_extract_warning_ignored(self): + output = 'WARNING: Something\nERROR: Actual error' + result = az._extract_az_cli_error_message(output) + assert result == 'Actual error' + + def test_extract_only_empty_lines(self): + output = '\n\nTraceback (most recent call last):\n' + result = az._extract_az_cli_error_message(output) + assert not result + + +class TestFormatDuration: + """Test duration formatting.""" + + def test_format_duration_seconds(self): + start_time = time.time() - 5 + result = az._format_duration(start_time) + assert '[0m:' in result + assert 's]' in result + + def test_format_duration_minutes_and_seconds(self): + start_time = time.time() - 65 + result = az._format_duration(start_time) + assert '[1m:' in result + + +class TestLooksLikeJson: + """Test JSON detection.""" + + def test_looks_like_json_with_object(self): + assert az._looks_like_json('{"key": "value"}') is True + assert az._looks_like_json(' {"key": "value"}') is True + + def test_looks_like_json_with_array(self): + assert az._looks_like_json('[1, 2, 3]') is True + assert az._looks_like_json(' [1, 2, 3]') is True + + def test_looks_like_json_with_invalid(self): + assert az._looks_like_json('{"key": value}') is False + assert az._looks_like_json('not json') is False + + def test_looks_like_json_empty(self): + assert az._looks_like_json('') is False + + def test_looks_like_json_only_whitespace(self): + assert az._looks_like_json(' ') is False + + def test_looks_like_json_xml(self): + assert az._looks_like_json('') is False + + def test_looks_like_json_plain_text(self): + assert az._looks_like_json('plain text') is False + + +class TestRunFunctionEdgeCases: + """Test edge cases in the run() function.""" + + def test_run_with_stderr_only(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_command', 'print_error', 'print_ok']) + + mock_completed = Mock() + mock_completed.returncode = 0 + mock_completed.stdout = '' + mock_completed.stderr = 'Some warning' + + monkeypatch.setattr('azure_resources.subprocess.run', lambda *a, **k: mock_completed) + + result = az.run('echo test') + assert result.success is True + + def test_run_with_empty_output(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_command', 'print_ok']) + + mock_completed = Mock() + mock_completed.returncode = 0 + mock_completed.stdout = '' + mock_completed.stderr = '' + + monkeypatch.setattr('azure_resources.subprocess.run', lambda *a, **k: mock_completed) + + result = az.run('echo test') + assert result.success is True + assert not result.text + + def test_run_with_none_stdout_stderr(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_command', 'print_ok']) + + mock_completed = Mock() + mock_completed.returncode = 0 + mock_completed.stdout = None + mock_completed.stderr = None + + monkeypatch.setattr('azure_resources.subprocess.run', lambda *a, **k: mock_completed) + + result = az.run('echo test') + assert result.success is True + + def test_run_with_non_az_command(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_command', 'print_ok']) + + mock_completed = Mock() + mock_completed.returncode = 0 + mock_completed.stdout = 'output' + mock_completed.stderr = '' + + run_calls = [] + + def mock_run(*args, **kwargs): + run_calls.append((args, kwargs)) + return mock_completed + + monkeypatch.setattr('azure_resources.subprocess.run', mock_run) + + result = az.run('echo test') + assert 
result.success is True + assert len(run_calls) == 1 + + def test_run_with_json_stdout(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_command', 'print_ok']) + + mock_completed = Mock() + mock_completed.returncode = 0 + mock_completed.stdout = '{"key": "value"}' + mock_completed.stderr = '' + + monkeypatch.setattr('azure_resources.subprocess.run', lambda *a, **k: mock_completed) + + result = az.run('az group list -o json') + assert result.success is True + assert result.json_data == {'key': 'value'} + + def test_run_command_with_special_characters(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_command', 'print_ok']) + + mock_completed = Mock() + mock_completed.returncode = 0 + mock_completed.stdout = 'output' + mock_completed.stderr = '' + + monkeypatch.setattr('azure_resources.subprocess.run', lambda *a, **k: mock_completed) + + result = az.run('echo "test with spaces" && echo done') + assert result.success is True + + +class TestGetAccountInfoEdgeCases: + """Test edge cases in get_account_info().""" + + def test_get_account_info_partial_failure(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val', 'print_error']) + + account_output = Mock() + account_output.success = True + account_output.json_data = { + 'user': {'name': 'test@example.com'}, + 'tenantId': 'tenant-123', + 'id': 'subscription-123' + } + + ad_output = Mock() + ad_output.success = False + ad_output.json_data = None + + call_count = [0] + + def mock_run(cmd, *args, **kwargs): + call_count[0] += 1 + if 'account show' in cmd: + return account_output + return ad_output + + monkeypatch.setattr('azure_resources.run', mock_run) + + with pytest.raises(Exception): + az.get_account_info() + + def test_get_account_info_success(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val', 'print_error']) + + account_output = Mock() + account_output.success = True + account_output.json_data = { + 'user': {'name': 'test@example.com'}, + 'tenantId': 'tenant-123', + 'id': 'subscription-123' + } + + ad_output = Mock() + ad_output.success = True + ad_output.json_data = {'id': 'user-123'} + + call_count = [0] + + def mock_run(cmd, *args, **kwargs): + call_count[0] += 1 + if 'account show' in cmd: + return account_output + return ad_output + + monkeypatch.setattr('azure_resources.run', mock_run) + + user, user_id, tenant, subscription = az.get_account_info() + assert user == 'test@example.com' + assert user_id == 'user-123' + assert tenant == 'tenant-123' + assert subscription == 'subscription-123' + + +class TestGetDeploymentName: + """Test get_deployment_name function.""" + + def test_get_deployment_name_custom_directory(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val']) + + result = az.get_deployment_name('my-sample') + assert 'deploy-my-sample-' in result + + +class TestGetFrontdoorUrl: + """Test get_frontdoor_url function.""" + + def test_get_frontdoor_url_not_found(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val']) + + mock_output = Mock() + mock_output.success = False + mock_output.json_data = None + + monkeypatch.setattr('azure_resources.run', lambda *a, **k: mock_output) + + result = az.get_frontdoor_url(INFRASTRUCTURE.SIMPLE_APIM, 'test-rg') + assert result is None + + +class TestGetApimUrl: + """Test get_apim_url function.""" + + def test_get_apim_url_no_results(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val']) + + mock_output = Mock() + mock_output.success 
= True + mock_output.json_data = [] + mock_output.is_json = True + + monkeypatch.setattr('azure_resources.run', lambda *a, **k: mock_output) + + result = az.get_apim_url('test-rg') + assert result is None + + +class TestListApimSubscriptions: + """Test list_apim_subscriptions function.""" + + def test_list_apim_subscriptions_success(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val']) + + mock_output = Mock() + mock_output.success = True + mock_output.json_data = { + 'value': [ + {'id': 'sub-1', 'displayName': 'Subscription 1'}, + {'id': 'sub-2', 'displayName': 'Subscription 2'} + ] + } + mock_output.is_json = True + + monkeypatch.setattr('azure_resources.run', lambda *a, **k: mock_output) + + result = az.list_apim_subscriptions('test-apim', 'test-rg') + assert len(result) == 2 + assert result[0]['id'] == 'sub-1' + + def test_list_apim_subscriptions_empty(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val']) + + mock_output = Mock() + mock_output.success = True + mock_output.json_data = {'value': []} + mock_output.is_json = True + + monkeypatch.setattr('azure_resources.run', lambda *a, **k: mock_output) + + result = az.list_apim_subscriptions('test-apim', 'test-rg') + assert result == [] + + def test_list_apim_subscriptions_failure(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val']) + + mock_output = Mock() + mock_output.success = False + mock_output.json_data = None + + monkeypatch.setattr('azure_resources.run', lambda *a, **k: mock_output) + + result = az.list_apim_subscriptions('test-apim', 'test-rg') + assert result == [] + + +class TestGetAppGwEndpoint: + """Test get_appgw_endpoint function.""" + + def test_get_appgw_endpoint_not_found(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val']) + + with patch('azure_resources.run') as mock_run: + mock_run.return_value = Output(False, 'No gateways found') + + hostname, ip = az.get_appgw_endpoint('test-rg') + + assert hostname is None + assert ip is None + + +class TestGetUniqueInfraSuffix: + """Test get_unique_suffix_for_resource_group function.""" + + def test_get_unique_suffix_empty_rg(self): + result = az.get_unique_suffix_for_resource_group('') + assert isinstance(result, str) + + +class TestFindInfrastructureInstances: + """Test find_infrastructure_instances function.""" + + def test_find_infrastructure_instances_no_matches(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val', 'print_message']) + + def mock_run(cmd, *args, **kwargs): + output = Mock() + output.success = True + output.text = '' + return output + + monkeypatch.setattr('azure_resources.run', mock_run) + + result = az.find_infrastructure_instances(INFRASTRUCTURE.SIMPLE_APIM) + assert result == [] + + +class TestGetInfraRgName: + """Test get_infra_rg_name function.""" + + def test_get_infra_rg_name_with_index(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val']) + + result = az.get_infra_rg_name(INFRASTRUCTURE.SIMPLE_APIM, 1) + assert 'simple-apim' in result + assert '1' in result + + def test_get_infra_rg_name_without_index(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val']) + + result = az.get_infra_rg_name(INFRASTRUCTURE.APIM_ACA) + assert 'apim-aca' in result + + +class TestGetRgName: + """Test get_rg_name function.""" + + def test_get_rg_name_with_index(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val']) + + result = az.get_rg_name('my-sample', 2) + assert 
'my-sample' in result + assert '2' in result + + def test_get_rg_name_without_index(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val']) + + result = az.get_rg_name('test-deployment') + assert 'test-deployment' in result + assert '-test-deployment' in result + + +class TestCheckApimBlobPermissions: + """Test check_apim_blob_permissions function.""" + + def test_check_apim_blob_permissions_no_principal_id(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val', 'print_info', 'print_error', 'print_warning']) + + mock_output = Mock() + mock_output.success = False + mock_output.json_data = None + + monkeypatch.setattr('azure_resources.run', lambda *a, **k: mock_output) + + result = az.check_apim_blob_permissions('apim', 'storage', 'rg', max_wait_minutes=1) + assert result is False + + +class TestCleanupOldJwtSigningKeys: + """Test cleanup_old_jwt_signing_keys function.""" + + def test_cleanup_old_jwt_no_other_keys(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val', 'print_info', 'print_message']) + + mock_output = Mock() + mock_output.success = True + mock_output.json_data = [{'name': 'JwtSigningKey-authX-12345'}] + mock_output.is_json = True + + monkeypatch.setattr('azure_resources.run', lambda *a, **k: mock_output) + + result = az.cleanup_old_jwt_signing_keys('apim', 'rg', 'JwtSigningKey-authX-12345') + assert isinstance(result, bool) + + def test_cleanup_old_jwt_list_fails(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_val', 'print_info', 'print_message', 'print_error']) + + mock_output = Mock() + mock_output.success = False + mock_output.json_data = None + + monkeypatch.setattr('azure_resources.run', lambda *a, **k: mock_output) + + result = az.cleanup_old_jwt_signing_keys('apim', 'rg', 'JwtSigningKey-authX-12345') + assert result is False + + +class TestGetApimSubscriptionKey: + """Test get_apim_subscription_key function.""" + + def test_get_apim_subscription_key_invalid_params(self): + result = az.get_apim_subscription_key('', 'rg') + assert result is None + + result = az.get_apim_subscription_key('apim', '') + assert result is None + + +class TestGetEndpoints: + """Test get_endpoints function.""" + + def test_get_endpoints_with_simple_apim(self, monkeypatch): + suppress_module_functions(monkeypatch, az, ['print_message', 'print_val']) + + monkeypatch.setattr('azure_resources.get_frontdoor_url', lambda *a, **k: None) + monkeypatch.setattr('azure_resources.get_apim_url', lambda *a, **k: 'https://apim.azure-api.net') + monkeypatch.setattr('azure_resources.get_appgw_endpoint', lambda *a, **k: (None, None)) + + result = az.get_endpoints(INFRASTRUCTURE.SIMPLE_APIM, 'test-rg') + + assert result is not None + assert result.apim_endpoint_url == 'https://apim.azure-api.net' diff --git a/tests/python/test_azure_resources_run.py b/tests/python/test_azure_resources_run.py index 84fc37f..d906d61 100644 --- a/tests/python/test_azure_resources_run.py +++ b/tests/python/test_azure_resources_run.py @@ -8,12 +8,13 @@ import logging from types import SimpleNamespace -from unittest.mock import Mock, patch +from unittest.mock import patch import pytest # APIM Samples imports import azure_resources as az +from test_helpers import mock_module_functions class _FakeLock: @@ -32,10 +33,7 @@ def __exit__(self, exc_type, exc, tb) -> None: def _quiet_console(monkeypatch: pytest.MonkeyPatch) -> None: """Silence console facade functions so tests don't emit output.""" - monkeypatch.setattr(az, 'print_command', Mock()) 
- monkeypatch.setattr(az, 'print_plain', Mock()) - monkeypatch.setattr(az, 'print_ok', Mock()) - monkeypatch.setattr(az, 'print_error', Mock()) + mock_module_functions(monkeypatch, az, ['print_command', 'print_plain', 'print_ok', 'print_error']) def test_run_adds_az_debug_flag_and_keeps_stdout_clean_when_success(_quiet_console: None) -> None: diff --git a/tests/python/test_charts.py b/tests/python/test_charts.py index ec20824..aead968 100644 --- a/tests/python/test_charts.py +++ b/tests/python/test_charts.py @@ -5,7 +5,9 @@ from unittest.mock import patch, MagicMock import sys import os +import json import pytest +import pandas as pd from charts import BarChart # Add the shared/python directory to the Python path @@ -445,3 +447,120 @@ def test_backend_index_edge_cases(): assert call_args[1]['Backend Index'] == 99 # Missing index field assert call_args[2]['Backend Index'] == 99 # Empty JSON assert call_args[3]['Backend Index'] == 99 # Non-200 status + + +@patch('charts.plt') +@patch('charts.pd') +def test_average_line_calculation_normal_data(mock_pd, mock_plt, sample_api_results): + """Test average line calculation with normal data (no extreme outliers).""" + + # Create real DataFrame to test filtering logic + chart = BarChart('Test', 'X', 'Y', sample_api_results) + + # Build the real rows as the code does + rows = [] + for entry in sample_api_results: + run = entry['run'] + response_time = entry['response_time'] + status_code = entry['status_code'] + if status_code == 200 and entry['response']: + try: + resp = json.loads(entry['response']) + backend_index = resp.get('index', 99) + except Exception: + backend_index = 99 + else: + backend_index = 99 + rows.append({ + 'Run': run, + 'Response Time (ms)': response_time * 1000, + 'Backend Index': backend_index, + 'Status Code': status_code + }) + + real_df = pd.DataFrame(rows) + mock_pd.DataFrame.return_value = real_df + + # Mock plotting methods + mock_ax = MagicMock() + with patch.object(real_df, 'plot', return_value=mock_ax): + chart._plot_barchart(sample_api_results) + + # Verify average line was plotted + mock_plt.axhline.assert_called() + mock_plt.text.assert_called() + + +@patch('charts.plt') +@patch('charts.pd') +def test_average_line_calculation_with_outlier(mock_pd, mock_plt): + """Test average line calculation when data has high outliers.""" + + # Create data with a high outlier + results_with_outlier = [ + {'run': 1, 'response_time': 0.1, 'status_code': 200, 'response': '{"index": 1}'}, + {'run': 2, 'response_time': 0.12, 'status_code': 200, 'response': '{"index": 1}'}, + {'run': 3, 'response_time': 0.11, 'status_code': 200, 'response': '{"index": 1}'}, + {'run': 4, 'response_time': 0.13, 'status_code': 200, 'response': '{"index": 1}'}, + {'run': 5, 'response_time': 5.0, 'status_code': 200, 'response': '{"index": 1}'}, # Outlier + ] + + chart = BarChart('Test', 'X', 'Y', results_with_outlier) + + # Build real rows + rows = [] + for entry in results_with_outlier: + resp = json.loads(entry['response']) + rows.append({ + 'Run': entry['run'], + 'Response Time (ms)': entry['response_time'] * 1000, + 'Backend Index': resp.get('index', 99), + 'Status Code': entry['status_code'] + }) + + real_df = pd.DataFrame(rows) + mock_pd.DataFrame.return_value = real_df + + # Mock plotting + mock_ax = MagicMock() + with patch.object(real_df, 'plot', return_value=mock_ax): + chart._plot_barchart(results_with_outlier) + + # Verify average line calculation excluded the outlier + mock_plt.axhline.assert_called() + mock_plt.text.assert_called() + + 
+@patch('charts.plt') +@patch('charts.pd') +def test_average_line_all_data_outliers(mock_pd, mock_plt): + """Test average line calculation when all data points are outliers (edge case).""" + + # Create data where all points are very high + all_outlier_results = [ + {'run': 1, 'response_time': 10.0, 'status_code': 200, 'response': '{"index": 1}'}, + {'run': 2, 'response_time': 11.0, 'status_code': 200, 'response': '{"index": 1}'}, + ] + + chart = BarChart('Test', 'X', 'Y', all_outlier_results) + + rows = [] + for entry in all_outlier_results: + resp = json.loads(entry['response']) + rows.append({ + 'Run': entry['run'], + 'Response Time (ms)': entry['response_time'] * 1000, + 'Backend Index': resp.get('index', 99), + 'Status Code': entry['status_code'] + }) + + real_df = pd.DataFrame(rows) + mock_pd.DataFrame.return_value = real_df + + mock_ax = MagicMock() + with patch.object(real_df, 'plot', return_value=mock_ax): + chart._plot_barchart(all_outlier_results) + + # Should still plot average line + mock_plt.axhline.assert_called() + mock_plt.text.assert_called() diff --git a/tests/python/test_console.py b/tests/python/test_console.py index e0bae58..80714fd 100644 --- a/tests/python/test_console.py +++ b/tests/python/test_console.py @@ -1,54 +1,12 @@ -""" -Unit tests for the console module. +"""Unit tests for the console module.""" -Tests all public console output functions including formatting, colors, -thread safety, and various output options. -""" - -import io import logging import threading -import console - -# ------------------------------ -# HELPER FUNCTIONS -# ------------------------------ +import console -def capture_output(func, *args, **kwargs): - """ - Capture console logging output from a function call. - - Args: - func: Function to call - *args: Positional arguments for the function - **kwargs: Keyword arguments for the function - - Returns: - str: Captured output - """ - captured_output = io.StringIO() - - logger = logging.getLogger('console') - previous_level = logger.level - previous_handlers = list(logger.handlers) - previous_propagate = logger.propagate - - handler = logging.StreamHandler(captured_output) - handler.setFormatter(logging.Formatter('%(message)s')) - - # Route console messages only to our in-memory stream for deterministic tests. - logger.handlers = [handler] - logger.setLevel(logging.DEBUG) - logger.propagate = False - - try: - func(*args, **kwargs) - return captured_output.getvalue() - finally: - logger.handlers = previous_handlers - logger.setLevel(previous_level) - logger.propagate = previous_propagate +# APIM Samples imports +from test_helpers import capture_console_output as capture_output # ------------------------------ diff --git a/tests/python/test_helpers.py b/tests/python/test_helpers.py new file mode 100644 index 0000000..e1090ec --- /dev/null +++ b/tests/python/test_helpers.py @@ -0,0 +1,708 @@ +""" +Shared test helpers, mock factories, and assertion utilities. 
+""" + +import io +import logging +import builtins +from collections.abc import Callable +from unittest.mock import Mock, MagicMock, mock_open, patch +import json as json_module + +# APIM Samples imports +from apimtypes import APIM_SKU, APIMNetworkMode, API, APIOperation, PolicyFragment, Output, HTTP_VERB + + +# ------------------------------ +# PATCH HELPERS +# ------------------------------ + +def suppress_module_functions(monkeypatch, module, names: list[str]) -> None: + """Suppress noisy functions on a module by replacing them with a no-op.""" + + def _noop(*args, **kwargs): + return None + + for name in names: + monkeypatch.setattr(module, name, _noop) + + +def mock_module_functions(monkeypatch, module, names: list[str]) -> dict[str, Mock]: + """Replace module functions with Mock instances. + + Returns a dict of name -> mock for convenience in assertions. + """ + + mocks: dict[str, Mock] = {} + for name in names: + mock = Mock() + monkeypatch.setattr(module, name, mock) + mocks[name] = mock + return mocks + + +def patch_module_thread_safe_printing( + monkeypatch, + module, + *, + print_log: Callable[..., object] | None = None, + lock: object | None = None, + lock_attr: str = '_print_lock', + log_attr: str = '_print_log' +) -> object: + """Patch a module's internal thread-safe printing primitives. + + Many modules use a lock + print-log function internally to serialize output. + This helper standardizes patching those attributes to reduce per-test boilerplate. + + Args: + monkeypatch: pytest monkeypatch fixture + module: module under test + print_log: function to install as the module's log function + lock: object to install as the module's lock (defaults to MagicMock) + lock_attr: attribute name of the lock on the module + log_attr: attribute name of the log function on the module + + Returns: + The lock object that was installed. + """ + + if lock is None: + lock = MagicMock() + + if print_log is None: + def _noop(*args, **kwargs): + return None + + print_log = _noop + + monkeypatch.setattr(module, lock_attr, lock) + monkeypatch.setattr(module, log_attr, print_log) + return lock + + +def capture_module_print_log( + monkeypatch, + module, + *, + lock_attr: str = '_print_lock', + log_attr: str = '_print_log' +) -> list[dict[str, object]]: + """Capture calls to a module's internal print-log function. + + Returns a list of dict entries with keys: msg, icon, color, kwargs. + """ + + calls: list[dict[str, object]] = [] + + def _print_log(msg, icon, color, **kwargs): + calls.append({'msg': msg, 'icon': icon, 'color': color, 'kwargs': kwargs}) + + patch_module_thread_safe_printing( + monkeypatch, + module, + print_log=_print_log, + lock_attr=lock_attr, + log_attr=log_attr + ) + return calls + +def patch_open_for_text_read( + monkeypatch, + *, + match: str | Callable[[str], bool], + read_data: str | None = None, + raises: Exception | None = None +): + """Patch builtins.open for a specific text-mode path match. + + Only intercepts when 'b' is not present in the requested mode. + All other opens are delegated to the real built-in open. 
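+
+    Example (illustrative sketch; the file name and contents are placeholders,
+    not paths used by this repo):
+        mocked = patch_open_for_text_read(monkeypatch, match='settings.json', read_data='{}')
+        # code under test that opens 'settings.json' in text mode now receives '{}'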
+ """ + real_open = builtins.open + open_mock = mock_open(read_data=read_data) if read_data is not None else None + + def open_selector(file, *args, **kwargs): + mode = kwargs.get('mode', args[0] if args else 'r') + file_str = str(file) + is_match = match(file_str) if callable(match) else file_str == str(match) + + if is_match and 'b' not in mode: + if raises is not None: + raise raises + return open_mock(file, *args, **kwargs) + + return real_open(file, *args, **kwargs) + + monkeypatch.setattr(builtins, 'open', open_selector) + return open_mock + + +def mock_popen(monkeypatch, *, stdout_lines: list[str], returncode: int = 0) -> None: + """Patch subprocess.Popen with a context-manager friendly mock process.""" + + class MockProcess: + def __init__(self, *args, **kwargs): + self.returncode = returncode + self.stdout = iter(stdout_lines) + + def wait(self): + return None + + def __enter__(self): + return self + + def __exit__(self, *args): + return False + + monkeypatch.setattr('subprocess.Popen', MockProcess) + + +def patch_os_paths( + monkeypatch, + *, + cwd: str = '/test/dir', + exists: bool | Callable[[str], bool] = True, + basename: str | Callable[[str], str] = 'test-dir' +) -> None: + """Patch common os.getcwd / os.path.exists / os.path.basename for tests.""" + monkeypatch.setattr('os.getcwd', MagicMock(return_value=cwd)) + + if callable(exists): + monkeypatch.setattr('os.path.exists', exists) + else: + monkeypatch.setattr('os.path.exists', MagicMock(return_value=exists)) + + if callable(basename): + monkeypatch.setattr('os.path.basename', basename) + else: + monkeypatch.setattr('os.path.basename', MagicMock(return_value=basename)) + + +def patch_create_bicep_deployment_group_dependencies( + monkeypatch, + *, + az_module, + run_success: bool = True, + cwd: str = '/test/dir', + exists: bool | Callable[[str], bool] = True, + basename: str | Callable[[str], str] = 'test-dir' +): + """Patch common dependencies for utils.create_bicep_deployment_group tests. + + Returns: + tuple: (mock_create_resource_group, mock_az_run, mock_open) + """ + mock_create_rg = MagicMock() + monkeypatch.setattr(az_module, 'create_resource_group', mock_create_rg) + + mock_run = MagicMock(return_value=MagicMock(success=run_success)) + monkeypatch.setattr(az_module, 'run', mock_run) + + open_mock = mock_open() + monkeypatch.setattr(builtins, 'open', open_mock) + monkeypatch.setattr(builtins, 'print', MagicMock()) + + patch_os_paths(monkeypatch, cwd=cwd, exists=exists, basename=basename) + + return mock_create_rg, mock_run, open_mock + + +# ------------------------------ +# MOCK FACTORIES +# ------------------------------ + +def create_mock_output(success: bool = True, text: str = '', json_data: dict | None = None) -> Output: + """ + Factory for creating consistent mock Azure CLI Output objects. + + Args: + success: Whether the command succeeded + text: Text output from command + json_data: JSON data from command + + Returns: + Output object configured with provided values + """ + output = Output(success, text) + if json_data is not None: + output.json_data = json_data + return output + + +def create_mock_az_module( + rg_exists: bool = True, + rg_name: str = 'rg-test-infrastructure-01', + account_info: tuple = ('test_user', 'test_user_id', 'test_tenant', 'test_subscription'), + resource_suffix: str = 'abc123def456', + run_success: bool = True, + run_output: dict | str | None = None +): + """ + Factory for creating a mock azure_resources (az) module. 
+ + Args: + rg_exists: Whether resource group exists + rg_name: Resource group name to return + account_info: Tuple of (user, user_id, tenant, subscription) + resource_suffix: Unique suffix for resources + run_success: Default success state for az.run calls + run_output: Default output for az.run calls + + Returns: + Mock configured with common azure_resources patterns + """ + mock_az = Mock() + mock_az.get_infra_rg_name.return_value = rg_name + mock_az.create_resource_group.return_value = None + mock_az.does_resource_group_exist.return_value = rg_exists + mock_az.get_account_info.return_value = account_info + mock_az.get_unique_suffix_for_resource_group.return_value = resource_suffix + + # Configure default run output + if run_output is None: + run_output = {'outputs': 'test'} + + mock_output = Mock() + mock_output.success = run_success + + if isinstance(run_output, dict): + mock_output.json_data = run_output + mock_output.get.return_value = 'https://test-apim.azure-api.net' + mock_output.getJson.return_value = ['api1', 'api2'] + else: + mock_output.text = run_output + + mock_az.run.return_value = mock_output + + return mock_az + + +def create_mock_utils_module( + tags: dict | None = None, + policy_xml: str = '', + policy_path: str = '/mock/path/policy.xml', + verify_result: bool = True +): + """ + Factory for creating a mock utils module. + + Args: + tags: Infrastructure tags to return + policy_xml: XML content for policies + policy_path: Path to policy files + verify_result: Result of infrastructure verification + + Returns: + Mock configured with common utils patterns + """ + if tags is None: + tags = {'environment': 'test', 'project': 'apim-samples'} + + mock_utils = Mock() + mock_utils.build_infrastructure_tags.return_value = tags + mock_utils.read_policy_xml.return_value = policy_xml + mock_utils.determine_shared_policy_path.return_value = policy_path + mock_utils.verify_infrastructure.return_value = verify_result + + return mock_utils + + +def create_sample_policy_fragments(count: int = 2) -> list[PolicyFragment]: + """ + Factory for creating sample PolicyFragment objects for testing. + + Args: + count: Number of policy fragments to create + + Returns: + List of PolicyFragment objects + """ + return [ + PolicyFragment( + f'Test-Fragment-{i+1}', + f'test{i+1}', + f'Test fragment {i+1}' + ) + for i in range(count) + ] + + +def create_sample_apis(count: int = 2) -> list[API]: + """ + Factory for creating sample API objects for testing. + + Args: + count: Number of APIs to create + + Returns: + List of API objects + """ + return [ + API( + f'test-api-{i+1}', + f'Test API {i+1}', + f'/test{i+1}', + f'Test API {i+1} description', + f'api{i+1}' + ) + for i in range(count) + ] + + +def create_sample_api_operations(count: int = 2) -> list[APIOperation]: + """ + Factory for creating sample APIOperation objects for testing. + + Args: + count: Number of operations to create + + Returns: + List of APIOperation objects + """ + verbs = [HTTP_VERB.GET, HTTP_VERB.POST, HTTP_VERB.PUT, HTTP_VERB.DELETE] + return [ + APIOperation( + f'operation-{i+1}', + f'Operation {i+1}', + verbs[i % len(verbs)], + f'/resource{i+1}', + f'operation{i+1}' + ) + for i in range(count) + ] + + +# ------------------------------ +# ASSERTION HELPERS +# ------------------------------ + +def assert_bicep_params_structure(params: dict) -> None: + """ + Verify bicep parameters have the expected structure. 
+ + Args: + params: Bicep parameters dictionary to validate + + Raises: + AssertionError: If structure is invalid + """ + assert isinstance(params, dict), "Bicep params must be a dict" + + # Common required parameters + required_keys = ['location', 'resourceSuffix'] + for key in required_keys: + assert key in params, f"Missing required bicep parameter: {key}" + assert 'value' in params[key], f"Parameter {key} missing 'value' key" + + +def assert_infrastructure_components( + infra, + expected_min_apis: int = 1, + expected_min_pfs: int = 6, + check_rg: bool = True +) -> None: + """ + Verify infrastructure instance has expected components initialized. + + Args: + infra: Infrastructure instance to check + expected_min_apis: Minimum number of APIs expected + expected_min_pfs: Minimum number of policy fragments expected + check_rg: Whether to check resource group attributes + + Raises: + AssertionError: If components don't meet expectations + """ + # Initialize components + apis = infra._define_apis() + pfs = infra._define_policy_fragments() + + assert len(apis) >= expected_min_apis, \ + f"Expected at least {expected_min_apis} APIs, got {len(apis)}" + assert len(pfs) >= expected_min_pfs, \ + f"Expected at least {expected_min_pfs} policy fragments, got {len(pfs)}" + + if check_rg: + assert hasattr(infra, 'rg_name'), "Infrastructure missing rg_name" + assert hasattr(infra, 'rg_location'), "Infrastructure missing rg_location" + assert infra.rg_name, "rg_name should not be empty" + + +def assert_api_structure(api: API, check_operations: bool = False) -> None: + """ + Verify API object has all required fields properly set. + + Args: + api: API object to validate + check_operations: Whether to validate operations list + + Raises: + AssertionError: If API structure is invalid + """ + assert api.name, "API name should not be empty" + assert api.displayName, "API displayName should not be empty" + assert api.path, "API path should not be empty" + assert hasattr(api, 'operations'), "API missing operations attribute" + assert hasattr(api, 'tags'), "API missing tags attribute" + assert hasattr(api, 'productNames'), "API missing productNames attribute" + + if check_operations: + assert isinstance(api.operations, list), "API operations must be a list" + + +def assert_policy_fragment_structure(pf: PolicyFragment) -> None: + """ + Verify PolicyFragment object has all required fields properly set. + + Args: + pf: PolicyFragment object to validate + + Raises: + AssertionError: If PolicyFragment structure is invalid + """ + assert pf.name, "PolicyFragment name should not be empty" + assert pf.policyXml, "PolicyFragment policyXml should not be empty" + assert hasattr(pf, 'description'), "PolicyFragment missing description" + + +# ------------------------------ +# TEST DATA GENERATORS +# ------------------------------ + +def get_sample_bicep_params() -> dict: + """ + Get a sample bicep parameters dictionary for testing. + + Returns: + Dictionary with sample bicep parameters + """ + return { + 'location': {'value': 'eastus2'}, + 'resourceSuffix': {'value': 'abc123'}, + 'apimSku': {'value': 'BasicV2'}, + 'apis': {'value': []}, + 'policyFragments': {'value': []} + } + + +def get_sample_infrastructure_params() -> dict: + """ + Get sample parameters for creating Infrastructure instances. 
+ + Returns: + Dictionary with common infrastructure parameters + """ + return { + 'rg_location': 'eastus2', + 'index': 1, + 'apim_sku': APIM_SKU.BASICV2, + 'networkMode': APIMNetworkMode.PUBLIC + } + + +# ------------------------------ +# HTTP MOCK FACTORIES +# ------------------------------ + +def create_mock_http_response( + status_code: int = 200, + json_data: dict | None = None, + text: str | None = None, + headers: dict | None = None, + raise_for_status_error: Exception | None = None +): + """ + Factory for creating mock HTTP response objects. + + Args: + status_code: HTTP status code + json_data: JSON response data + text: Text response content + headers: Response headers + raise_for_status_error: Exception to raise on raise_for_status() + + Returns: + Mock response object configured for HTTP testing + """ + if headers is None: + headers = {'Content-Type': 'application/json'} + + if json_data is not None and text is None: + text = json_module.dumps(json_data, indent=4) + elif text is None: + text = '' + + mock_response = MagicMock() + mock_response.status_code = status_code + mock_response.headers = headers + mock_response.text = text + + if json_data is not None: + mock_response.json.return_value = json_data + else: + mock_response.json.side_effect = ValueError("No JSON") + + if raise_for_status_error: + mock_response.raise_for_status.side_effect = raise_for_status_error + else: + mock_response.raise_for_status.return_value = None + + return mock_response + + +def create_mock_session_with_response(response): + """ + Factory for creating a mock requests.Session with a predefined response. + + Args: + response: Mock response object or list of responses + + Returns: + Mock Session object + """ + mock_session = MagicMock() + + if isinstance(response, list): + mock_session.request.side_effect = response + else: + mock_session.request.return_value = response + + return mock_session + + +# ------------------------------ +# CONTEXT MANAGERS FOR PATCHING +# ------------------------------ + +class MockApimRequestsPatches: + """ + Context manager for common apimrequests module patches. + Eliminates the need for @patch decorators on every test. + + Usage: + with MockApimRequestsPatches() as mocks: + # mocks.request, mocks.print_message, etc. available + result = apim.singleGet('/path') + """ + + def __init__(self): + self.patches = [] + self.mocks = {} + + def __enter__(self): + patch_targets = [ + ('apimrequests.requests.request', 'request'), + ('apimrequests.print_message', 'print_message'), + ('apimrequests.print_info', 'print_info'), + ('apimrequests.print_error', 'print_error'), + ('apimrequests.print_val', 'print_val'), + ('apimrequests.print_ok', 'print_ok') + ] + + for target, name in patch_targets: + p = patch(target) + mock = p.__enter__() + self.patches.append(p) + setattr(self, name, mock) + self.mocks[name] = mock + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + for p in reversed(self.patches): + p.__exit__(exc_type, exc_val, exc_tb) + + +class MockInfrastructuresPatches: + """ + Context manager for common infrastructures module patches. + + Usage: + with MockInfrastructuresPatches() as mocks: + # mocks.az, mocks.utils available + infra = Infrastructure(...) 
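+            # illustrative: the patched mocks return the canned values configured in __enter__
+            assert mocks.az.does_resource_group_exist('rg-any') is True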
+ """ + + def __init__(self): + self.patches = [] + + def __enter__(self): + # Patch az + self.az_patch = patch('infrastructures.az') + self.az = self.az_patch.__enter__() + self.az.get_infra_rg_name.return_value = 'rg-test-infrastructure-01' + self.az.create_resource_group.return_value = None + self.az.does_resource_group_exist.return_value = True + self.az.get_account_info.return_value = ('test_user', 'test_user_id', 'test_tenant', 'test_subscription') + self.az.get_unique_suffix_for_resource_group.return_value = 'abc123def456' + + mock_output = Mock() + mock_output.success = True + mock_output.json_data = {'outputs': 'test'} + mock_output.get.return_value = 'https://test-apim.azure-api.net' + mock_output.getJson.return_value = ['api1', 'api2'] + self.az.run.return_value = mock_output + + self.patches.append(self.az_patch) + + # Patch utils + self.utils_patch = patch('infrastructures.utils') + self.utils = self.utils_patch.__enter__() + self.utils.build_infrastructure_tags.return_value = {'environment': 'test', 'project': 'apim-samples'} + self.utils.read_policy_xml.return_value = '' + self.utils.determine_shared_policy_path.return_value = '/mock/path/policy.xml' + self.utils.verify_infrastructure.return_value = True + + self.patches.append(self.utils_patch) + + # Patch apimtypes._read_policy_xml to prevent file system access in tests + self.apimtypes_read_policy_patch = patch('apimtypes._read_policy_xml') + self.apimtypes_read_policy = self.apimtypes_read_policy_patch.__enter__() + self.apimtypes_read_policy.return_value = '' + self.patches.append(self.apimtypes_read_policy_patch) + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + for p in reversed(self.patches): + p.__exit__(exc_type, exc_val, exc_tb) + + +# ------------------------------ +# CONSOLE OUTPUT CAPTURE +# ------------------------------ + +def capture_console_output(func: Callable, *args, **kwargs) -> str: + """ + Capture console logging output from a function call. 
+ + Args: + func: Function to call + *args: Positional arguments for the function + **kwargs: Keyword arguments for the function + + Returns: + Captured output as string + """ + captured_output = io.StringIO() + + logger = logging.getLogger('console') + previous_level = logger.level + previous_handlers = list(logger.handlers) + previous_propagate = logger.propagate + + handler = logging.StreamHandler(captured_output) + handler.setFormatter(logging.Formatter('%(message)s')) + + logger.handlers = [handler] + logger.setLevel(logging.DEBUG) + logger.propagate = False + + try: + func(*args, **kwargs) + return captured_output.getvalue() + finally: + logger.handlers = previous_handlers + logger.setLevel(previous_level) + logger.propagate = previous_propagate diff --git a/tests/python/test_infrastructures.py b/tests/python/test_infrastructures.py index beb0918..9e1f495 100644 --- a/tests/python/test_infrastructures.py +++ b/tests/python/test_infrastructures.py @@ -11,6 +11,11 @@ import console import infrastructures from apimtypes import INFRASTRUCTURE, APIM_SKU, APIMNetworkMode, API, PolicyFragment, Output +from test_helpers import ( + capture_module_print_log, + patch_module_thread_safe_printing, + suppress_module_functions +) # ------------------------------ @@ -24,57 +29,19 @@ # ------------------------------ -# FIXTURES +# FIXTURE ALIASES # ------------------------------ @pytest.fixture -def mock_utils(): - """Mock the utils module to avoid external dependencies.""" - with patch('infrastructures.utils') as mock_utils: - mock_utils.build_infrastructure_tags.return_value = {'environment': 'test', 'project': 'apim-samples'} - mock_utils.read_policy_xml.return_value = '' - mock_utils.determine_shared_policy_path.return_value = '/mock/path/policy.xml' - mock_utils.verify_infrastructure.return_value = True - - yield mock_utils - - -@pytest.fixture(autouse = True) -def mock_az(): - """Mock the azure_resources module used by infrastructures.""" - - with patch('infrastructures.az') as mock_az: - mock_az.get_infra_rg_name.return_value = 'rg-test-infrastructure-01' - mock_az.create_resource_group.return_value = None - mock_az.does_resource_group_exist.return_value = True - mock_az.get_account_info.return_value = ('test_user', 'test_user_id', 'test_tenant', 'test_subscription') - mock_az.get_unique_suffix_for_resource_group.return_value = 'abc123def456' - - # Mock the run command with proper return object - mock_output = Mock() - mock_output.success = True - mock_output.json_data = {'outputs': 'test'} - mock_output.get.return_value = 'https://test-apim.azure-api.net' - mock_output.getJson.return_value = ['api1', 'api2'] - mock_az.run.return_value = mock_output - - yield mock_az +def mock_policy_fragments(sample_policy_fragments): + """Alias to keep older test signatures stable.""" + return sample_policy_fragments -@pytest.fixture -def mock_policy_fragments(): - """Provide mock policy fragments for testing.""" - return [ - PolicyFragment('Test-Fragment-1', 'test1', 'Test fragment 1'), - PolicyFragment('Test-Fragment-2', 'test2', 'Test fragment 2') - ] @pytest.fixture -def mock_apis(): - """Provide mock APIs for testing.""" - return [ - API('test-api-1', 'Test API 1', '/test1', 'Test API 1 description', 'api1'), - API('test-api-2', 'Test API 2', '/test2', 'Test API 2 description', 'api2') - ] +def mock_apis(sample_apis): + """Alias to keep older test signatures stable.""" + return sample_apis # ------------------------------ @@ -976,12 +943,13 @@ def test_policy_fragment_creation_robustness(mock_utils): 
def test_cleanup_resources_smoke(monkeypatch): monkeypatch.setattr(infrastructures.az, 'run', lambda *a, **kw: MagicMock(success=True, json_data={})) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_error', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_warning', lambda *a, **kw: None) - monkeypatch.setattr(console, 'print_val', lambda *a, **kw: None) + + suppress_module_functions( + monkeypatch, + infrastructures, + ['print_info', 'print_error', 'print_message', 'print_ok', 'print_warning'], + ) + suppress_module_functions(monkeypatch, console, ['print_val']) # Direct private method call for legacy test (should still work) infrastructures._cleanup_resources(INFRASTRUCTURE.SIMPLE_APIM.value, 'rg') @@ -1042,8 +1010,7 @@ def mock_run(command, ok_message=None, error_message=None, **kwargs): return Output(success=True, text='Operation completed') monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(console, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(console, 'print_message', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, console, ['print_info', 'print_message']) # Execute cleanup infrastructures._cleanup_resources('test-deployment', 'test-rg') @@ -1092,8 +1059,7 @@ def mock_run(command, ok_message=None, error_message=None, **kwargs): return Output(success=True, text='Operation completed') monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_message']) # Execute cleanup infrastructures._cleanup_resources('test-deployment', 'test-rg') @@ -1128,8 +1094,7 @@ def mock_run(command, ok_message=None, error_message=None, **kwargs): return Output(success=True, json_data=[]) monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_message']) # Should not raise exception even when deployment show fails infrastructures._cleanup_resources('test-deployment', 'test-rg') @@ -1146,8 +1111,7 @@ def mock_print(message): exception_caught.append(message) monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_message']) monkeypatch.setattr(infrastructures, 'print_plain', mock_print) monkeypatch.setattr('traceback.print_exc', lambda: None) @@ -1170,9 +1134,7 @@ def mock_run(command, ok_message=None, error_message=None, **kwargs): return Output(success=True, text='{}') monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_plain', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, 
['print_info', 'print_message', 'print_plain']) monkeypatch.setattr('traceback.print_exc', lambda: None) infrastructures._cleanup_resources('test-deployment', 'test-rg') @@ -1199,8 +1161,7 @@ def mock_get_infra_rg_name(deployment, index): monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_ok']) # Test with multiple indexes (should use parallel mode) infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3]) @@ -1240,9 +1201,7 @@ def mock_get_infra_rg_name(deployment, index): monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_error', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_warning', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_error', 'print_warning']) # Test with multiple indexes where one fails infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3]) @@ -1332,8 +1291,7 @@ def mock_run(*args, **kwargs): monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(infrastructures.az, 'run', mock_run) # Mock Azure CLI calls - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_ok']) # Test with 6 indexes (should use parallel mode and handle all indexes) infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, [1, 2, 3, 4, 5, 6]) @@ -1378,8 +1336,7 @@ def mock_run(*args, **kwargs): monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(infrastructures.az, 'run', mock_run) # Mock Azure CLI calls - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_ok']) # Test with more indexes than available colors to verify cycling num_colors = len(console.THREAD_COLORS) @@ -1414,7 +1371,7 @@ def mock_get_infra_rg_name(deployment, index): monkeypatch.setattr(infrastructures, '_cleanup_resources', mock_cleanup_resources) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info']) # Test all infrastructure types infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 1) @@ -1450,8 +1407,7 @@ def mock_run(*args, **kwargs): monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', 
mock_cleanup_resources_thread_safe) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(infrastructures.az, 'run', mock_run) # Mock Azure CLI calls - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_ok']) # Test None index (sequential) infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, None) @@ -1519,8 +1475,7 @@ def mock_get_infra_rg_name(deployment, index): monkeypatch.setattr(infrastructures.az, 'run', mock_run) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_message']) # Test _cleanup_resources (private function) infrastructures._cleanup_resources('test-deployment', 'test-rg') # Should not raise @@ -1561,8 +1516,7 @@ def mock_run(*args, **kwargs): monkeypatch.setattr(infrastructures, '_cleanup_resources_thread_safe', mock_cleanup_resources_thread_safe) monkeypatch.setattr(infrastructures.az, 'get_infra_rg_name', mock_get_infra_rg_name) monkeypatch.setattr(infrastructures.az, 'run', mock_run) # Mock Azure CLI calls - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_ok']) # Test with zero index (single index, uses sequential path) infrastructures.cleanup_infra_deployments(INFRASTRUCTURE.SIMPLE_APIM, 0) @@ -1624,12 +1578,12 @@ def mock_run(command, ok_message=None, error_message=None, **kwargs): return Output(success=True, text='Operation completed') monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) - monkeypatch.setattr(console, 'print_ok', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_error', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_warning', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_ok', lambda *a, **kw: None) + suppress_module_functions( + monkeypatch, + infrastructures, + ['print_info', 'print_message', 'print_error', 'print_warning', 'print_ok'], + ) + suppress_module_functions(monkeypatch, console, ['print_ok']) # Should not raise exception even when individual operations fail infrastructures._cleanup_resources('test-deployment', 'test-rg') @@ -1681,8 +1635,7 @@ def mock_run(command, ok_message=None, error_message=None, **kwargs): return Output(success=True, text='Operation completed') monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, 'print_info', lambda *a, **kw: None) - monkeypatch.setattr(infrastructures, 'print_message', lambda *a, **kw: None) + suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_message']) # Should handle malformed responses gracefully without raising exceptions infrastructures._cleanup_resources('test-deployment', 'test-rg') @@ -1928,8 +1881,7 @@ def mock_run(command, ok_message=None, error_message=None, **kwargs): return Output(success=True, text='{}') 
     monkeypatch.setattr(infrastructures.az, 'run', mock_run)
-    monkeypatch.setattr(console, 'print_info', lambda *a, **kw: None)
-    monkeypatch.setattr(console, 'print_message', lambda *a, **kw: None)
+    suppress_module_functions(monkeypatch, console, ['print_info', 'print_message'])

     infrastructures._cleanup_resources('test-deployment', 'test-rg')

@@ -2543,6 +2495,130 @@ def test_disable_apim_public_access_returns_false_when_param_missing(mock_utils,
     assert result is False
     mock_az.run.assert_not_called()

+@pytest.mark.unit
+def test_infrastructure_constructor_with_all_network_modes(mock_utils):
+    """Test Infrastructure creation with all network mode options."""
+    # Test PUBLIC mode
+    infra_public = infrastructures.Infrastructure(
+        infra=INFRASTRUCTURE.SIMPLE_APIM,
+        index=1,
+        rg_location='eastus',
+        networkMode=APIMNetworkMode.PUBLIC
+    )
+    assert infra_public.networkMode == APIMNetworkMode.PUBLIC
+
+    # Test EXTERNAL_VNET mode
+    infra_external = infrastructures.Infrastructure(
+        infra=INFRASTRUCTURE.SIMPLE_APIM,
+        index=1,
+        rg_location='eastus',
+        networkMode=APIMNetworkMode.EXTERNAL_VNET
+    )
+    assert infra_external.networkMode == APIMNetworkMode.EXTERNAL_VNET
+
+    # Test INTERNAL_VNET mode
+    infra_internal = infrastructures.Infrastructure(
+        infra=INFRASTRUCTURE.SIMPLE_APIM,
+        index=1,
+        rg_location='eastus',
+        networkMode=APIMNetworkMode.INTERNAL_VNET
+    )
+    assert infra_internal.networkMode == APIMNetworkMode.INTERNAL_VNET
+
+
+@pytest.mark.unit
+def test_infrastructure_constructor_with_all_sku_types(mock_utils):
+    """Test Infrastructure creation with all APIM SKU types."""
+    sku_types = [
+        APIM_SKU.BASICV2,
+        APIM_SKU.STANDARDV2,
+        APIM_SKU.DEVELOPER,
+        APIM_SKU.BASIC,
+        APIM_SKU.STANDARD,
+        APIM_SKU.PREMIUM
+    ]
+
+    for sku in sku_types:
+        infra = infrastructures.Infrastructure(
+            infra=INFRASTRUCTURE.SIMPLE_APIM,
+            index=1,
+            rg_location='eastus',
+            apim_sku=sku
+        )
+        assert infra.apim_sku == sku
+
+
+@pytest.mark.unit
+def test_infrastructure_constructor_with_mixed_custom_components(mock_utils):
+    """Test Infrastructure with combinations of custom APIs and policy fragments."""
+    # Only custom APIs, no PFs
+    api_only = infrastructures.Infrastructure(
+        infra=INFRASTRUCTURE.SIMPLE_APIM,
+        index=1,
+        rg_location='eastus',
+        infra_apis=[API('api1', 'API 1', '/api1', 'API 1')],
+        infra_pfs=None
+    )
+    api_only._define_policy_fragments()
+    api_only._define_apis()
+    assert len(api_only.apis) == 2 # hello-world + api1
+    assert len(api_only.pfs) == 6 # only base fragments
+
+    # Only custom PFs, no APIs
+    pf_only = infrastructures.Infrastructure(
+        infra=INFRASTRUCTURE.SIMPLE_APIM,
+        index=1,
+        rg_location='eastus',
+        infra_apis=None,
+        infra_pfs=[PolicyFragment('pf1', '', 'PF 1')]
+    )
+    pf_only._define_policy_fragments()
+    pf_only._define_apis()
+    assert len(pf_only.apis) == 1 # only hello-world
+    assert len(pf_only.pfs) == 7 # 6 base + pf1
+
+
+@pytest.mark.unit
+def test_infrastructure_constructor_extreme_index_values(mock_utils):
+    """Test Infrastructure creation with edge-case index values."""
+    # Index 0
+    infra_zero = infrastructures.Infrastructure(
+        infra=INFRASTRUCTURE.SIMPLE_APIM,
+        index=0,
+        rg_location='eastus'
+    )
+    assert isinstance(infra_zero.index, int) and infra_zero.index >= 0 # Zero is valid
+
+    # Large index
+    infra_large = infrastructures.Infrastructure(
+        infra=INFRASTRUCTURE.SIMPLE_APIM,
+        index=9999,
+        rg_location='eastus'
+    )
+    assert infra_large.index == 9999
+
+    # Negative index (although not typical, should still work)
+    infra_negative = infrastructures.Infrastructure(
+        
infra=INFRASTRUCTURE.SIMPLE_APIM, + index=-1, + rg_location='eastus' + ) + assert infra_negative.index == -1 + + +@pytest.mark.unit +def test_infrastructure_constructor_with_all_infrastructure_types(mock_utils): + """Test that base Infrastructure can be instantiated with all infrastructure types.""" + for infra_type in INFRASTRUCTURE: + infra = infrastructures.Infrastructure( + infra=infra_type, + index=1, + rg_location='eastus' + ) + assert infra.infra == infra_type + + +@pytest.mark.unit @pytest.mark.unit @@ -3491,74 +3566,148 @@ def test_deploy_infrastructure_appgw_prints_final_configuration(mock_utils, mock @pytest.mark.unit -def test_cleanup_resources_with_thread_safe_printing_missing_deployment_name(monkeypatch): - """Test with missing deployment name parameter.""" - print_calls = [] +def test_appgw_apim_infrastructure_has_keyvault_methods(mock_utils, mock_az): + """Test AppGwApimInfrastructure has required Key Vault methods.""" + infra = infrastructures.AppGwApimInfrastructure( + rg_location='eastus', + index=1 + ) + + # Verify methods exist and are callable + assert hasattr(infra, '_create_keyvault') + assert callable(infra._create_keyvault) + assert hasattr(infra, '_create_keyvault_certificate') + assert callable(infra._create_keyvault_certificate) - def mock_print_log(msg, icon, color, **kwargs): - print_calls.append(msg) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', mock_print_log) +@pytest.mark.unit +def test_appgw_apim_infrastructure_domain_and_ip_attributes(mock_utils, mock_az): + """Test AppGwApimInfrastructure initializes domain and IP attributes.""" + infra = infrastructures.AppGwApimInfrastructure( + rg_location='eastus', + index=1 + ) + + # Verify attributes can be set + infra.appgw_domain_name = 'test.example.com' + infra.appgw_public_ip = '1.2.3.4' + + assert infra.appgw_domain_name == 'test.example.com' + assert infra.appgw_public_ip == '1.2.3.4' + + +@pytest.mark.unit +def test_appgw_apim_infrastructure_cert_name_constant(mock_utils, mock_az): + """Test AppGwApimInfrastructure has correct certificate name constant.""" + infra = infrastructures.AppGwApimInfrastructure( + rg_location='eastus', + index=1 + ) + + assert infra.CERT_NAME == 'appgw-cert' + assert infra.DOMAIN_NAME == 'api.apim-samples.contoso.com' + + +@pytest.mark.unit +def test_appgw_apim_infrastructure_final_configuration_block(monkeypatch, mock_utils, mock_az): + """Covers lines 982-995: extracts outputs, prints final configuration, and prints curl command.""" + infra = infrastructures.AppGwApimInfrastructure( + rg_location='eastus', + index=1 + ) + + # Ensure step 1 passes quickly without AZ calls + infra._create_keyvault = MagicMock(return_value=True) + infra._create_keyvault_certificate = MagicMock(return_value=True) + + # Capture all print_* and print_command calls from infrastructures module + messages: list[str] = [] + def record(msg: str, *args, **kwargs): + messages.append(msg) + monkeypatch.setattr(infrastructures, 'print_ok', record) + monkeypatch.setattr(infrastructures, 'print_info', record) + monkeypatch.setattr(infrastructures, 'print_plain', record) + monkeypatch.setattr(infrastructures, 'print_command', record) + + # Mock the base deploy to return expected outputs structure + mock_output = Output(True, '') + mock_output.json_data = { + 'properties': { + 'outputs': { + 'appGatewayDomainName': {'value': 'appgw.example.com'}, + 'appgwPublicIpAddress': {'value': '1.2.3.4'}, + } + } + } + + with 
patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=mock_output): + result = infra.deploy_infrastructure() + + # Output should be the same object and success + assert result.success is True + + # Values should be set from outputs + assert infra.appgw_domain_name == 'appgw.example.com' + assert infra.appgw_public_ip == '1.2.3.4' + + # Verify final configuration messages were printed + def any_msg(substr: str) -> bool: + return any(substr in m for m in messages) + + assert any_msg('Application Gateway deployed') + assert any_msg('API Management deployed in VNet (Internal)') + assert any_msg('No Private Endpoints used') + assert any_msg('Traffic flow: Internet') + + # Verify the curl command includes domain and IP + assert any_msg('curl -v -k -H') + assert any_msg('Host: appgw.example.com') + assert any_msg('https://1.2.3.4') + +@pytest.mark.unit +def test_cleanup_resources_with_thread_safe_printing_missing_deployment_name(monkeypatch): + """Test with missing deployment name parameter.""" + print_calls = capture_module_print_log(monkeypatch, infrastructures) infrastructures._cleanup_resources_with_thread_safe_printing('', 'test-rg', '[TEST]: ', 'color') - assert any('Missing deployment name parameter' in call for call in print_calls) + assert any('Missing deployment name parameter' in call['msg'] for call in print_calls) @pytest.mark.unit def test_cleanup_resources_with_thread_safe_printing_missing_resource_group(monkeypatch): """Test with missing resource group name parameter.""" - print_calls = [] - - def mock_print_log(msg, icon, color, **kwargs): - print_calls.append(msg) - - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', mock_print_log) + print_calls = capture_module_print_log(monkeypatch, infrastructures) infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', '', '[TEST]: ', 'color') - assert any('Missing resource group name parameter' in call for call in print_calls) + assert any('Missing resource group name parameter' in call['msg'] for call in print_calls) @pytest.mark.unit def test_cleanup_resources_with_thread_safe_printing_none_deployment_name(monkeypatch): """Test with None deployment name parameter.""" - print_calls = [] - - def mock_print_log(msg, icon, color, **kwargs): - print_calls.append(msg) - - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', mock_print_log) + print_calls = capture_module_print_log(monkeypatch, infrastructures) infrastructures._cleanup_resources_with_thread_safe_printing(None, 'test-rg', '[TEST]: ', 'color') - assert any('Missing deployment name parameter' in call for call in print_calls) + assert any('Missing deployment name parameter' in call['msg'] for call in print_calls) @pytest.mark.unit def test_cleanup_resources_with_thread_safe_printing_none_resource_group(monkeypatch): """Test with None resource group name parameter.""" - print_calls = [] - - def mock_print_log(msg, icon, color, **kwargs): - print_calls.append(msg) - - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', mock_print_log) + print_calls = capture_module_print_log(monkeypatch, infrastructures) infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', None, '[TEST]: ', 'color') - assert any('Missing resource group name parameter' in call for call in print_calls) + assert any('Missing resource group name 
parameter' in call['msg'] for call in print_calls) @pytest.mark.unit def test_cleanup_resources_with_thread_safe_printing_success_with_no_resources(monkeypatch): """Test successful cleanup with no resources to delete.""" run_calls = [] - print_calls = [] def mock_run(command, ok_msg=None, error_msg=None): run_calls.append(command) @@ -3568,18 +3717,14 @@ def mock_run(command, ok_msg=None, error_msg=None): return Output(True, json.dumps([])) return Output(True, '{}') - def mock_print_log(msg, icon, color, **kwargs): - print_calls.append(msg) - monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', mock_print_log) - monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + print_calls = capture_module_print_log(monkeypatch, infrastructures) + suppress_module_functions(monkeypatch, infrastructures, ['_delete_resource_group_best_effort']) infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', 'test-rg', '[TEST]: ', 'color') assert len(print_calls) > 0 - assert any('Cleanup completed' in call for call in print_calls) + assert any('Cleanup completed' in call['msg'] for call in print_calls) @pytest.mark.unit @@ -3608,10 +3753,9 @@ def mock_cleanup_parallel(resources, thread_prefix, thread_color): assert all(r['type'] == 'cognitiveservices' for r in resources) monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', lambda *args, **kwargs: None) + patch_module_thread_safe_printing(monkeypatch, infrastructures) monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', mock_cleanup_parallel) - monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + suppress_module_functions(monkeypatch, infrastructures, ['_delete_resource_group_best_effort']) infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', 'test-rg', '[TEST]: ', 'color') @@ -3636,10 +3780,9 @@ def mock_cleanup_parallel(resources, thread_prefix, thread_color): assert all(r['type'] == 'apim' for r in resources) monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', lambda *args, **kwargs: None) + patch_module_thread_safe_printing(monkeypatch, infrastructures) monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', mock_cleanup_parallel) - monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + suppress_module_functions(monkeypatch, infrastructures, ['_delete_resource_group_best_effort']) infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', 'test-rg', '[TEST]: ', 'color') @@ -3665,10 +3808,9 @@ def mock_cleanup_parallel(resources, thread_prefix, thread_color): assert all(r['type'] == 'keyvault' for r in resources) monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', lambda *args, **kwargs: None) + patch_module_thread_safe_printing(monkeypatch, infrastructures) monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', mock_cleanup_parallel) - 
monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + suppress_module_functions(monkeypatch, infrastructures, ['_delete_resource_group_best_effort']) infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', 'test-rg', '[TEST]: ', 'color') @@ -3693,10 +3835,9 @@ def mock_cleanup_parallel(resources, thread_prefix, thread_color): assert resource_types == {'cognitiveservices', 'apim', 'keyvault'} monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', lambda *args, **kwargs: None) + patch_module_thread_safe_printing(monkeypatch, infrastructures) monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', mock_cleanup_parallel) - monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + suppress_module_functions(monkeypatch, infrastructures, ['_delete_resource_group_best_effort']) infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', 'test-rg', '[TEST]: ', 'color') @@ -3712,10 +3853,12 @@ def mock_run(command, ok_msg=None, error_msg=None): return Output(True, '{}') monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', lambda *args, **kwargs: None) - monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', lambda *args, **kwargs: None) - monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + patch_module_thread_safe_printing(monkeypatch, infrastructures) + suppress_module_functions( + monkeypatch, + infrastructures, + ['_cleanup_resources_parallel_thread_safe', '_delete_resource_group_best_effort'], + ) # Should not raise exception infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', 'test-rg', '[TEST]: ', 'color') @@ -3734,10 +3877,12 @@ def mock_run(command, ok_msg=None, error_msg=None): return Output(success=True, text='{}') monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', lambda *args, **kwargs: None) - monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', lambda *args, **kwargs: None) - monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + patch_module_thread_safe_printing(monkeypatch, infrastructures) + suppress_module_functions( + monkeypatch, + infrastructures, + ['_cleanup_resources_parallel_thread_safe', '_delete_resource_group_best_effort'], + ) # Should handle failure gracefully infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', 'test-rg', '[TEST]: ', 'color') @@ -3755,8 +3900,7 @@ def mock_delete_rg(rg_name, thread_prefix, thread_color): rg_delete_called.append((rg_name, thread_prefix, thread_color)) monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', lambda *args, **kwargs: None) + patch_module_thread_safe_printing(monkeypatch, infrastructures) monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', mock_delete_rg) monkeypatch.setattr('infrastructures.should_print_traceback', lambda: False) @@ -3783,8 
+3927,7 @@ def mock_delete_rg(rg_name, thread_prefix, thread_color): rg_delete_calls.append((rg_name, thread_prefix, thread_color)) monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', lambda *args, **kwargs: None) + patch_module_thread_safe_printing(monkeypatch, infrastructures) monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', lambda *args, **kwargs: None) monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', mock_delete_rg) @@ -3813,10 +3956,9 @@ def mock_cleanup_parallel(resources, thread_prefix, thread_color): cleanup_parallel_calls.append((thread_prefix, thread_color)) monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', lambda *args, **kwargs: None) + patch_module_thread_safe_printing(monkeypatch, infrastructures) monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', mock_cleanup_parallel) - monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + suppress_module_functions(monkeypatch, infrastructures, ['_delete_resource_group_best_effort']) test_prefix = '[CUSTOM-PREFIX]: ' test_color = '\033[35m' # Magenta color code @@ -3830,7 +3972,6 @@ def mock_cleanup_parallel(resources, thread_prefix, thread_color): @pytest.mark.unit def test_cleanup_resources_with_thread_safe_printing_logs_resource_group_name(monkeypatch): """Test that resource group name is logged.""" - log_calls = [] def mock_run(command, ok_msg=None, error_msg=None): if 'deployment group show' in command: @@ -3839,25 +3980,573 @@ def mock_run(command, ok_msg=None, error_msg=None): return Output(success=True, json_data=[]) return Output(success=True, text='{}') - def mock_print_log(msg, icon, color, **kwargs): - log_calls.append((msg, icon)) - monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', mock_print_log) - monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', lambda *args, **kwargs: None) - monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + log_calls = capture_module_print_log(monkeypatch, infrastructures) + suppress_module_functions( + monkeypatch, + infrastructures, + ['_cleanup_resources_parallel_thread_safe', '_delete_resource_group_best_effort'], + ) infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', 'my-test-rg', '[TEST]: ', 'color') # Verify RG name was logged - assert any('my-test-rg' in msg for msg, _icon in log_calls) + assert any('my-test-rg' in call['msg'] for call in log_calls) @pytest.mark.unit def test_cleanup_resources_with_thread_safe_printing_success_completion_message(monkeypatch): """Test that success completion message is logged.""" - log_calls = [] + + +# ------------------------------ +# NEW EDGE CASE AND ERROR PATH TESTS +# ------------------------------ + +@pytest.mark.unit +def test_approve_private_link_connections_output_failure(mock_utils, mock_az): + """Test _approve_private_link_connections when list command fails.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + mock_az.run.return_value = Mock(success=False) + + assert 
infra._approve_private_link_connections('test-apim-id') is False + + +@pytest.mark.unit +def test_approve_private_link_connections_single_dict_response(mock_utils, mock_az): + """Test _approve_private_link_connections with single dict response.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + + # Mock list returns single dict instead of list + mock_az.run.side_effect = [ + Mock(success=True, json_data={'id': 'conn-1', 'name': 'conn-name'}, is_json=True), + Mock(success=True) # approve call + ] + + assert infra._approve_private_link_connections('test-apim-id') is True + + +@pytest.mark.unit +def test_approve_private_link_connections_zero_pending(mock_utils, mock_az): + """Test _approve_private_link_connections with zero pending connections.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + mock_az.run.return_value = Mock(success=True, json_data=[], is_json=True) + + assert infra._approve_private_link_connections('test-apim-id') is True + + +@pytest.mark.unit +def test_approve_private_link_connections_approval_failure(mock_utils, mock_az): + """Test _approve_private_link_connections when approval fails.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + + mock_az.run.side_effect = [ + Mock(success=True, json_data=[{'id': 'conn-1', 'name': 'conn-name'}], is_json=True), + Mock(success=False) # approve fails + ] + + assert infra._approve_private_link_connections('test-apim-id') is False + + +@pytest.mark.unit +def test_approve_private_link_connections_exception(mock_utils, mock_az): + """Test _approve_private_link_connections when exception occurs.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + mock_az.run.side_effect = Exception("Network error") + + assert infra._approve_private_link_connections('test-apim-id') is False + + +@pytest.mark.unit +def test_create_keyvault_success_path(mock_utils, mock_az): + """Test _create_keyvault successful creation.""" + infra = infrastructures.AppGwApimPeInfrastructure(rg_location='eastus', index=1) + + mock_az.run.side_effect = [ + Mock(success=False), # show fails, doesn't exist + Mock(success=True), # create success + Mock(success=True), # role assignment success + ] + + with patch('time.sleep'): # Skip the 15 second wait + assert infra._create_keyvault('test-kv') is True + + +@pytest.mark.unit +def test_create_keyvault_role_assignment_failure(mock_utils, mock_az): + """Test _create_keyvault when role assignment fails.""" + infra = infrastructures.AppGwApimPeInfrastructure(rg_location='eastus', index=1) + + mock_az.run.side_effect = [ + Mock(success=False), # show fails, doesn't exist + Mock(success=True), # create success + Mock(success=False), # role assignment fails + ] + + assert infra._create_keyvault('test-kv') is False + + +@pytest.mark.unit +def test_verify_apim_connectivity_non_200_response(mock_utils): + """Test _verify_apim_connectivity with non-200 response.""" + infra = infrastructures.Infrastructure( + infra=INFRASTRUCTURE.SIMPLE_APIM, + index=TEST_INDEX, + rg_location=TEST_LOCATION + ) + + with patch('requests.get') as mock_get: + mock_get.return_value = Mock(status_code=404) + # Should still return True (continues anyway) + assert infra._verify_apim_connectivity('https://test.apim.net') is True + + +@pytest.mark.unit +def test_verify_apim_connectivity_exception(mock_utils): + """Test _verify_apim_connectivity when request raises exception.""" + infra = infrastructures.Infrastructure( + 
infra=INFRASTRUCTURE.SIMPLE_APIM, + index=TEST_INDEX, + rg_location=TEST_LOCATION + ) + + with patch('requests.get') as mock_get: + mock_get.side_effect = Exception("Connection timeout") + # Should still return True (continues anyway) + assert infra._verify_apim_connectivity('https://test.apim.net') is True + + +@pytest.mark.unit +def test_verify_infrastructure_missing_rg(mock_utils, mock_az): + """Test _verify_infrastructure when resource group doesn't exist.""" + infra = infrastructures.Infrastructure( + infra=INFRASTRUCTURE.SIMPLE_APIM, + index=TEST_INDEX, + rg_location=TEST_LOCATION + ) + + mock_az.does_resource_group_exist.return_value = False + + assert infra._verify_infrastructure('test-rg') is False + + +@pytest.mark.unit +def test_verify_infrastructure_no_apim(mock_utils, mock_az): + """Test _verify_infrastructure when APIM service not found.""" + infra = infrastructures.Infrastructure( + infra=INFRASTRUCTURE.SIMPLE_APIM, + index=TEST_INDEX, + rg_location=TEST_LOCATION + ) + + mock_az.does_resource_group_exist.return_value = True + mock_az.run.return_value = Mock(success=True, json_data=None) + + assert infra._verify_infrastructure('test-rg') is False + + +@pytest.mark.unit +def test_verify_infrastructure_subscription_key_exception(mock_utils, mock_az): + """Test _verify_infrastructure when subscription key retrieval raises exception.""" + infra = infrastructures.Infrastructure( + infra=INFRASTRUCTURE.SIMPLE_APIM, + index=TEST_INDEX, + rg_location=TEST_LOCATION + ) + + mock_az.does_resource_group_exist.return_value = True + mock_az.run.side_effect = [ + Mock(success=True, json_data={'name': 'test-apim'}), # APIM list + Mock(success=True, text='5') # API count + ] + mock_az.get_apim_subscription_key.side_effect = Exception("Key error") + + # Should still succeed despite exception + with patch.object(infra, '_verify_infrastructure_specific', return_value=True): + assert infra._verify_infrastructure('test-rg') is True + + +@pytest.mark.unit +def test_verify_infrastructure_specific_verification_failure(mock_utils, mock_az): + """Test _verify_infrastructure when infrastructure-specific verification fails.""" + infra = infrastructures.Infrastructure( + infra=INFRASTRUCTURE.SIMPLE_APIM, + index=TEST_INDEX, + rg_location=TEST_LOCATION + ) + + mock_az.does_resource_group_exist.return_value = True + mock_az.run.side_effect = [ + Mock(success=True, json_data={'name': 'test-apim'}), + Mock(success=True, text='1') + ] + + with patch.object(infra, '_verify_infrastructure_specific', return_value=False): + assert infra._verify_infrastructure('test-rg') is False + + +@pytest.mark.unit +def test_verify_infrastructure_exception(mock_utils, mock_az): + """Test _verify_infrastructure when exception occurs.""" + infra = infrastructures.Infrastructure( + infra=INFRASTRUCTURE.SIMPLE_APIM, + index=TEST_INDEX, + rg_location=TEST_LOCATION + ) + + mock_az.does_resource_group_exist.side_effect = Exception("Azure error") + + assert infra._verify_infrastructure('test-rg') is False + + +@pytest.mark.unit +def test_deploy_infrastructure_unknown_infrastructure_type(mock_utils): + """Test deploy_infrastructure with unknown infrastructure type.""" + infra = infrastructures.Infrastructure( + infra=INFRASTRUCTURE.SIMPLE_APIM, + index=TEST_INDEX, + rg_location=TEST_LOCATION + ) + + # Temporarily change to invalid type + infra.infra = Mock(value='invalid-type') + + with pytest.raises(ValueError, match="Unknown infrastructure type"): + infra.deploy_infrastructure() + + +@pytest.mark.unit +def 
test_afd_apim_aca_deploy_initial_deployment_failure(mock_utils, mock_az): + """Test AfdApimAcaInfrastructure deploy when initial deployment fails.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + + with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=Output(False, 'Deploy failed')): + result = infra.deploy_infrastructure() + assert result.success is False + + +@pytest.mark.unit +@pytest.mark.unit +def test_afd_apim_aca_deploy_no_output_data(mock_utils, mock_az): + """Test AfdApimAcaInfrastructure deploy when no output data available.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + + with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=Output(True, '')): + result = infra.deploy_infrastructure() + assert result.success is False or result.json_data is None + + +@pytest.mark.unit +def test_afd_apim_aca_deploy_missing_apim_info(mock_utils, mock_az): + """Test AfdApimAcaInfrastructure deploy when APIM info missing from output.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + + mock_output = Output(True, '{\"otherData\": {\"value\": \"test\"}}') + + with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=mock_output): + with patch.object(mock_output, 'get', return_value=None): + result = infra.deploy_infrastructure() + # Should return the output when required info is missing + assert result == mock_output + + +@pytest.mark.unit +def test_afd_apim_aca_deploy_disable_public_access_failure(mock_utils, mock_az): + """Test AfdApimAcaInfrastructure deploy when disabling public access fails.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + + # Create mock output with required properties + mock_output = Output(True, '') + mock_output.json_data = { + 'apimServiceId': {'value': 'test-id'}, + 'apimResourceGatewayURL': {'value': 'https://test.apim.net'} + } + + # Create the failure output that will be returned by utils.Output() + failure_output = Output(False, 'Failed to disable public access') + mock_utils.Output.return_value = failure_output + + with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=mock_output): + with patch.object(infra, '_approve_private_link_connections', return_value=True): + with patch.object(infra, '_verify_apim_connectivity', return_value=True): + with patch.object(infra, '_disable_apim_public_access', return_value=False): + result = infra.deploy_infrastructure() + assert result.success is False and 'public access' in result.text.lower() + + +@pytest.mark.unit +def test_afd_apim_aca_verify_no_afd_profile(mock_utils, mock_az): + """Test AfdApimAcaInfrastructure verification when no AFD profile found.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + + mock_az.run.side_effect = [ + Mock(success=True, text='{}'), # AFD list returns empty + Mock(success=True, text='test-apim-id'), # APIM ID + ] + + assert infra._verify_infrastructure_specific('test-rg') is False + + +@pytest.mark.unit +def test_afd_apim_aca_verify_exception(mock_utils, mock_az): + """Test AfdApimAcaInfrastructure verification when exception occurs.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + + mock_az.run.side_effect = Exception("Azure error") + + assert infra._verify_infrastructure_specific('test-rg') is False + + +@pytest.mark.unit +def 
test_afd_apim_aca_verify_pe_count_success(mock_utils, mock_az): + """Test AfdApimAcaInfrastructure verification with PE count.""" + infra = infrastructures.AfdApimAcaInfrastructure(rg_location='eastus', index=1) + + mock_az.run.side_effect = [ + Mock(success=True, json_data={'name': 'afd-profile'}), # AFD exists + Mock(success=True, text='2'), # PE count + ] + + assert infra._verify_infrastructure_specific('test-rg') is True + + +@pytest.mark.unit +def test_appgw_apim_pe_deploy_keyvault_creation_failure(mock_utils, mock_az): + """Test AppGwApimPeInfrastructure deploy when Key Vault creation fails.""" + mock_utils.Output.side_effect = Output + + infra = infrastructures.AppGwApimPeInfrastructure(rg_location='eastus', index=1) + + with patch.object(infra, '_create_keyvault', return_value=False): + result = infra.deploy_infrastructure() + assert result.success is False + assert 'Failed to create Key Vault' in result.text + + +@pytest.mark.unit +def test_appgw_apim_pe_deploy_certificate_creation_failure(mock_utils, mock_az): + """Test AppGwApimPeInfrastructure deploy when certificate creation fails.""" + mock_utils.Output.side_effect = Output + + infra = infrastructures.AppGwApimPeInfrastructure(rg_location='eastus', index=1) + + with patch.object(infra, '_create_keyvault', return_value=True): + with patch.object(infra, '_create_keyvault_certificate', return_value=False): + result = infra.deploy_infrastructure() + assert result.success is False + assert 'certificate' in result.text.lower() + + +@pytest.mark.unit +def test_appgw_apim_pe_deploy_no_output_data(mock_utils, mock_az): + """Test AppGwApimPeInfrastructure deploy when no output data available.""" + infra = infrastructures.AppGwApimPeInfrastructure(rg_location='eastus', index=1) + + with patch.object(infra, '_create_keyvault', return_value=True): + with patch.object(infra, '_create_keyvault_certificate', return_value=True): + with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=Output(True, '')): + result = infra.deploy_infrastructure() + assert result.success is False or result.json_data is None + + +@pytest.mark.unit +def test_appgw_apim_pe_deploy_missing_apim_info(mock_utils, mock_az): + """Test AppGwApimPeInfrastructure deploy when APIM info missing.""" + infra = infrastructures.AppGwApimPeInfrastructure(rg_location='eastus', index=1) + + mock_output = Output(True, '{\"otherData\": {\"value\": \"test\"}}') + + with patch.object(infra, '_create_keyvault', return_value=True): + with patch.object(infra, '_create_keyvault_certificate', return_value=True): + with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=mock_output): + with patch.object(mock_output, 'get', return_value=None): + result = infra.deploy_infrastructure() + assert result == mock_output + + +@pytest.mark.unit +def test_appgw_apim_pe_deploy_approve_private_link_failure(mock_utils, mock_az): + """Test AppGwApimPeInfrastructure deploy when private link approval fails.""" + infra = infrastructures.AppGwApimPeInfrastructure(rg_location='eastus', index=1) + + # Create mock output with all required properties for AppGW + mock_output = Output(True, '') + mock_output.json_data = { + 'apimServiceId': {'value': 'test-id'}, + 'apimResourceGatewayURL': {'value': 'https://test.apim.net'}, + 'appGatewayDomainName': {'value': 'test.appgw.net'}, + 'appgwPublicIpAddress': {'value': '1.2.3.4'} + } + + # Create the failure output that will be returned by utils.Output() + failure_output = Output(False, 'Private link 
approval failed') + mock_utils.Output.return_value = failure_output + + with patch.object(infra, '_create_keyvault', return_value=True): + with patch.object(infra, '_create_keyvault_certificate', return_value=True): + with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=mock_output): + with patch.object(infra, '_approve_private_link_connections', return_value=False): + result = infra.deploy_infrastructure() + assert result.success is False and 'private link' in result.text.lower() + + +@pytest.mark.unit +def test_appgw_apim_pe_deploy_disable_public_access_failure(mock_utils, mock_az): + """Test AppGwApimPeInfrastructure deploy when disabling public access fails.""" + infra = infrastructures.AppGwApimPeInfrastructure(rg_location='eastus', index=1) + + # Create mock output with all required properties for AppGW + mock_output = Output(True, '') + mock_output.json_data = { + 'apimServiceId': {'value': 'test-id'}, + 'apimResourceGatewayURL': {'value': 'https://test.apim.net'}, + 'appGatewayDomainName': {'value': 'test.appgw.net'}, + 'appgwPublicIpAddress': {'value': '1.2.3.4'} + } + + # Create the failure output that will be returned by utils.Output() + failure_output = Output(False, 'Failed to disable public access') + mock_utils.Output.return_value = failure_output + + with patch.object(infra, '_create_keyvault', return_value=True): + with patch.object(infra, '_create_keyvault_certificate', return_value=True): + with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=mock_output): + with patch.object(infra, '_approve_private_link_connections', return_value=True): + with patch.object(infra, '_disable_apim_public_access', return_value=False): + result = infra.deploy_infrastructure() + assert result.success is False and 'public access' in result.text.lower() + + +@pytest.mark.unit +def test_appgw_apim_pe_verify_exception(mock_utils, mock_az): + """Test AppGwApimPeInfrastructure verification when exception occurs.""" + infra = infrastructures.AppGwApimPeInfrastructure(rg_location='eastus', index=1) + + mock_az.run.side_effect = Exception("Azure error") + + assert infra._verify_infrastructure_specific('test-rg') is False + + +@pytest.mark.unit +def test_appgw_apim_deploy_keyvault_creation_failure(mock_utils, mock_az): + """Test AppGwApimInfrastructure deploy when Key Vault creation fails.""" + mock_utils.Output.side_effect = Output + + infra = infrastructures.AppGwApimInfrastructure(rg_location='eastus', index=1) + + with patch.object(infra, '_create_keyvault', return_value=False): + result = infra.deploy_infrastructure() + assert result.success is False + assert 'Failed to create Key Vault' in result.text + + +@pytest.mark.unit +def test_appgw_apim_deploy_certificate_creation_failure(mock_utils, mock_az): + """Test AppGwApimInfrastructure deploy when certificate creation fails.""" + mock_utils.Output.side_effect = Output + + infra = infrastructures.AppGwApimInfrastructure(rg_location='eastus', index=1) + + with patch.object(infra, '_create_keyvault', return_value=True): + with patch.object(infra, '_create_keyvault_certificate', return_value=False): + result = infra.deploy_infrastructure() + assert result.success is False + assert 'certificate' in result.text.lower() + + +@pytest.mark.unit +def test_appgw_apim_deploy_deployment_failure(mock_utils, mock_az): + """Test AppGwApimInfrastructure deploy when deployment fails.""" + infra = infrastructures.AppGwApimInfrastructure(rg_location='eastus', index=1) + + with patch.object(infra, 
'_create_keyvault', return_value=True): + with patch.object(infra, '_create_keyvault_certificate', return_value=True): + with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=Output(False, 'Deploy failed')): + result = infra.deploy_infrastructure() + assert result.success is False + + +@pytest.mark.unit +def test_appgw_apim_deploy_no_output_data(mock_utils, mock_az): + """Test AppGwApimInfrastructure deploy when no output data available.""" + infra = infrastructures.AppGwApimInfrastructure(rg_location='eastus', index=1) + + with patch.object(infra, '_create_keyvault', return_value=True): + with patch.object(infra, '_create_keyvault_certificate', return_value=True): + with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=Output(True, '')): + result = infra.deploy_infrastructure() + assert result.success is False or result.json_data is None + + +@pytest.mark.unit +def test_cleanup_single_resource_unknown_type(mock_utils, mock_az): + """Test _cleanup_single_resource with unknown resource type.""" + success, error_msg = infrastructures._cleanup_single_resource({ + 'type': 'unknown-type', + 'name': 'test-resource', + 'location': 'eastus', + 'rg_name': 'test-rg' + }) + + assert success is False + assert "Unknown resource type" in error_msg + + +@pytest.mark.unit +def test_cleanup_single_resource_purge_failure(mock_utils, mock_az): + """Test _cleanup_single_resource when purge fails.""" + mock_az.run.side_effect = [ + Mock(success=True), # delete success + Mock(success=False) # purge failure + ] + + success, error_msg = infrastructures._cleanup_single_resource({ + 'type': 'keyvault', + 'name': 'test-kv', + 'location': 'eastus', + 'rg_name': 'test-rg' + }) + + assert success is False + assert "Purge failed" in error_msg + + +@pytest.mark.unit +def test_cleanup_resources_parallel_empty_list(mock_utils): + """Test _cleanup_resources_parallel with empty resource list.""" + # Should return without error + infrastructures._cleanup_resources_parallel([]) + + +@pytest.mark.unit +def test_cleanup_resources_parallel_thread_safe_logging(monkeypatch, mock_utils): + """Test _cleanup_resources_parallel uses thread-safe logging when prefix provided.""" + + def mock_cleanup(resource): + return True, "" + + monkeypatch.setattr(infrastructures, '_cleanup_single_resource', mock_cleanup) + patch_module_thread_safe_printing(monkeypatch, infrastructures) + + resources = [{'type': 'keyvault', 'name': 'kv-1', 'location': 'eastus', 'rg_name': 'rg-1'}] + infrastructures._cleanup_resources_parallel(resources, thread_prefix='[TEST]: ', thread_color='\033[35m') + + # Should complete without error + + +@pytest.mark.unit +def test_cleanup_resources_parallel_exception_in_worker(monkeypatch, mock_utils): + """Test _cleanup_resources_parallel handles worker exceptions.""" + def mock_cleanup(resource): + raise Exception("Worker error") + + monkeypatch.setattr(infrastructures, '_cleanup_single_resource', mock_cleanup) + patch_module_thread_safe_printing(monkeypatch, infrastructures) + + resources = [{'type': 'keyvault', 'name': 'kv-1', 'location': 'eastus', 'rg_name': 'rg-1'}] + + # Should handle exception gracefully + infrastructures._cleanup_resources_parallel(resources, thread_prefix='[TEST]: ') def mock_run(command, ok_msg=None, error_msg=None): if 'deployment group show' in command: @@ -3866,19 +4555,18 @@ def mock_run(command, ok_msg=None, error_msg=None): return Output(True, json.dumps([])) return Output(True, '{}') - def mock_print_log(msg, icon, color, 
**kwargs): - log_calls.append(msg) - monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', mock_print_log) - monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', lambda *args, **kwargs: None) - monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + log_calls = capture_module_print_log(monkeypatch, infrastructures) + suppress_module_functions( + monkeypatch, + infrastructures, + ['_cleanup_resources_parallel_thread_safe', '_delete_resource_group_best_effort'], + ) infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', 'test-rg', '[TEST]: ', 'color') assert len(log_calls) > 0 - assert any('Cleanup completed' in msg for msg in log_calls) + assert any('Cleanup completed' in call['msg'] for call in log_calls) @pytest.mark.unit @@ -3899,10 +4587,9 @@ def mock_cleanup_parallel(resources, thread_prefix, thread_color): cleanup_calls.append(len(resources)) monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', lambda *args, **kwargs: None) + patch_module_thread_safe_printing(monkeypatch, infrastructures) monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', mock_cleanup_parallel) - monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + suppress_module_functions(monkeypatch, infrastructures, ['_delete_resource_group_best_effort']) infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', 'test-rg', '[TEST]: ', 'color') @@ -3934,9 +4621,8 @@ def mock_cleanup_parallel(resources, thread_prefix, thread_color): assert len(resources) == 15 # 5 + 3 + 7 monkeypatch.setattr(infrastructures.az, 'run', mock_run) - monkeypatch.setattr(infrastructures, '_print_lock', MagicMock()) - monkeypatch.setattr(infrastructures, '_print_log', lambda *args, **kwargs: None) + patch_module_thread_safe_printing(monkeypatch, infrastructures) monkeypatch.setattr(infrastructures, '_cleanup_resources_parallel_thread_safe', mock_cleanup_parallel) - monkeypatch.setattr(infrastructures, '_delete_resource_group_best_effort', lambda *args, **kwargs: None) + suppress_module_functions(monkeypatch, infrastructures, ['_delete_resource_group_best_effort']) infrastructures._cleanup_resources_with_thread_safe_printing('test-deployment', 'test-rg', '[TEST]: ', 'color') diff --git a/tests/python/test_logging_config.py b/tests/python/test_logging_config.py index 70cced8..839841a 100644 --- a/tests/python/test_logging_config.py +++ b/tests/python/test_logging_config.py @@ -95,3 +95,134 @@ def test_should_print_traceback_from_env(level: str, expected: bool, monkeypatch monkeypatch.setenv('APIM_SAMPLES_LOG_LEVEL', level) assert logging_config.should_print_traceback() is expected + + +def test_configure_logging_with_force_reconfigures(monkeypatch: pytest.MonkeyPatch) -> None: + logging_config.configure_logging(level='INFO') + assert logging_config._state['configured'] is True + + root_logger = logging.getLogger() + assert root_logger.level == logging.INFO + + logging_config.configure_logging(level='DEBUG', force=True) + assert root_logger.level == logging.DEBUG + + +def test_configure_logging_no_force_updates_level_only(monkeypatch: pytest.MonkeyPatch) -> None: + logging_config.configure_logging(level='INFO') 
+ handler_count = len(logging.getLogger().handlers) + + logging_config.configure_logging(level='WARNING') + assert logging.getLogger().level == logging.WARNING + assert len(logging.getLogger().handlers) == handler_count + + +def test_is_debug_enabled_with_custom_logger(monkeypatch: pytest.MonkeyPatch) -> None: + logging_config.configure_logging(level='DEBUG') + custom_logger = logging.getLogger('test_logger') + custom_logger.setLevel(logging.INFO) + + assert logging_config.is_debug_enabled(custom_logger) is False + assert logging_config.is_debug_enabled() is True + + +def test_is_debug_enabled_with_none_uses_root(monkeypatch: pytest.MonkeyPatch) -> None: + logging_config.configure_logging(level='DEBUG') + assert logging_config.is_debug_enabled(None) is True + + logging_config.configure_logging(level='INFO', force=True) + assert logging_config.is_debug_enabled(None) is False + + +def test_find_env_file_returns_none_when_not_found(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + # Create a fresh directory that definitely has no .env + empty_dir = tmp_path / 'empty' + empty_dir.mkdir() + monkeypatch.chdir(empty_dir) + monkeypatch.delenv('PROJECT_ROOT', raising=False) + + # Mock __file__ to point to a location without .env + fake_module = empty_dir / 'shared' / 'python' / 'logging_config.py' + fake_module.parent.mkdir(parents=True) + monkeypatch.setattr(logging_config, '__file__', str(fake_module)) + + found = logging_config._find_env_file() + assert found is None + + +def test_load_dotenv_once_skips_when_no_dotenv_module(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setattr(logging_config, 'load_dotenv', None) + + logging_config._state['dotenv_loaded'] = False + logging_config._load_dotenv_once() + + assert logging_config._state['dotenv_loaded'] is True + + +def test_get_configured_level_name_from_env(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv('APIM_SAMPLES_LOG_LEVEL', 'ERROR') + + assert logging_config.get_configured_level_name() == 'ERROR' + + +def test_get_configured_level_name_defaults_to_info(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv('APIM_SAMPLES_LOG_LEVEL', raising=False) + + assert logging_config.get_configured_level_name() == 'INFO' + + +def test_normalize_level_name_strips_whitespace() -> None: + assert logging_config._normalize_level_name(' DEBUG ') == 'DEBUG' + + +def test_normalize_level_name_accepts_all_valid_levels() -> None: + for level in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']: + assert logging_config._normalize_level_name(level) == level + assert logging_config._normalize_level_name(level.lower()) == level + + +def test_configure_logging_uses_override_level(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv('APIM_SAMPLES_LOG_LEVEL', 'INFO') + + logging_config.configure_logging(level='DEBUG') + + assert logging.getLogger().level == logging.DEBUG + + +def test_configure_logging_uses_env_when_no_override(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv('APIM_SAMPLES_LOG_LEVEL', 'ERROR') + + logging_config.configure_logging() + + assert logging.getLogger().level == logging.ERROR + + +def test_find_env_file_checks_cwd(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + (tmp_path / '.env').write_text('APIM_SAMPLES_LOG_LEVEL=DEBUG\n', encoding='utf-8') + monkeypatch.chdir(tmp_path) + monkeypatch.delenv('PROJECT_ROOT', raising=False) + + found = logging_config._find_env_file() + + assert found == tmp_path / '.env' + + +def test_find_env_file_checks_module_path(tmp_path: Path, 
monkeypatch: pytest.MonkeyPatch) -> None: + # Mock __file__ to point to tmp_path/shared/python/logging_config.py + fake_module_path = tmp_path / 'shared' / 'python' / 'logging_config.py' + fake_module_path.parent.mkdir(parents=True) + fake_module_path.write_text('# fake', encoding='utf-8') + + (tmp_path / '.env').write_text('APIM_SAMPLES_LOG_LEVEL=DEBUG\n', encoding='utf-8') + + # Create the 'other' directory so chdir works + other_dir = tmp_path / 'other' + other_dir.mkdir() + + monkeypatch.setattr(logging_config, '__file__', str(fake_module_path)) + monkeypatch.chdir(other_dir) + monkeypatch.delenv('PROJECT_ROOT', raising=False) + + found = logging_config._find_env_file() + + assert found == tmp_path / '.env' diff --git a/tests/python/test_show_soft_deleted_resources.py b/tests/python/test_show_soft_deleted_resources.py index e4da665..b97ef4d 100644 --- a/tests/python/test_show_soft_deleted_resources.py +++ b/tests/python/test_show_soft_deleted_resources.py @@ -1,9 +1,11 @@ """Tests for show_soft_deleted_resources module.""" +import builtins from unittest.mock import MagicMock, patch # APIM Samples imports import show_soft_deleted_resources as sdr +from test_helpers import mock_module_functions # ------------------------------ @@ -289,7 +291,7 @@ def mock_az_run(cmd, *a, **k): monkeypatch.setattr('show_soft_deleted_resources.get_deleted_apim_services', mock_get_apim) monkeypatch.setattr('show_soft_deleted_resources.get_deleted_key_vaults', mock_get_vaults) monkeypatch.setattr('show_soft_deleted_resources.az.run', mock_az_run) - monkeypatch.setattr('builtins.print', MagicMock()) + mock_module_functions(monkeypatch, builtins, ['print']) monkeypatch.setattr('sys.argv', ['script.py']) result = sdr.main() @@ -313,7 +315,7 @@ def mock_az_run(cmd, *a, **k): monkeypatch.setattr('show_soft_deleted_resources.get_deleted_apim_services', mock_get_apim) monkeypatch.setattr('show_soft_deleted_resources.get_deleted_key_vaults', mock_get_vaults) monkeypatch.setattr('show_soft_deleted_resources.az.run', mock_az_run) - monkeypatch.setattr('builtins.print', MagicMock()) + mock_module_functions(monkeypatch, builtins, ['print']) monkeypatch.setattr('sys.argv', ['script.py']) result = sdr.main() @@ -349,7 +351,7 @@ def mock_az_run(cmd, *a, **k): monkeypatch.setattr('show_soft_deleted_resources.purge_apim_services', mock_purge_apim) monkeypatch.setattr('show_soft_deleted_resources.purge_key_vaults', mock_purge_kv) monkeypatch.setattr('show_soft_deleted_resources.az.run', mock_az_run) - monkeypatch.setattr('builtins.print', MagicMock()) + mock_module_functions(monkeypatch, builtins, ['print']) monkeypatch.setattr('sys.argv', ['script.py', '--purge']) result = sdr.main() @@ -377,7 +379,7 @@ def mock_az_run(cmd, *a, **k): monkeypatch.setattr('show_soft_deleted_resources.get_deleted_key_vaults', mock_get_vaults) monkeypatch.setattr('show_soft_deleted_resources.confirm_purge', mock_confirm_purge) monkeypatch.setattr('show_soft_deleted_resources.az.run', mock_az_run) - monkeypatch.setattr('builtins.print', MagicMock()) + mock_module_functions(monkeypatch, builtins, ['print']) monkeypatch.setattr('sys.argv', ['script.py', '--purge']) result = sdr.main() @@ -405,7 +407,7 @@ def track_purge_kv(v): monkeypatch.setattr('show_soft_deleted_resources.purge_apim_services', track_purge_apim) monkeypatch.setattr('show_soft_deleted_resources.purge_key_vaults', track_purge_kv) monkeypatch.setattr('show_soft_deleted_resources.az.run', lambda cmd, *a, **k: MagicMock(success=True, json_data={'name': 'test-sub', 'id': 'sub-id'})) - 
monkeypatch.setattr('builtins.print', MagicMock()) + mock_module_functions(monkeypatch, builtins, ['print']) monkeypatch.setattr('sys.argv', ['script.py', '--purge', '--yes']) result = sdr.main() @@ -513,7 +515,7 @@ def mock_run(cmd, *args, **kwargs): return output monkeypatch.setattr('show_soft_deleted_resources.az.run', mock_run) - monkeypatch.setattr('builtins.print', MagicMock()) + mock_module_functions(monkeypatch, builtins, ['print']) result = sdr.purge_apim_services(services) assert result == 2 # Two succeeded @@ -532,7 +534,7 @@ def test_purge_key_vaults_mixed_protection(monkeypatch): mock_output.success = True monkeypatch.setattr('show_soft_deleted_resources.az.run', lambda cmd, *a, **k: mock_output) - monkeypatch.setattr('builtins.print', MagicMock()) + mock_module_functions(monkeypatch, builtins, ['print']) success_count, skipped_count = sdr.purge_key_vaults(vaults) assert success_count == 2 @@ -554,7 +556,7 @@ def test_confirm_purge_eof_error(monkeypatch): def raise_eof(*args, **kwargs): raise EOFError() monkeypatch.setattr('builtins.input', raise_eof) - monkeypatch.setattr('builtins.print', MagicMock()) + mock_module_functions(monkeypatch, builtins, ['print']) result = sdr.confirm_purge(1, 1, 0) assert result is False diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index 8dfd46d..520c2e1 100644 --- a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -1,12 +1,11 @@ -import os -import io import builtins +import os import inspect import base64 import subprocess import logging from pathlib import Path -from unittest.mock import MagicMock, mock_open +from unittest.mock import MagicMock import json import pytest @@ -16,20 +15,55 @@ import json_utils import azure_resources as az from console import print_error, print_info, print_message, print_ok, print_val, print_warning +import console as console_module +from test_helpers import ( + capture_console_output as capture_output, + mock_popen, + patch_create_bicep_deployment_group_dependencies, + patch_open_for_text_read, + suppress_module_functions, +) @pytest.fixture def suppress_utils_console(monkeypatch): - for attr in ( - 'print_plain', - 'print_info', - 'print_ok', - 'print_warning', - 'print_error', - 'print_message', - 'print_val', - ): - monkeypatch.setattr(utils, attr, lambda *args, **kwargs: None) + suppress_module_functions( + monkeypatch, + utils, + [ + 'print_plain', + 'print_info', + 'print_ok', + 'print_warning', + 'print_error', + 'print_message', + 'print_val', + ], + ) + + +@pytest.fixture +def suppress_console(monkeypatch): + suppress_module_functions( + monkeypatch, + console_module, + [ + 'print_plain', + 'print_command', + 'print_info', + 'print_ok', + 'print_warning', + 'print_error', + 'print_message', + 'print_val', + ], + ) + + +@pytest.fixture +def suppress_builtin_print(monkeypatch): + suppress_module_functions(monkeypatch, builtins, ['print']) + # ------------------------------ # get_infra_rg_name & get_rg_name @@ -74,70 +108,27 @@ def test_run_failure(monkeypatch): def test_read_policy_xml_success(monkeypatch): """Test reading a valid XML file returns its contents.""" xml_content = '' - m = mock_open(read_data=xml_content) - - real_open = builtins.open - - def open_selector(file, *args, **kwargs): - mode = kwargs.get('mode', args[0] if args else 'r') - file_str = str(file) - if file_str == '/path/to/dummy.xml' and 'b' not in mode: - return m(file, *args, **kwargs) - return real_open(file, *args, **kwargs) - - monkeypatch.setattr(builtins, 'open', open_selector) + 
patch_open_for_text_read(monkeypatch, match='/path/to/dummy.xml', read_data=xml_content) # Use full path to avoid sample name auto-detection result = utils.read_policy_xml('/path/to/dummy.xml') assert result == xml_content def test_read_policy_xml_file_not_found(monkeypatch): """Test reading a missing XML file raises FileNotFoundError.""" - - real_open = builtins.open - - def open_selector(file, *args, **kwargs): - mode = kwargs.get('mode', args[0] if args else 'r') - file_str = str(file) - if file_str == '/path/to/missing.xml' and 'b' not in mode: - raise FileNotFoundError('File not found') - return real_open(file, *args, **kwargs) - - monkeypatch.setattr(builtins, 'open', open_selector) + patch_open_for_text_read(monkeypatch, match='/path/to/missing.xml', raises=FileNotFoundError('File not found')) with pytest.raises(FileNotFoundError): utils.read_policy_xml('/path/to/missing.xml') def test_read_policy_xml_empty_file(monkeypatch): """Test reading an empty XML file returns an empty string.""" - m = mock_open(read_data='') - - real_open = builtins.open - - def open_selector(file, *args, **kwargs): - mode = kwargs.get('mode', args[0] if args else 'r') - file_str = str(file) - if file_str == '/path/to/empty.xml' and 'b' not in mode: - return m(file, *args, **kwargs) - return real_open(file, *args, **kwargs) - - monkeypatch.setattr(builtins, 'open', open_selector) + patch_open_for_text_read(monkeypatch, match='/path/to/empty.xml', read_data='') result = utils.read_policy_xml('/path/to/empty.xml') assert not result def test_read_policy_xml_with_named_values(monkeypatch): """Test reading policy XML with named values formatting.""" xml_content = '{jwt_signing_key}' - m = mock_open(read_data=xml_content) - - real_open = builtins.open - - def open_selector(file, *args, **kwargs): - mode = kwargs.get('mode', args[0] if args else 'r') - file_str = str(file) - if file_str.endswith('hr_all_operations.xml') and 'b' not in mode: - return m(file, *args, **kwargs) - return real_open(file, *args, **kwargs) - - monkeypatch.setattr(builtins, 'open', open_selector) + patch_open_for_text_read(monkeypatch, match=lambda p: p.endswith('hr_all_operations.xml'), read_data=xml_content) # Mock the auto-detection to return 'authX' def mock_inspect_currentframe(): @@ -161,18 +152,7 @@ def mock_inspect_currentframe(): def test_read_policy_xml_legacy_mode(monkeypatch): """Test that legacy mode (full path) still works.""" xml_content = '' - m = mock_open(read_data=xml_content) - - real_open = builtins.open - - def open_selector(file, *args, **kwargs): - mode = kwargs.get('mode', args[0] if args else 'r') - file_str = str(file) - if file_str == '/full/path/to/policy.xml' and 'b' not in mode: - return m(file, *args, **kwargs) - return real_open(file, *args, **kwargs) - - monkeypatch.setattr(builtins, 'open', open_selector) + patch_open_for_text_read(monkeypatch, match='/full/path/to/policy.xml', read_data=xml_content) result = utils.read_policy_xml('/full/path/to/policy.xml') assert result == xml_content @@ -273,17 +253,14 @@ def test_build_infrastructure_tags_none_custom_tags(): def test_create_bicep_deployment_group_with_enum(monkeypatch): """Test create_bicep_deployment_group with INFRASTRUCTURE enum.""" - mock_create_rg = MagicMock() - monkeypatch.setattr(az, 'create_resource_group', mock_create_rg) - mock_run = MagicMock(return_value=MagicMock(success=True)) - monkeypatch.setattr(az, 'run', mock_run) - mock_open_func = mock_open() - monkeypatch.setattr(builtins, 'open', mock_open_func) - monkeypatch.setattr(builtins, 
'print', MagicMock()) - # Mock os functions for file path operations - monkeypatch.setattr('os.getcwd', MagicMock(return_value='/test/dir')) - monkeypatch.setattr('os.path.exists', MagicMock(return_value=True)) - monkeypatch.setattr('os.path.basename', MagicMock(return_value='test-dir')) + mock_create_rg, mock_run, _mock_open_func = patch_create_bicep_deployment_group_dependencies( + monkeypatch, + az_module=az, + run_success=True, + cwd='/test/dir', + exists=True, + basename='test-dir', + ) bicep_params = {'param1': {'value': 'test'}} rg_tags = {'infrastructure': 'simple-apim'} @@ -304,17 +281,14 @@ def test_create_bicep_deployment_group_with_enum(monkeypatch): def test_create_bicep_deployment_group_with_string(monkeypatch): """Test create_bicep_deployment_group with string deployment name.""" - mock_create_rg = MagicMock() - monkeypatch.setattr(az, 'create_resource_group', mock_create_rg) - mock_run = MagicMock(return_value=MagicMock(success=True)) - monkeypatch.setattr(az, 'run', mock_run) - mock_open_func = mock_open() - monkeypatch.setattr(builtins, 'open', mock_open_func) - monkeypatch.setattr(builtins, 'print', MagicMock()) - # Mock os functions for file path operations - monkeypatch.setattr('os.getcwd', MagicMock(return_value='/test/dir')) - monkeypatch.setattr('os.path.exists', MagicMock(return_value=True)) - monkeypatch.setattr('os.path.basename', MagicMock(return_value='test-dir')) + mock_create_rg, mock_run, _mock_open_func = patch_create_bicep_deployment_group_dependencies( + monkeypatch, + az_module=az, + run_success=True, + cwd='/test/dir', + exists=True, + basename='test-dir', + ) bicep_params = {'param1': {'value': 'test'}} @@ -332,17 +306,7 @@ def test_create_bicep_deployment_group_with_string(monkeypatch): def test_create_bicep_deployment_group_params_file_written(monkeypatch): """Test that bicep parameters are correctly written to file.""" - mock_create_rg = MagicMock() - monkeypatch.setattr(az, 'create_resource_group', mock_create_rg) - mock_run = MagicMock(return_value=MagicMock(success=True)) - monkeypatch.setattr(az, 'run', mock_run) - mock_open_func = mock_open() - monkeypatch.setattr(builtins, 'open', mock_open_func) - monkeypatch.setattr(builtins, 'print', MagicMock()) - - # Mock os functions for file path operations # For this test, we want to simulate being in an infrastructure directory - monkeypatch.setattr('os.getcwd', MagicMock(return_value='/test/dir/infrastructure/apim-aca')) def mock_exists(path): # Only return True for the main.bicep in the infrastructure directory, not in current dir @@ -351,8 +315,14 @@ def mock_exists(path): return True return False - monkeypatch.setattr('os.path.exists', mock_exists) - monkeypatch.setattr('os.path.basename', MagicMock(return_value='apim-aca')) + _mock_create_rg, _mock_run, mock_open_func = patch_create_bicep_deployment_group_dependencies( + monkeypatch, + az_module=az, + run_success=True, + cwd='/test/dir/infrastructure/apim-aca', + exists=mock_exists, + basename='apim-aca', + ) bicep_params = { 'apiManagementName': {'value': 'test-apim'}, @@ -378,17 +348,14 @@ def mock_exists(path): def test_create_bicep_deployment_group_no_tags(monkeypatch): """Test create_bicep_deployment_group without tags.""" - mock_create_rg = MagicMock() - monkeypatch.setattr(az, 'create_resource_group', mock_create_rg) - mock_run = MagicMock(return_value=MagicMock(success=True)) - monkeypatch.setattr(az, 'run', mock_run) - mock_open_func = mock_open() - monkeypatch.setattr(builtins, 'open', mock_open_func) - monkeypatch.setattr(builtins, 
'print', MagicMock()) - # Mock os functions for file path operations - monkeypatch.setattr('os.getcwd', MagicMock(return_value='/test/dir')) - monkeypatch.setattr('os.path.exists', MagicMock(return_value=True)) - monkeypatch.setattr('os.path.basename', MagicMock(return_value='test-dir')) + mock_create_rg, _mock_run, _mock_open_func = patch_create_bicep_deployment_group_dependencies( + monkeypatch, + az_module=az, + run_success=True, + cwd='/test/dir', + exists=True, + basename='test-dir', + ) bicep_params = {'param1': {'value': 'test'}} @@ -399,17 +366,14 @@ def test_create_bicep_deployment_group_no_tags(monkeypatch): def test_create_bicep_deployment_group_deployment_failure(monkeypatch): """Test create_bicep_deployment_group when deployment fails.""" - mock_create_rg = MagicMock() - monkeypatch.setattr(az, 'create_resource_group', mock_create_rg) - mock_run = MagicMock(return_value=MagicMock(success=False)) - monkeypatch.setattr(az, 'run', mock_run) - mock_open_func = mock_open() - monkeypatch.setattr(builtins, 'open', mock_open_func) - monkeypatch.setattr(builtins, 'print', MagicMock()) - # Mock os functions for file path operations - monkeypatch.setattr('os.getcwd', MagicMock(return_value='/test/dir')) - monkeypatch.setattr('os.path.exists', MagicMock(return_value=True)) - monkeypatch.setattr('os.path.basename', MagicMock(return_value='test-dir')) + mock_create_rg, _mock_run, _mock_open_func = patch_create_bicep_deployment_group_dependencies( + monkeypatch, + az_module=az, + run_success=False, + cwd='/test/dir', + exists=True, + basename='test-dir', + ) bicep_params = {'param1': {'value': 'test'}} @@ -428,21 +392,7 @@ def test_create_bicep_deployment_group_deployment_failure(monkeypatch): def test_print_functions_comprehensive(): """Test all print utility functions for coverage.""" - # Capture console logger output (console functions emit via stdlib logging) - captured_output = io.StringIO() - logger = logging.getLogger('console') - previous_level = logger.level - previous_handlers = list(logger.handlers) - previous_propagate = logger.propagate - - handler = logging.StreamHandler(captured_output) - handler.setFormatter(logging.Formatter('%(message)s')) - - logger.handlers = [handler] - logger.setLevel(logging.DEBUG) - logger.propagate = False - - try: + def run_all(): print_info('Test info message') print_ok('Test success message') print_warning('Test warning message') @@ -450,33 +400,28 @@ def test_print_functions_comprehensive(): print_message('Test message') print_val('Test key', 'Test value') - output = captured_output.getvalue() - assert 'Test info message' in output - assert 'Test success message' in output - assert 'Test warning message' in output - assert 'Test error message' in output - assert 'Test message' in output - assert 'Test key' in output - assert 'Test value' in output - finally: - logger.handlers = previous_handlers - logger.setLevel(previous_level) - logger.propagate = previous_propagate - - -def test_test_url_preflight_check_with_frontdoor(monkeypatch): + output = capture_output(run_all) + + assert 'Test info message' in output + assert 'Test success message' in output + assert 'Test warning message' in output + assert 'Test error message' in output + assert 'Test message' in output + assert 'Test key' in output + assert 'Test value' in output + + +def test_test_url_preflight_check_with_frontdoor(monkeypatch, suppress_console): """Test URL preflight check when Front Door is available.""" monkeypatch.setattr(az, 'get_frontdoor_url', lambda x, y: 
'https://test.azurefd.net') - monkeypatch.setattr('console.print_message', lambda x, **kw: None) result = utils.test_url_preflight_check(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg', 'https://apim.com') assert result == 'https://test.azurefd.net' -def test_test_url_preflight_check_no_frontdoor(monkeypatch): +def test_test_url_preflight_check_no_frontdoor(monkeypatch, suppress_console): """Test URL preflight check when Front Door is not available.""" monkeypatch.setattr(az, 'get_frontdoor_url', lambda x, y: None) - monkeypatch.setattr('console.print_message', lambda x, **kw: None) result = utils.test_url_preflight_check(INFRASTRUCTURE.SIMPLE_APIM, 'test-rg', 'https://apim.com') assert result == 'https://apim.com' @@ -512,23 +457,17 @@ def test_determine_policy_path_full_path(): assert result == full_path -def test_wait_for_apim_blob_permissions_success(monkeypatch): +def test_wait_for_apim_blob_permissions_success(monkeypatch, suppress_console): """Test wait_for_apim_blob_permissions with successful wait.""" monkeypatch.setattr(az, 'check_apim_blob_permissions', lambda *args: True) - monkeypatch.setattr('console.print_info', lambda x: None) - monkeypatch.setattr('console.print_ok', lambda x: None) - monkeypatch.setattr('console.print_error', lambda x: None) result = utils.wait_for_apim_blob_permissions('test-apim', 'test-storage', 'test-rg', 1) assert result is True -def test_wait_for_apim_blob_permissions_failure(monkeypatch): +def test_wait_for_apim_blob_permissions_failure(monkeypatch, suppress_console): """Test wait_for_apim_blob_permissions with failed wait.""" monkeypatch.setattr(az, 'check_apim_blob_permissions', lambda *args: False) - monkeypatch.setattr('console.print_info', lambda x: None) - monkeypatch.setattr('console.print_ok', lambda x: None) - monkeypatch.setattr('console.print_error', lambda x: None) result = utils.wait_for_apim_blob_permissions('test-apim', 'test-storage', 'test-rg', 1) assert result is False @@ -540,8 +479,7 @@ def test_read_policy_xml_with_sample_name_explicit(monkeypatch): monkeypatch.setattr('utils.get_project_root', lambda: mock_project_root) xml_content = '' - m = mock_open(read_data=xml_content) - monkeypatch.setattr(builtins, 'open', m) + patch_open_for_text_read(monkeypatch, match=lambda p: 'policy.xml' in str(p), read_data=xml_content) result = utils.read_policy_xml('policy.xml', sample_name='test-sample') assert result == xml_content @@ -551,8 +489,7 @@ def test_read_policy_xml_with_named_values_formatting(monkeypatch): """Test read_policy_xml with named values formatting.""" xml_content = '{jwt_key}' expected = '{{JwtSigningKey}}' - m = mock_open(read_data=xml_content) - monkeypatch.setattr(builtins, 'open', m) + patch_open_for_text_read(monkeypatch, match=lambda p: 'policy.xml' in str(p), read_data=xml_content) named_values = {'jwt_key': 'JwtSigningKey'} result = utils.read_policy_xml('/path/to/policy.xml', named_values) @@ -690,8 +627,11 @@ def test_get_azure_role_guid_comprehensive(monkeypatch): 'Storage Account Contributor': '17d1049b-9a84-46fb-8f53-869881c3d3ab' } - m = mock_open(read_data=json.dumps(mock_roles)) - monkeypatch.setattr(builtins, 'open', m) + patch_open_for_text_read( + monkeypatch, + match=lambda p: str(p).endswith('azure-roles.json') or 'azure-roles.json' in str(p), + read_data=json.dumps(mock_roles), + ) # Test valid role result = az.get_azure_role_guid('Storage Blob Data Reader') @@ -803,6 +743,239 @@ def test_query_and_select_infrastructure_user_selects_existing(monkeypatch, supp assert selected_infra == 
INFRASTRUCTURE.SIMPLE_APIM assert selected_index == 5 + +@pytest.mark.unit +def test_query_and_select_infrastructure_user_selects_create_new(monkeypatch, suppress_utils_console): + """Test when user selects option to create new infrastructure.""" + nb_helper = utils.NotebookHelper( + 'test-sample', + 'apim-infra-simple-apim-1', + 'eastus', + INFRASTRUCTURE.SIMPLE_APIM, + [INFRASTRUCTURE.SIMPLE_APIM], + ) + + monkeypatch.setattr( + az, + 'find_infrastructure_instances', + lambda infra: [(INFRASTRUCTURE.SIMPLE_APIM, 5)] if infra == INFRASTRUCTURE.SIMPLE_APIM else [], + ) + monkeypatch.setattr( + az, + 'get_infra_rg_name', + lambda infra, index=None: f'apim-infra-{infra.value}' if index is None else f'apim-infra-{infra.value}-{index}', + ) + + created_helpers = [] + + class DummyInfraHelper: + def __init__(self, rg_location, deployment, index, apim_sku): + self.rg_location = rg_location + self.deployment = deployment + self.index = index + self.apim_sku = apim_sku + self.calls = [] + created_helpers.append(self) + + def create_infrastructure(self, bypass): + self.calls.append(bypass) + return True + + monkeypatch.setattr(utils, 'InfrastructureNotebookHelper', DummyInfraHelper) + monkeypatch.setattr('builtins.input', lambda prompt: '1') # Select "Create a NEW infrastructure" + + selected_infra, selected_index = nb_helper._query_and_select_infrastructure() + + assert selected_infra == INFRASTRUCTURE.SIMPLE_APIM + # When user selects option 1 (create_new), the index is from the helper (not None, it's 1 from the nb_helper.index) + assert selected_index == 1 + assert created_helpers + assert created_helpers[0].calls == [True] + + +@pytest.mark.unit +def test_query_and_select_infrastructure_user_enters_empty_string(monkeypatch, suppress_utils_console): + """Test when user enters empty string (no infrastructure selected).""" + nb_helper = utils.NotebookHelper( + 'test-sample', + 'apim-infra-simple-apim-1', + 'eastus', + INFRASTRUCTURE.SIMPLE_APIM, + [INFRASTRUCTURE.SIMPLE_APIM], + ) + + monkeypatch.setattr( + az, + 'find_infrastructure_instances', + lambda infra: [(INFRASTRUCTURE.SIMPLE_APIM, 5)] if infra == INFRASTRUCTURE.SIMPLE_APIM else [], + ) + monkeypatch.setattr( + az, + 'get_infra_rg_name', + lambda infra, index=None: f'apim-infra-{infra.value}' if index is None else f'apim-infra-{infra.value}-{index}', + ) + monkeypatch.setattr('builtins.input', lambda prompt: '') + + selected_infra, selected_index = nb_helper._query_and_select_infrastructure() + + assert selected_infra is None + assert selected_index is None + + +@pytest.mark.unit +def test_query_and_select_infrastructure_user_enters_invalid_then_valid(monkeypatch, suppress_utils_console): + """Test when user enters invalid choice then valid choice.""" + nb_helper = utils.NotebookHelper( + 'test-sample', + 'apim-infra-simple-apim-1', + 'eastus', + INFRASTRUCTURE.SIMPLE_APIM, + [INFRASTRUCTURE.SIMPLE_APIM], + ) + + monkeypatch.setattr( + az, + 'find_infrastructure_instances', + lambda infra: [(INFRASTRUCTURE.SIMPLE_APIM, 5)] if infra == INFRASTRUCTURE.SIMPLE_APIM else [], + ) + monkeypatch.setattr( + az, + 'get_infra_rg_name', + lambda infra, index=None: f'apim-infra-{infra.value}' if index is None else f'apim-infra-{infra.value}-{index}', + ) + inputs = iter(['999', '0', '-1', '2']) # Invalid then valid + monkeypatch.setattr('builtins.input', lambda prompt: next(inputs)) + + selected_infra, selected_index = nb_helper._query_and_select_infrastructure() + + assert selected_infra == INFRASTRUCTURE.SIMPLE_APIM + assert selected_index == 5 + + 
+@pytest.mark.unit +def test_query_and_select_infrastructure_user_enters_non_numeric(monkeypatch, suppress_utils_console): + """Test when user enters non-numeric input then valid numeric choice.""" + nb_helper = utils.NotebookHelper( + 'test-sample', + 'apim-infra-simple-apim-1', + 'eastus', + INFRASTRUCTURE.SIMPLE_APIM, + [INFRASTRUCTURE.SIMPLE_APIM], + ) + + monkeypatch.setattr( + az, + 'find_infrastructure_instances', + lambda infra: [(INFRASTRUCTURE.SIMPLE_APIM, 5)] if infra == INFRASTRUCTURE.SIMPLE_APIM else [], + ) + monkeypatch.setattr( + az, + 'get_infra_rg_name', + lambda infra, index=None: f'apim-infra-{infra.value}' if index is None else f'apim-infra-{infra.value}-{index}', + ) + inputs = iter(['abc', 'xyz', '2']) # Non-numeric then valid + monkeypatch.setattr('builtins.input', lambda prompt: next(inputs)) + + selected_infra, selected_index = nb_helper._query_and_select_infrastructure() + + assert selected_infra == INFRASTRUCTURE.SIMPLE_APIM + assert selected_index == 5 + + +@pytest.mark.unit +def test_query_and_select_infrastructure_infrastructure_creation_fails(monkeypatch, suppress_utils_console): + """Test when infrastructure creation fails.""" + nb_helper = utils.NotebookHelper( + 'test-sample', + 'apim-infra-simple-apim', + 'eastus', + INFRASTRUCTURE.SIMPLE_APIM, + [INFRASTRUCTURE.SIMPLE_APIM], + ) + + monkeypatch.setattr(az, 'find_infrastructure_instances', lambda infra: []) + monkeypatch.setattr( + az, + 'get_infra_rg_name', + lambda infra, index=None: f'apim-infra-{infra.value}' if index is None else f'apim-infra-{infra.value}-{index}', + ) + + class DummyInfraHelper: + def __init__(self, rg_location, deployment, index, apim_sku): + pass + + def create_infrastructure(self, bypass): + return False # Creation fails + + monkeypatch.setattr(utils, 'InfrastructureNotebookHelper', DummyInfraHelper) + + selected_infra, selected_index = nb_helper._query_and_select_infrastructure() + + assert selected_infra is None + assert selected_index is None + + +@pytest.mark.unit +def test_query_and_select_infrastructure_multiple_infrastructure_types(monkeypatch, suppress_utils_console): + """Test when multiple infrastructure types are available.""" + nb_helper = utils.NotebookHelper( + 'test-sample', + 'apim-infra-appgw-apim-1', + 'eastus', + INFRASTRUCTURE.APPGW_APIM, + [INFRASTRUCTURE.APPGW_APIM, INFRASTRUCTURE.SIMPLE_APIM], + ) + + def mock_find_instances(infra): + if infra == INFRASTRUCTURE.APPGW_APIM: + return [(INFRASTRUCTURE.APPGW_APIM, 1)] + elif infra == INFRASTRUCTURE.SIMPLE_APIM: + return [(INFRASTRUCTURE.SIMPLE_APIM, 2)] + return [] + + monkeypatch.setattr(az, 'find_infrastructure_instances', mock_find_instances) + monkeypatch.setattr( + az, + 'get_infra_rg_name', + lambda infra, index=None: f'apim-infra-{infra.value}' if index is None else f'apim-infra-{infra.value}-{index}', + ) + monkeypatch.setattr('builtins.input', lambda prompt: '2') # Select first existing (appgw-apim-1) + + selected_infra, selected_index = nb_helper._query_and_select_infrastructure() + + assert selected_infra == INFRASTRUCTURE.APPGW_APIM + assert selected_index == 1 + + +@pytest.mark.unit +def test_query_and_select_infrastructure_with_none_index(monkeypatch, suppress_utils_console): + """Test when infrastructure instances have None as index.""" + nb_helper = utils.NotebookHelper( + 'test-sample', + 'apim-infra-simple-apim', + 'eastus', + INFRASTRUCTURE.SIMPLE_APIM, + [INFRASTRUCTURE.SIMPLE_APIM], + ) + + monkeypatch.setattr( + az, + 'find_infrastructure_instances', + lambda infra: 
[(INFRASTRUCTURE.SIMPLE_APIM, None)] if infra == INFRASTRUCTURE.SIMPLE_APIM else [], + ) + monkeypatch.setattr( + az, + 'get_infra_rg_name', + lambda infra, index=None: f'apim-infra-{infra.value}' if index is None else f'apim-infra-{infra.value}-{index}', + ) + monkeypatch.setattr('builtins.input', lambda prompt: '2') # Select the existing one + + selected_infra, selected_index = nb_helper._query_and_select_infrastructure() + + assert selected_infra == INFRASTRUCTURE.SIMPLE_APIM + assert selected_index is None + + # ------------------------------ # TESTS FOR _prompt_for_infrastructure_update # ------------------------------ @@ -860,7 +1033,7 @@ def test_prompt_for_infrastructure_update_invalid_choice_then_valid(monkeypatch) # TESTS FOR InfrastructureNotebookHelper.create_infrastructure WITH INDEX RETRY # ------------------------------ -def test_infrastructure_notebook_helper_create_with_index_retry(monkeypatch): +def test_infrastructure_notebook_helper_create_with_index_retry(monkeypatch, suppress_builtin_print): """Test InfrastructureNotebookHelper.create_infrastructure with option 2 (different index) retry.""" helper = utils.InfrastructureNotebookHelper('eastus', INFRASTRUCTURE.SIMPLE_APIM, 1, APIM_SKU.BASICV2) @@ -877,33 +1050,15 @@ def mock_rg_exists(rg_name): monkeypatch.setattr(utils, '_prompt_for_infrastructure_update', lambda rg_name: (False, 3)) monkeypatch.setattr(az, 'does_resource_group_exist', mock_rg_exists) - # Mock subprocess execution to succeed - class MockProcess: - def __init__(self, *args, **kwargs): - self.returncode = 0 - self.stdout = iter(['Mock deployment output\n', 'Success!\n']) - - def wait(self): - pass - - def __enter__(self): - return self - - def __exit__(self, *args): - pass - - monkeypatch.setattr('subprocess.Popen', MockProcess) + mock_popen(monkeypatch, stdout_lines=['Mock deployment output\n', 'Success!\n']) monkeypatch.setattr(utils, 'find_project_root', lambda: 'c:\\mock\\root') - # Mock print functions to avoid output during testing - monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None) - # Should succeed after retrying with index 3 result = helper.create_infrastructure() assert result is True assert helper.index == 3 # Verify index was updated -def test_infrastructure_notebook_helper_create_with_recursive_retry(monkeypatch): +def test_infrastructure_notebook_helper_create_with_recursive_retry(monkeypatch, suppress_builtin_print): """Test InfrastructureNotebookHelper.create_infrastructure with multiple recursive retries.""" helper = utils.InfrastructureNotebookHelper('eastus', INFRASTRUCTURE.SIMPLE_APIM, 1, APIM_SKU.BASICV2) @@ -931,31 +1086,14 @@ def mock_prompt(rg_name): monkeypatch.setattr(utils, '_prompt_for_infrastructure_update', mock_prompt) monkeypatch.setattr(az, 'does_resource_group_exist', mock_rg_exists) - # Mock subprocess execution to succeed - class MockProcess: - def __init__(self, *args, **kwargs): - self.returncode = 0 - self.stdout = iter(['Mock deployment output\n']) - - def wait(self): - pass - - def __enter__(self): - return self - - def __exit__(self, *args): - pass - - monkeypatch.setattr('subprocess.Popen', MockProcess) + mock_popen(monkeypatch, stdout_lines=['Mock deployment output\n']) monkeypatch.setattr(utils, 'find_project_root', lambda: 'c:\\mock\\root') - monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None) - # Should succeed after retrying with index 3 result = helper.create_infrastructure() assert result is True assert helper.index == 3 # Verify final index -def 
test_infrastructure_notebook_helper_create_user_cancellation(monkeypatch): +def test_infrastructure_notebook_helper_create_user_cancellation(monkeypatch, suppress_builtin_print): """Test InfrastructureNotebookHelper.create_infrastructure when user cancels during retry.""" helper = utils.InfrastructureNotebookHelper('eastus', INFRASTRUCTURE.SIMPLE_APIM, 1, APIM_SKU.BASICV2) @@ -965,15 +1103,13 @@ def test_infrastructure_notebook_helper_create_user_cancellation(monkeypatch): # Mock the prompt to return cancellation (option 3) monkeypatch.setattr(utils, '_prompt_for_infrastructure_update', lambda rg_name: (False, None)) - monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None) - # Should raise SystemExit when user cancels with pytest.raises(SystemExit) as exc_info: helper.create_infrastructure() assert "User cancelled deployment" in str(exc_info.value) -def test_infrastructure_notebook_helper_create_keyboard_interrupt_during_prompt(monkeypatch): +def test_infrastructure_notebook_helper_create_keyboard_interrupt_during_prompt(monkeypatch, suppress_builtin_print): """Test InfrastructureNotebookHelper.create_infrastructure when KeyboardInterrupt occurs during prompt.""" helper = utils.InfrastructureNotebookHelper('eastus', INFRASTRUCTURE.SIMPLE_APIM, 1, APIM_SKU.BASICV2) @@ -986,15 +1122,13 @@ def mock_prompt(rg_name): raise KeyboardInterrupt() monkeypatch.setattr(utils, '_prompt_for_infrastructure_update', mock_prompt) - monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None) - # Should raise SystemExit when KeyboardInterrupt occurs with pytest.raises(SystemExit) as exc_info: helper.create_infrastructure() assert "User cancelled deployment" in str(exc_info.value) -def test_infrastructure_notebook_helper_create_eof_error_during_prompt(monkeypatch): +def test_infrastructure_notebook_helper_create_eof_error_during_prompt(monkeypatch, suppress_builtin_print): """Test InfrastructureNotebookHelper.create_infrastructure when EOFError occurs during prompt.""" helper = utils.InfrastructureNotebookHelper('eastus', INFRASTRUCTURE.SIMPLE_APIM, 1, APIM_SKU.BASICV2) @@ -1007,15 +1141,13 @@ def mock_prompt(rg_name): raise EOFError() monkeypatch.setattr(utils, '_prompt_for_infrastructure_update', mock_prompt) - monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None) - # Should raise SystemExit when EOFError occurs with pytest.raises(SystemExit) as exc_info: helper.create_infrastructure() assert "User cancelled deployment" in str(exc_info.value) -def test_deploy_sample_with_infrastructure_selection(monkeypatch): +def test_deploy_sample_with_infrastructure_selection(monkeypatch, suppress_console): """Test deploy_sample method with infrastructure selection when original doesn't exist.""" nb_helper = utils.NotebookHelper( 'test-sample', 'test-rg', 'eastus', @@ -1039,9 +1171,6 @@ def test_deploy_sample_with_infrastructure_selection(monkeypatch): # Mock utility functions monkeypatch.setattr(az, 'get_infra_rg_name', lambda infra, idx: f'apim-infra-{infra.value}-{idx}') - monkeypatch.setattr('console.print_error', lambda *args, **kwargs: None) - monkeypatch.setattr('console.print_ok', lambda *args, **kwargs: None) - monkeypatch.setattr('console.print_val', lambda *args, **kwargs: None) # Test the deployment result = nb_helper.deploy_sample({'test': {'value': 'param'}}) @@ -1318,10 +1447,9 @@ def test_notebookhelper_get_current_index_non_numeric_suffix(monkeypatch): # NotebookHelper._clean_up_jwt TESTS # ------------------------------ -def 
test_notebookhelper_clean_up_jwt_success(monkeypatch): +def test_notebookhelper_clean_up_jwt_success(monkeypatch, suppress_console): """Test _clean_up_jwt with successful cleanup.""" monkeypatch.setattr(az, 'cleanup_old_jwt_signing_keys', lambda *args: True) - monkeypatch.setattr('console.print_warning', lambda *args, **kwargs: None) monkeypatch.setattr(utils, 'generate_signing_key', lambda: ('test-key', 'test-key-b64')) monkeypatch.setattr(utils, 'print_val', lambda *args, **kwargs: None) monkeypatch.setattr('time.time', lambda: 1234567890) @@ -1358,12 +1486,11 @@ def test_notebookhelper_clean_up_jwt_failure(monkeypatch, caplog): # get_endpoints TESTS # ------------------------------ -def test_get_endpoints_comprehensive(monkeypatch): +def test_get_endpoints_comprehensive(monkeypatch, suppress_console): """Test get_endpoints function.""" monkeypatch.setattr(az, 'get_frontdoor_url', lambda x, y: 'https://test-afd.azurefd.net') monkeypatch.setattr(az, 'get_apim_url', lambda x: 'https://test-apim.azure-api.net') monkeypatch.setattr(az, 'get_appgw_endpoint', lambda x: ('appgw.contoso.com', '1.2.3.4')) - monkeypatch.setattr('console.print_message', lambda x, **kw: None) endpoints = utils.get_endpoints(INFRASTRUCTURE.AFD_APIM_PE, 'test-rg') @@ -1373,12 +1500,11 @@ def test_get_endpoints_comprehensive(monkeypatch): assert endpoints.appgw_public_ip == '1.2.3.4' -def test_get_endpoints_no_frontdoor(monkeypatch): +def test_get_endpoints_no_frontdoor(monkeypatch, suppress_console): """Test get_endpoints when Front Door is not available.""" monkeypatch.setattr(az, 'get_frontdoor_url', lambda x, y: None) monkeypatch.setattr(az, 'get_apim_url', lambda x: 'https://test-apim.azure-api.net') monkeypatch.setattr(az, 'get_appgw_endpoint', lambda x: (None, None)) - monkeypatch.setattr('console.print_message', lambda x, **kw: None) endpoints = utils.get_endpoints(INFRASTRUCTURE.SIMPLE_APIM, 'test-rg') @@ -1404,10 +1530,8 @@ def test_get_json_python_dict_string(): assert result == {'key': 'value', 'number': 42} -def test_get_json_invalid_string(monkeypatch): +def test_get_json_invalid_string(monkeypatch, suppress_console): """Test get_json with invalid string.""" - monkeypatch.setattr('console.print_error', lambda *args, **kwargs: None) - invalid_str = "not valid json or python literal" result = utils.get_json(invalid_str) # Should return the original string when parsing fails @@ -1427,48 +1551,39 @@ def test_get_json_non_string(): # does_infrastructure_exist TESTS # ------------------------------ -def test_does_infrastructure_exist_not_exist(monkeypatch): +def test_does_infrastructure_exist_not_exist(monkeypatch, suppress_console): """Test does_infrastructure_exist when infrastructure doesn't exist.""" monkeypatch.setattr(az, 'does_resource_group_exist', lambda x: False) monkeypatch.setattr(az, 'get_infra_rg_name', lambda x, y: 'test-rg') - monkeypatch.setattr('console.print_plain', lambda *args, **kwargs: None) result = utils.does_infrastructure_exist(INFRASTRUCTURE.SIMPLE_APIM, 1) assert result is False -def test_does_infrastructure_exist_with_update_option_proceed(monkeypatch): +def test_does_infrastructure_exist_with_update_option_proceed(monkeypatch, suppress_console): """Test does_infrastructure_exist with update option - user proceeds.""" monkeypatch.setattr(az, 'does_resource_group_exist', lambda x: True) monkeypatch.setattr(az, 'get_infra_rg_name', lambda x, y: 'test-rg') - monkeypatch.setattr('console.print_ok', lambda *args, **kwargs: None) - monkeypatch.setattr('console.print_plain', lambda *args, 
**kwargs: None) - monkeypatch.setattr('console.print_info', lambda *args, **kwargs: None) monkeypatch.setattr('builtins.input', lambda prompt: '1') result = utils.does_infrastructure_exist(INFRASTRUCTURE.SIMPLE_APIM, 1, allow_update_option=True) assert result is False # Allow deployment to proceed -def test_does_infrastructure_exist_with_update_option_cancel(monkeypatch): +def test_does_infrastructure_exist_with_update_option_cancel(monkeypatch, suppress_console): """Test does_infrastructure_exist with update option - user cancels.""" monkeypatch.setattr(az, 'does_resource_group_exist', lambda x: True) monkeypatch.setattr(az, 'get_infra_rg_name', lambda x, y: 'test-rg') - monkeypatch.setattr('console.print_ok', lambda *args, **kwargs: None) - monkeypatch.setattr('console.print_plain', lambda *args, **kwargs: None) - monkeypatch.setattr('console.print_info', lambda *args, **kwargs: None) monkeypatch.setattr('builtins.input', lambda prompt: '2') result = utils.does_infrastructure_exist(INFRASTRUCTURE.SIMPLE_APIM, 1, allow_update_option=True) assert result is True # Block deployment -def test_does_infrastructure_exist_without_update_option(monkeypatch): +def test_does_infrastructure_exist_without_update_option(monkeypatch, suppress_console): """Test does_infrastructure_exist without update option.""" monkeypatch.setattr(az, 'does_resource_group_exist', lambda x: True) monkeypatch.setattr(az, 'get_infra_rg_name', lambda x, y: 'test-rg') - monkeypatch.setattr('console.print_ok', lambda *args, **kwargs: None) - monkeypatch.setattr('console.print_plain', lambda *args, **kwargs: None) result = utils.does_infrastructure_exist(INFRASTRUCTURE.SIMPLE_APIM, 1, allow_update_option=False) assert result is True # Infrastructure exists, block deployment @@ -1481,18 +1596,7 @@ def test_does_infrastructure_exist_without_update_option(monkeypatch): def test_read_and_modify_policy_xml_with_replacements(monkeypatch): """Test read_and_modify_policy_xml with placeholders.""" xml_content = '{jwt_key}{api_value}' - m = mock_open(read_data=xml_content) - - real_open = builtins.open - - def open_selector(file, *args, **kwargs): - mode = kwargs.get('mode', args[0] if args else 'r') - file_str = str(file) - if 'test-policy.xml' in file_str and 'b' not in mode: - return m(file, *args, **kwargs) - return real_open(file, *args, **kwargs) - - monkeypatch.setattr(builtins, 'open', open_selector) + patch_open_for_text_read(monkeypatch, match=lambda p: 'test-policy.xml' in p, read_data=xml_content) replacements = { 'jwt_key': 'JwtSigningKey123', @@ -1507,18 +1611,7 @@ def open_selector(file, *args, **kwargs): def test_read_and_modify_policy_xml_placeholder_not_found(monkeypatch, caplog): """Test read_and_modify_policy_xml when placeholder doesn't exist in XML.""" xml_content = 'static' - m = mock_open(read_data=xml_content) - - real_open = builtins.open - - def open_selector(file, *args, **kwargs): - mode = kwargs.get('mode', args[0] if args else 'r') - file_str = str(file) - if 'test-policy.xml' in file_str and 'b' not in mode: - return m(file, *args, **kwargs) - return real_open(file, *args, **kwargs) - - monkeypatch.setattr(builtins, 'open', open_selector) + patch_open_for_text_read(monkeypatch, match=lambda p: 'test-policy.xml' in p, read_data=xml_content) replacements = {'missing_key': 'value'} @@ -1532,18 +1625,7 @@ def open_selector(file, *args, **kwargs): def test_read_and_modify_policy_xml_none_replacements(monkeypatch): """Test read_and_modify_policy_xml with None replacements.""" xml_content = '{jwt_key}' - m = 
mock_open(read_data=xml_content) - - real_open = builtins.open - - def open_selector(file, *args, **kwargs): - mode = kwargs.get('mode', args[0] if args else 'r') - file_str = str(file) - if 'test-policy.xml' in file_str and 'b' not in mode: - return m(file, *args, **kwargs) - return real_open(file, *args, **kwargs) - - monkeypatch.setattr(builtins, 'open', open_selector) + patch_open_for_text_read(monkeypatch, match=lambda p: 'test-policy.xml' in p, read_data=xml_content) result = utils.read_and_modify_policy_xml('/path/to/test-policy.xml', None) # Should return unmodified XML @@ -1568,60 +1650,26 @@ def test_determine_shared_policy_path(monkeypatch): # InfrastructureNotebookHelper TESTS # ------------------------------ -def test_infrastructure_notebook_helper_bypass_check(monkeypatch): +def test_infrastructure_notebook_helper_bypass_check(monkeypatch, suppress_builtin_print): """Test InfrastructureNotebookHelper with bypass_infrastructure_check=True.""" helper = utils.InfrastructureNotebookHelper('eastus', INFRASTRUCTURE.SIMPLE_APIM, 1, APIM_SKU.BASICV2) - # Mock subprocess execution to succeed - class MockProcess: - def __init__(self, *args, **kwargs): - self.returncode = 0 - self.stdout = iter(['Mock deployment output\n']) - - def wait(self): - pass - - def __enter__(self): - return self - - def __exit__(self, *args): - pass - - monkeypatch.setattr('subprocess.Popen', MockProcess) + mock_popen(monkeypatch, stdout_lines=['Mock deployment output\n']) monkeypatch.setattr(utils, 'find_project_root', lambda: 'c:\\mock\\root') - monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None) - # Test with bypass_infrastructure_check=True result = helper.create_infrastructure(bypass_infrastructure_check=True) assert result is True -def test_infrastructure_notebook_helper_allow_update_false(monkeypatch): +def test_infrastructure_notebook_helper_allow_update_false(monkeypatch, suppress_builtin_print): """Test InfrastructureNotebookHelper with allow_update=False.""" helper = utils.InfrastructureNotebookHelper('eastus', INFRASTRUCTURE.SIMPLE_APIM, 1, APIM_SKU.BASICV2) # Mock RG exists but allow_update=False monkeypatch.setattr(az, 'does_resource_group_exist', lambda x: True) - # Mock subprocess execution to succeed - class MockProcess: - def __init__(self, *args, **kwargs): - self.returncode = 0 - self.stdout = iter(['Mock deployment output\n']) - - def wait(self): - pass - - def __enter__(self): - return self - - def __exit__(self, *args): - pass - - monkeypatch.setattr('subprocess.Popen', MockProcess) + mock_popen(monkeypatch, stdout_lines=['Mock deployment output\n']) monkeypatch.setattr(utils, 'find_project_root', lambda: 'c:\\mock\\root') - monkeypatch.setattr('builtins.print', lambda *args, **kwargs: None) - # With allow_update=False, should still create when infrastructure doesn't exist result = helper.create_infrastructure(allow_update=False, bypass_infrastructure_check=True) assert result is True @@ -1635,27 +1683,22 @@ def test_infrastructure_notebook_helper_missing_args(): utils.InfrastructureNotebookHelper('eastus') -def test_does_infrastructure_exist_with_prompt_multiple_retries(monkeypatch): +def test_does_infrastructure_exist_with_prompt_multiple_retries(monkeypatch, suppress_console): """Test does_infrastructure_exist when user makes multiple invalid entries.""" inputs = iter(['invalid', '4', '0', '2']) # Invalid entries, then valid option 2 monkeypatch.setattr('builtins.input', lambda prompt: next(inputs)) monkeypatch.setattr(az, 'does_resource_group_exist', lambda x: True) 
     monkeypatch.setattr(az, 'get_infra_rg_name', lambda x, y: 'test-rg')
-    monkeypatch.setattr('console.print_ok', lambda *a, **kwargs: None)
-    monkeypatch.setattr('console.print_plain', lambda *a, **kwargs: None)
-    monkeypatch.setattr('console.print_info', lambda *a, **kwargs: None)
-    monkeypatch.setattr('console.print_error', lambda *a, **kwargs: None)
 
     result = utils.does_infrastructure_exist(INFRASTRUCTURE.SIMPLE_APIM, 1, allow_update_option=True)
     assert result is True  # Block deployment
 
 
-def test_get_endpoints_with_none_values(monkeypatch):
+def test_get_endpoints_with_none_values(monkeypatch, suppress_console):
     """Test get_endpoints when some endpoints are None."""
     monkeypatch.setattr(az, 'get_frontdoor_url', lambda x, y: None)
     monkeypatch.setattr(az, 'get_apim_url', lambda x: 'https://test-apim.azure-api.net')
     monkeypatch.setattr(az, 'get_appgw_endpoint', lambda x: (None, None))
-    monkeypatch.setattr('console.print_message', lambda x, **kw: None)
 
     endpoints = utils.get_endpoints(INFRASTRUCTURE.SIMPLE_APIM, 'test-rg')
@@ -1731,16 +1774,14 @@ def test_deployment_failure_message_consistency(monkeypatch):
 
 def test_create_bicep_deployment_group_with_debug_mode(monkeypatch):
     """Test create_bicep_deployment_group with debug mode enabled."""
-    mock_create_rg = MagicMock()
-    monkeypatch.setattr(az, 'create_resource_group', mock_create_rg)
-    mock_run = MagicMock(return_value=MagicMock(success=True))
-    monkeypatch.setattr(az, 'run', mock_run)
-    mock_open_func = mock_open()
-    monkeypatch.setattr(builtins, 'open', mock_open_func)
-    monkeypatch.setattr(builtins, 'print', MagicMock())
-    monkeypatch.setattr('os.getcwd', MagicMock(return_value='/test/dir'))
-    monkeypatch.setattr('os.path.exists', MagicMock(return_value=True))
-    monkeypatch.setattr('os.path.basename', MagicMock(return_value='test-dir'))
+    _mock_create_rg, mock_run, _mock_open_func = patch_create_bicep_deployment_group_dependencies(
+        monkeypatch,
+        az_module=az,
+        run_success=True,
+        cwd='/test/dir',
+        exists=True,
+        basename='test-dir',
+    )
 
     bicep_params = {'param1': {'value': 'test'}}
 
@@ -1758,18 +1799,7 @@ def test_read_policy_xml_complex_replacements(monkeypatch):
     """Test read_and_modify_policy_xml with complex replacement scenarios."""
     xml_content = '{placeholder1}{placeholder2}{placeholder3}'
-    m = mock_open(read_data=xml_content)
-
-    real_open = builtins.open
-
-    def open_selector(file, *args, **kwargs):
-        mode = kwargs.get('mode', args[0] if args else 'r')
-        file_str = str(file)
-        if 'policy.xml' in file_str and 'b' not in mode:
-            return m(file, *args, **kwargs)
-        return real_open(file, *args, **kwargs)
-
-    monkeypatch.setattr(builtins, 'open', open_selector)
+    patch_open_for_text_read(monkeypatch, match=lambda p: 'policy.xml' in p, read_data=xml_content)
 
     replacements = {
         'placeholder1': 'value1',
@@ -1807,10 +1837,14 @@ def test_infrastructure_tags_with_special_characters():
 
 def test_bicep_parameters_serialization(monkeypatch):
     """Test that bicep parameters serialize correctly to JSON."""
-    mock_create_rg = MagicMock()
-    monkeypatch.setattr(az, 'create_resource_group', mock_create_rg)
-    mock_run = MagicMock(return_value=MagicMock(success=True))
-    monkeypatch.setattr(az, 'run', mock_run)
+    _mock_create_rg, _mock_run, mock_open_func = patch_create_bicep_deployment_group_dependencies(
+        monkeypatch,
+        az_module=az,
+        run_success=True,
+        cwd='/test/dir',
+        exists=True,
+        basename='test-dir',
+    )
 
     # Track file writes
     written_content = []
@@ -1819,13 +1853,7 @@ def mock_file_write(content):
         written_content.append(content)
         return len(content)
 
-    mock_open_func = mock_open()
     mock_open_func.return_value.__enter__.return_value.write = mock_file_write
-    monkeypatch.setattr(builtins, 'open', mock_open_func)
-    monkeypatch.setattr(builtins, 'print', MagicMock())
-    monkeypatch.setattr('os.getcwd', MagicMock(return_value='/test/dir'))
-    monkeypatch.setattr('os.path.exists', MagicMock(return_value=True))
-    monkeypatch.setattr('os.path.basename', MagicMock(return_value='test-dir'))
 
     bicep_params = {
         'apiManagementName': {'value': 'test-apim'},
@@ -1919,17 +1947,7 @@ def test_determine_bicep_directory_with_main_bicep_in_current(monkeypatch):
 def test_read_policy_xml_with_special_characters(monkeypatch):
     """Test read_policy_xml with special characters and Unicode."""
     xml_content = 'Unicode: © ® ™ € Chinese: 中文 Arabic: العربية'
-    m = mock_open(read_data=xml_content)
-
-    real_open = builtins.open
-
-    def open_selector(file, *args, **kwargs):
-        mode = kwargs.get('mode', args[0] if args else 'r')
-        if 'policy.xml' in str(file) and 'b' not in mode:
-            return m(file, *args, **kwargs)
-        return real_open(file, *args, **kwargs)
-
-    monkeypatch.setattr(builtins, 'open', open_selector)
+    patch_open_for_text_read(monkeypatch, match=lambda p: 'policy.xml' in p, read_data=xml_content)
 
     result = utils.read_policy_xml('/path/to/policy.xml')
     assert '©' in result
@@ -1977,13 +1995,10 @@ def test_create_bicep_deployment_group_for_sample_with_custom_params_file(monkey
     assert 'my-fragment.xml' in result
 
 
-def test_wait_for_apim_blob_permissions_with_custom_timeout(monkeypatch):
+def test_wait_for_apim_blob_permissions_with_custom_timeout(monkeypatch, suppress_console):
     """Test wait_for_apim_blob_permissions with custom timeout."""
     mock_check = MagicMock(return_value=True)
     monkeypatch.setattr(az, 'check_apim_blob_permissions', mock_check)
-    monkeypatch.setattr('console.print_info', lambda *a, **kw: None)
-    monkeypatch.setattr('console.print_ok', lambda *a, **kw: None)
-    monkeypatch.setattr('console.print_plain', lambda *a, **kw: None)
 
     result = utils.wait_for_apim_blob_permissions(
         'test-apim', 'test-storage', 'test-rg', max_wait_minutes=5
@@ -1994,10 +2009,9 @@ def test_wait_for_apim_blob_permissions_with_custom_timeout(monkeypatch):
     mock_check.assert_called_once_with('test-apim', 'test-storage', 'test-rg', 5)
 
 
-def test_test_url_preflight_check_with_afd_endpoint(monkeypatch):
+def test_test_url_preflight_check_with_afd_endpoint(monkeypatch, suppress_console):
     """Test test_url_preflight_check selects AFD when available."""
     monkeypatch.setattr(az, 'get_frontdoor_url', lambda x, y: 'https://afd-endpoint.azurefd.net')
-    monkeypatch.setattr('console.print_message', lambda *a, **kw: None)
 
     result = utils.test_url_preflight_check(
         INFRASTRUCTURE.AFD_APIM_PE, 'test-rg', 'https://apim.azure-api.net'
@@ -2006,10 +2020,9 @@
     assert result == 'https://afd-endpoint.azurefd.net'
 
 
-def test_test_url_preflight_check_without_afd(monkeypatch):
+def test_test_url_preflight_check_without_afd(monkeypatch, suppress_console):
     """Test test_url_preflight_check uses APIM when no AFD."""
     monkeypatch.setattr(az, 'get_frontdoor_url', lambda x, y: None)
-    monkeypatch.setattr('console.print_message', lambda *a, **kw: None)
 
     result = utils.test_url_preflight_check(
         INFRASTRUCTURE.SIMPLE_APIM, 'test-rg', 'https://apim.azure-api.net'
@@ -2161,17 +2174,7 @@ def test_determine_policy_path_with_relative_path():
 def test_read_policy_xml_with_multiple_named_values(monkeypatch):
     """Test read_policy_xml with multiple named values."""
     xml_content = '{var1}{var2}{var3}'
-    m = mock_open(read_data=xml_content)
-
-    real_open = builtins.open
-
-    def open_selector(file, *args, **kwargs):
-        mode = kwargs.get('mode', args[0] if args else 'r')
-        if 'policy.xml' in str(file) and 'b' not in mode:
-            return m(file, *args, **kwargs)
-        return real_open(file, *args, **kwargs)
-
-    monkeypatch.setattr(builtins, 'open', open_selector)
+    patch_open_for_text_read(monkeypatch, match=lambda p: 'policy.xml' in p, read_data=xml_content)
 
     named_values = {
         'var1': 'jwt-signing-key',
@@ -2190,17 +2193,7 @@ def open_selector(file, *args, **kwargs):
 def test_read_and_modify_policy_xml_with_empty_replacements(monkeypatch):
     """Test read_and_modify_policy_xml with empty replacements dict."""
     xml_content = '{placeholder}'
-    m = mock_open(read_data=xml_content)
-
-    real_open = builtins.open
-
-    def open_selector(file, *args, **kwargs):
-        mode = kwargs.get('mode', args[0] if args else 'r')
-        if 'policy.xml' in str(file) and 'b' not in mode:
-            return m(file, *args, **kwargs)
-        return real_open(file, *args, **kwargs)
-
-    monkeypatch.setattr(builtins, 'open', open_selector)
+    patch_open_for_text_read(monkeypatch, match=lambda p: 'policy.xml' in p, read_data=xml_content)
 
     result = utils.read_and_modify_policy_xml('/path/to/policy.xml', {})
 
@@ -2216,17 +2209,7 @@ def test_read_and_modify_policy_xml_preserves_formatting(monkeypatch):
     '''
-    m = mock_open(read_data=xml_content)
-
-    real_open = builtins.open
-
-    def open_selector(file, *args, **kwargs):
-        mode = kwargs.get('mode', args[0] if args else 'r')
-        if 'policy.xml' in str(file) and 'b' not in mode:
-            return m(file, *args, **kwargs)
-        return real_open(file, *args, **kwargs)
-
-    monkeypatch.setattr(builtins, 'open', open_selector)
+    patch_open_for_text_read(monkeypatch, match=lambda p: 'policy.xml' in p, read_data=xml_content)
 
     replacements = {'placeholder': 'actual-value'}
     result = utils.read_and_modify_policy_xml('/path/to/policy.xml', replacements)
@@ -2343,3 +2326,160 @@ def test_output_get_with_deep_nesting():
     # This tests the nested value extraction - Output.get returns str
     result = output.get('deep', 'Deep value')
     assert "{'nested':" in result or '{"nested":' in result
+
+
+# ------------------------------
+# Additional coverage
+# ------------------------------
+
+def test_create_infrastructure_unsupported_type(monkeypatch, suppress_utils_console):
+    class Unsupported:
+        value = 'unsupported'
+
+    helper = utils.InfrastructureNotebookHelper('eastus', Unsupported(), 1, APIM_SKU.BASICV2)
+
+    # Skip update checks
+    monkeypatch.setattr(az, 'get_infra_rg_name', lambda *_, **__: 'rg')
+    monkeypatch.setattr(az, 'does_resource_group_exist', lambda *_, **__: False)
+
+    with pytest.raises(SystemExit):
+        helper.create_infrastructure(bypass_infrastructure_check=False, allow_update=False)
+
+
+def test_create_infrastructure_stream_error(monkeypatch, tmp_path, suppress_utils_console):
+    helper = utils.InfrastructureNotebookHelper('eastus', INFRASTRUCTURE.SIMPLE_APIM, 1, APIM_SKU.BASICV2)
+
+    monkeypatch.setattr(utils, 'find_project_root', lambda: str(tmp_path))
+    monkeypatch.setattr(az, 'get_infra_rg_name', lambda *_, **__: 'rg')
+    monkeypatch.setattr(az, 'does_resource_group_exist', lambda *_, **__: False)
+
+    class BoomIter:
+        def __iter__(self):
+            raise ValueError('boom')
+
+    class FakeProcess:
+        def __init__(self):
+            self.stdout = BoomIter()
+            self.returncode = 1
+
+        def wait(self):
+            self.returncode = 1
+
+        def __enter__(self):
+            return self
+
+        def __exit__(self, exc_type, exc_val, exc_tb):
+            return False
+
+    monkeypatch.setattr(subprocess, 'Popen', lambda *_, **__: FakeProcess())
+
+    with pytest.raises(SystemExit):
+        helper.create_infrastructure(bypass_infrastructure_check=False, allow_update=False)
+
+
+def test_determine_bicep_directory_current_infra(monkeypatch, tmp_path):
+    infra_dir = tmp_path / 'foo'
+    infra_dir.mkdir()
+
+    monkeypatch.chdir(infra_dir)
+
+    assert utils._determine_bicep_directory('foo') == str(infra_dir)
+
+
+def test_determine_bicep_directory_in_current_tree(monkeypatch, tmp_path):
+    bicep_dir = tmp_path / 'infrastructure' / 'bar'
+    bicep_dir.mkdir(parents=True)
+
+    monkeypatch.chdir(tmp_path)
+
+    assert utils._determine_bicep_directory('bar') == str(bicep_dir)
+
+
+def test_determine_bicep_directory_in_parent_tree(monkeypatch, tmp_path):
+    workdir = tmp_path / 'work'
+    workdir.mkdir()
+    parent_bicep_dir = tmp_path / 'infrastructure' / 'baz'
+    parent_bicep_dir.mkdir(parents=True)
+
+    monkeypatch.chdir(workdir)
+
+    assert utils._determine_bicep_directory('baz') == str(parent_bicep_dir)
+
+
+def test_determine_bicep_directory_from_project_root(monkeypatch, tmp_path):
+    elsewhere = tmp_path / 'elsewhere'
+    elsewhere.mkdir()
+
+    project_root = tmp_path / 'project'
+    project_bicep_dir = project_root / 'infrastructure' / 'qux'
+    project_bicep_dir.mkdir(parents=True)
+
+    monkeypatch.chdir(elsewhere)
+    monkeypatch.setattr(utils, 'get_project_root', lambda: str(project_root))
+
+    assert utils._determine_bicep_directory('qux') == str(project_bicep_dir)
+
+
+def test_determine_bicep_directory_falls_back(monkeypatch, tmp_path):
+    nowhere = tmp_path / 'nowhere'
+    nowhere.mkdir()
+
+    monkeypatch.chdir(nowhere)
+    monkeypatch.setattr(utils, 'get_project_root', lambda: (_ for _ in ()).throw(ValueError('no root')))
+
+    expected = os.path.join(str(nowhere), 'infrastructure', 'missing')
+    assert utils._determine_bicep_directory('missing') == expected
+
+
+def test_create_bicep_deployment_group_for_sample_missing_dir(monkeypatch, tmp_path, suppress_utils_console):
+    monkeypatch.setattr(utils, 'find_project_root', lambda: str(tmp_path))
+
+    with pytest.raises(FileNotFoundError):
+        utils.create_bicep_deployment_group_for_sample('absent', 'rg', 'loc', {})
+
+
+def test_create_bicep_deployment_group_for_sample_missing_main(monkeypatch, tmp_path, suppress_utils_console):
+    sample_dir = tmp_path / 'samples' / 'demo'
+    sample_dir.mkdir(parents=True)
+
+    monkeypatch.setattr(utils, 'find_project_root', lambda: str(tmp_path))
+
+    with pytest.raises(FileNotFoundError):
+        utils.create_bicep_deployment_group_for_sample('demo', 'rg', 'loc', {})
+
+
+def test_create_bicep_deployment_group_for_sample_in_sample_dir(monkeypatch, tmp_path, suppress_utils_console):
+    sample_dir = tmp_path / 'samples' / 'demo'
+    sample_dir.mkdir(parents=True)
+    (sample_dir / 'main.bicep').write_text('// bicep', encoding='utf-8')
+
+    monkeypatch.chdir(sample_dir)
+    monkeypatch.setattr(utils, 'create_bicep_deployment_group', lambda *_, **__: 'ok')
+
+    result = utils.create_bicep_deployment_group_for_sample('demo', 'rg', 'loc', {})
+    assert result == 'ok'
+
+
+def test_determine_policy_path_missing_sample_name(monkeypatch):
+    class FakeFrame:
+        def __init__(self):
+            self.f_back = MagicMock()
+            self.f_back.f_globals = {'__file__': str(Path('/tmp/samples'))}
+
+    monkeypatch.setattr(utils.inspect, 'currentframe', FakeFrame)
+
+    with pytest.raises(ValueError, match='Could not detect sample name'):
+        utils.determine_policy_path('policy.xml')
+
+
+def test_determine_policy_path_fallback_to_cwd(monkeypatch, tmp_path):
+    class FakeFrame:
+        def __init__(self):
+            self.f_back = MagicMock()
+            self.f_back.f_globals = {}
+
+    monkeypatch.chdir(tmp_path)
+    monkeypatch.setattr(utils.inspect, 'currentframe', FakeFrame)
+
+    with pytest.raises(ValueError, match='Not running from within a samples directory'):
+        utils.determine_policy_path('policy.xml')