diff --git a/src/agentready/cli/main.py b/src/agentready/cli/main.py
index 73f888e7..52a1324a 100644
--- a/src/agentready/cli/main.py
+++ b/src/agentready/cli/main.py
@@ -267,20 +267,37 @@ def run_assessment(
     # Create all assessors first
     all_assessors = create_all_assessors()
 
-    # Validate exclusions (strict mode)
-    if exclude:
+    # Merge exclusions from CLI --exclude flag and config.excluded_attributes
+    # Fix for #302: config.excluded_attributes was previously ignored
+    cli_exclusions = set(exclude or [])
+    config_exclusions = set(config.excluded_attributes if config else [])
+    all_exclusions = cli_exclusions | config_exclusions
+
+    # Validate exclusions (strict mode) - applies to both CLI and config sources
+    if all_exclusions:
         valid_ids = {a.attribute_id for a in all_assessors}
-        invalid_ids = set(exclude) - valid_ids
+        invalid_ids = all_exclusions - valid_ids
         if invalid_ids:
-            raise click.BadParameter(
-                f"Invalid attribute ID(s): {', '.join(sorted(invalid_ids))}. "
-                f"Valid IDs: {', '.join(sorted(valid_ids))}"
-            )
+            # Determine source of invalid IDs for clearer error messages
+            invalid_from_cli = invalid_ids & cli_exclusions
+            invalid_from_config = invalid_ids & config_exclusions
+
+            # Build error message that includes all invalid IDs from both sources
+            msg = ""
+            if invalid_from_cli:
+                msg = f"Invalid attribute ID(s): {', '.join(sorted(invalid_from_cli))}."
+            if invalid_from_config:
+                if msg:
+                    msg += f" Also invalid in config file: {', '.join(sorted(invalid_from_config))}."
+                else:
+                    msg = f"Invalid attribute ID(s) in config file: {', '.join(sorted(invalid_from_config))}."
+            msg += f" Valid IDs: {', '.join(sorted(valid_ids))}"
+            raise click.ClickException(msg)
 
         # Filter out excluded assessors
-        assessors = [a for a in all_assessors if a.attribute_id not in exclude]
-        if verbose and exclude:
+        assessors = [a for a in all_assessors if a.attribute_id not in all_exclusions]
+        if verbose:
             click.echo(
-                f"Excluded {len(exclude)} attribute(s): {', '.join(sorted(exclude))}\n"
+                f"Excluded {len(all_exclusions)} attribute(s): {', '.join(sorted(all_exclusions))}\n"
             )
     else:
         assessors = all_assessors
diff --git a/tests/e2e/test_critical_paths.py b/tests/e2e/test_critical_paths.py
index ae94e278..5b578173 100644
--- a/tests/e2e/test_critical_paths.py
+++ b/tests/e2e/test_critical_paths.py
@@ -275,12 +275,13 @@ def test_assess_with_valid_config(self):
         """E2E: Verify assessment works with valid config file."""
         with tempfile.TemporaryDirectory() as tmp_dir:
             # Create valid config file
+            # Note: Use a valid attribute ID (openapi_specs) for excluded_attributes
             config_file = Path(tmp_dir) / "config.yaml"
             config_file.write_text("""
 weights:
-  claude_md: 2.0
+  claude_md_file: 2.0
 excluded_attributes:
-  - repomix_config
+  - openapi_specs
 """)
 
             output_dir = Path(tmp_dir) / "output"
@@ -303,14 +304,14 @@ def test_assess_with_valid_config(self):
             assert result.returncode == 0
             assert "Assessment complete" in result.stdout
 
-            # Verify config was applied (repomix_config should be excluded)
+            # Verify config was applied (openapi_specs should be excluded)
             json_file = output_dir / "assessment-latest.json"
             with open(json_file) as f:
                 data = json.load(f)
 
-            # Check that repomix_config is not in findings
+            # Check that openapi_specs is not in findings
             finding_ids = [f["attribute"]["id"] for f in data["findings"]]
-            assert "repomix_config" not in finding_ids
+            assert "openapi_specs" not in finding_ids
 
 
 class TestCriticalSecurityFeatures:
diff --git a/tests/unit/cli/test_main.py b/tests/unit/cli/test_main.py
index 2ffb9160..40e999af 100644
--- a/tests/unit/cli/test_main.py
+++ b/tests/unit/cli/test_main.py
@@ -661,3 +661,197 @@ def test_run_assessment_function(self, test_repo, mock_assessment):
         # Should have created reports
         assert (test_repo / ".agentready").exists()
         mock_scanner.scan.assert_called_once()
+
+    def test_run_assessment_excludes_attributes_from_config_issue_302(
+        self, test_repo, mock_assessment, tmp_path
+    ):
+        """Test that excluded_attributes in config file are honored.
+
+        Regression test for issue #302: excluded_attributes in config file
+        was previously ignored - only --exclude CLI flag worked.
+        """
+        # Create config with excluded_attributes
+        config_file = tmp_path / "config.yaml"
+        config_file.write_text("""
+excluded_attributes:
+  - test_coverage
+  - openapi_specs
+""")
+
+        with patch("agentready.cli.main.Scanner") as mock_scanner_class:
+            mock_scanner = MagicMock()
+            mock_scanner.scan.return_value = mock_assessment
+            mock_scanner_class.return_value = mock_scanner
+
+            # Call run_assessment with config
+            run_assessment(
+                str(test_repo),
+                verbose=False,
+                output_dir=None,
+                config_path=str(config_file),
+            )
+
+            # Verify scan was called
+            mock_scanner.scan.assert_called_once()
+
+            # Get the assessors passed to scan using modern mock API
+            call_args = mock_scanner.scan.call_args
+            assessors = call_args.args[0]  # More robust than call_args[0][0]
+
+            # Verify excluded attributes are NOT in the assessor list
+            assessor_ids = {a.attribute_id for a in assessors}
+            assert (
+                "test_coverage" not in assessor_ids
+            ), "test_coverage should be excluded by config"
+            assert (
+                "openapi_specs" not in assessor_ids
+            ), "openapi_specs should be excluded by config"
+
+    def test_run_assessment_merges_cli_and_config_exclusions_issue_302(
+        self, test_repo, mock_assessment, tmp_path
+    ):
+        """Test that CLI --exclude and config excluded_attributes are merged.
+
+        Regression test for issue #302: both sources of exclusions should
+        be combined, not one overriding the other.
+        """
+        # Create config with one exclusion
+        config_file = tmp_path / "config.yaml"
+        config_file.write_text("""
+excluded_attributes:
+  - test_coverage
+""")
+
+        with patch("agentready.cli.main.Scanner") as mock_scanner_class:
+            mock_scanner = MagicMock()
+            mock_scanner.scan.return_value = mock_assessment
+            mock_scanner_class.return_value = mock_scanner
+
+            # Call run_assessment with config AND CLI exclusion
+            run_assessment(
+                str(test_repo),
+                verbose=False,
+                output_dir=None,
+                config_path=str(config_file),
+                exclude=["openapi_specs"],  # CLI exclusion
+            )
+
+            # Get the assessors passed to scan using modern mock API
+            call_args = mock_scanner.scan.call_args
+            assessors = call_args.args[0]  # More robust than call_args[0][0]
+
+            # Verify BOTH exclusions are applied
+            assessor_ids = {a.attribute_id for a in assessors}
+            assert (
+                "test_coverage" not in assessor_ids
+            ), "test_coverage should be excluded by config"
+            assert (
+                "openapi_specs" not in assessor_ids
+            ), "openapi_specs should be excluded by CLI"
+
+    def test_run_assessment_validates_config_excluded_attributes_issue_302(
+        self, test_repo, tmp_path
+    ):
+        """Test that invalid attribute IDs in config are rejected.
+
+        Regression test for issue #302: invalid attribute IDs in config
+        were previously silently ignored - now they should raise an error.
+        Config-sourced errors use ClickException (not BadParameter) for clarity.
+        """
+        import click
+
+        # Create config with invalid attribute ID
+        config_file = tmp_path / "config.yaml"
+        config_file.write_text("""
+excluded_attributes:
+  - nonexistent_attribute
+  - test_coverage
+""")
+
+        with patch("agentready.cli.main.Scanner") as mock_scanner_class:
+            mock_scanner = MagicMock()
+            mock_scanner_class.return_value = mock_scanner
+
+            # Should raise ClickException for invalid attribute ID from config
+            with pytest.raises(click.exceptions.ClickException) as exc_info:
+                run_assessment(
+                    str(test_repo),
+                    verbose=False,
+                    output_dir=None,
+                    config_path=str(config_file),
+                )
+
+            # Verify error message mentions the invalid ID and source
+            assert "nonexistent_attribute" in str(exc_info.value)
+            assert "config file" in str(exc_info.value)
+
+    def test_run_assessment_verbose_echoes_config_exclusions_issue_302(
+        self, runner, test_repo, mock_assessment, tmp_path
+    ):
+        """Test that verbose mode echoes exclusions from config file.
+
+        Regression test for issue #302: verbose output should show exclusions
+        even when they come only from config (not CLI).
+        Uses CliRunner for reliable Click output capture.
+        """
+        # Create config with exclusions
+        config_file = tmp_path / "config.yaml"
+        config_file.write_text("""
+excluded_attributes:
+  - test_coverage
+""")
+
+        with patch("agentready.cli.main.Scanner") as mock_scanner_class:
+            mock_scanner = MagicMock()
+            mock_scanner.scan.return_value = mock_assessment
+            mock_scanner_class.return_value = mock_scanner
+
+            # Use CliRunner for reliable Click output capture
+            result = runner.invoke(
+                assess,
+                [str(test_repo), "--verbose", "--config", str(config_file)],
+            )
+
+            # Verify exclusion message in output
+            assert result.exit_code == 0
+            assert "Excluded 1 attribute(s)" in result.output
+            assert "test_coverage" in result.output
+
+    def test_run_assessment_validates_mixed_source_invalid_attrs_issue_302(
+        self, test_repo, tmp_path
+    ):
+        """Test that invalid IDs from both CLI and config are reported together.
+
+        Regression test for issue #302: when both sources have invalid IDs,
+        the error message should include all of them so the user can fix
+        both in one round trip.
+        """
+        import click
+
+        # Create config with invalid attribute ID
+        config_file = tmp_path / "config.yaml"
+        config_file.write_text("""
+excluded_attributes:
+  - invalid_config_attr
+  - test_coverage
+""")
+
+        with patch("agentready.cli.main.Scanner") as mock_scanner_class:
+            mock_scanner = MagicMock()
+            mock_scanner_class.return_value = mock_scanner
+
+            # Should raise ClickException with both invalid IDs mentioned
+            with pytest.raises(click.exceptions.ClickException) as exc_info:
+                run_assessment(
+                    str(test_repo),
+                    verbose=False,
+                    output_dir=None,
+                    config_path=str(config_file),
+                    exclude=["invalid_cli_attr"],  # CLI invalid ID
+                )
+
+            error_msg = str(exc_info.value)
+            # Verify both sources are mentioned
+            assert "invalid_cli_attr" in error_msg
+            assert "invalid_config_attr" in error_msg
+            assert "Also invalid in config file" in error_msg