diff --git a/pyproject.toml b/pyproject.toml index 64df7d92..575796d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,6 +32,7 @@ dev = [ "black>=23.0.0", "isort>=5.12.0", "flake8>=6.0.0", + "ruff>=0.1.0", ] [project.scripts] diff --git a/scripts/backlog_to_issues.py b/scripts/backlog_to_issues.py index b1c82251..f2239bb4 100755 --- a/scripts/backlog_to_issues.py +++ b/scripts/backlog_to_issues.py @@ -10,12 +10,12 @@ 5. Pauses after first item for user review """ +import json import re import subprocess import sys from pathlib import Path -from typing import List, Dict, Optional -import json +from typing import Dict, List, Optional class BacklogItem: @@ -34,34 +34,33 @@ def __repr__(self): def parse_backlog(backlog_path: Path) -> List[BacklogItem]: """Parse BACKLOG.md and extract all items.""" - with open(backlog_path, 'r') as f: + with open(backlog_path, "r") as f: content = f.read() items = [] # Split into sections by ### - sections = re.split(r'\n### ', content) + sections = re.split(r"\n### ", content) for i, section in enumerate(sections[1:], 1): # Skip first section (header) - lines = section.split('\n') + lines = section.split("\n") title = lines[0].strip() # Find priority in next few lines priority = "P4" # default for line in lines[1:5]: - if match := re.search(r'\*\*Priority\*\*:\s*(P\d)', line): + if match := re.search(r"\*\*Priority\*\*:\s*(P\d)", line): priority = match.group(1) break # Get full content until next ### or end - full_content = '\n'.join(lines) + full_content = "\n".join(lines) - items.append(BacklogItem( - title=title, - priority=priority, - content=full_content, - section_start=i - )) + items.append( + BacklogItem( + title=title, priority=priority, content=full_content, section_start=i + ) + ) return items @@ -231,7 +230,9 @@ def generate_coldstart_prompt(item: BacklogItem, repo_context: Dict) -> str: return prompt -def create_github_issue(item: BacklogItem, prompt: str, repo_context: Dict, dry_run: bool = False) -> 
Optional[str]: +def create_github_issue( + item: BacklogItem, prompt: str, repo_context: Dict, dry_run: bool = False +) -> Optional[str]: """Create GitHub issue via gh CLI and attach coldstart prompt as comment.""" # Prepare issue title @@ -243,62 +244,64 @@ def create_github_issue(item: BacklogItem, prompt: str, repo_context: Dict, dry_ body_parts.append(f"**Priority**: {item.priority}\n") # Extract description (first paragraph after Priority) - lines = item.content.split('\n') + lines = item.content.split("\n") in_description = False description_lines = [] for line in lines: - if '**Description**:' in line: + if "**Description**:" in line: in_description = True continue if in_description: - if line.startswith('**') and ':' in line: + if line.startswith("**") and ":" in line: break description_lines.append(line) if description_lines: body_parts.append("## Description\n") - body_parts.append('\n'.join(description_lines)) + body_parts.append("\n".join(description_lines)) # Add link to full context body_parts.append("\n\n## Full Context\n") - body_parts.append(f"See [BACKLOG.md](https://github.com/{repo_context['owner']}/{repo_context['repo']}/blob/main/BACKLOG.md) for complete requirements.\n") + body_parts.append( + f"See [BACKLOG.md](https://github.com/{repo_context['owner']}/{repo_context['repo']}/blob/main/BACKLOG.md) for complete requirements.\n" + ) # Add acceptance criteria if present - if '**Acceptance Criteria**:' in item.content: - criteria_start = item.content.find('**Acceptance Criteria**:') - criteria_section = item.content[criteria_start:criteria_start+1000] + if "**Acceptance Criteria**:" in item.content: + criteria_start = item.content.find("**Acceptance Criteria**:") + criteria_section = item.content[criteria_start : criteria_start + 1000] body_parts.append("\n## Acceptance Criteria\n") - body_parts.append(criteria_section.split('\n\n')[0]) + body_parts.append(criteria_section.split("\n\n")[0]) - issue_body = '\n'.join(body_parts) + issue_body = 
"\n".join(body_parts) # Determine labels labels = [f"priority:{item.priority.lower()}"] # Add category labels based on title/content - if 'security' in item.title.lower() or 'xss' in item.content.lower(): - labels.append('security') - if 'bug' in item.title.lower() or 'fix' in item.title.lower(): - labels.append('bug') + if "security" in item.title.lower() or "xss" in item.content.lower(): + labels.append("security") + if "bug" in item.title.lower() or "fix" in item.title.lower(): + labels.append("bug") else: - labels.append('enhancement') - if 'test' in item.title.lower(): - labels.append('testing') - if 'github' in item.title.lower(): - labels.append('github-integration') - if 'report' in item.title.lower(): - labels.append('reporting') + labels.append("enhancement") + if "test" in item.title.lower(): + labels.append("testing") + if "github" in item.title.lower(): + labels.append("github-integration") + if "report" in item.title.lower(): + labels.append("reporting") - labels_str = ','.join(labels) + labels_str = ",".join(labels) if dry_run: print(f"\n{'='*80}") - print(f"DRY RUN: Would create issue:") + print("DRY RUN: Would create issue:") print(f"Title: {issue_title}") print(f"Labels: {labels_str}") print(f"Body preview:\n{issue_body[:500]}...") - print(f"\nColdstart prompt would be added as first comment") + print("\nColdstart prompt would be added as first comment") print(f"{'='*80}\n") return None @@ -307,34 +310,43 @@ def create_github_issue(item: BacklogItem, prompt: str, repo_context: Dict, dry_ # Create the issue result = subprocess.run( [ - 'gh', 'issue', 'create', - '--title', issue_title, - '--body', issue_body, - '--label', labels_str + "gh", + "issue", + "create", + "--title", + issue_title, + "--body", + issue_body, + "--label", + labels_str, ], capture_output=True, text=True, - check=True + check=True, ) issue_url = result.stdout.strip() print(f"āœ… Created issue: {issue_url}") # Extract issue number from URL - issue_number = 
issue_url.split('/')[-1] + issue_number = issue_url.split("/")[-1] # Add coldstart prompt as first comment subprocess.run( [ - 'gh', 'issue', 'comment', issue_number, - '--body', f"## šŸ¤– Coldstart Implementation Prompt\n\n{prompt}" + "gh", + "issue", + "comment", + issue_number, + "--body", + f"## šŸ¤– Coldstart Implementation Prompt\n\n{prompt}", ], capture_output=True, text=True, - check=True + check=True, ) - print(f"āœ… Added coldstart prompt as comment") + print("āœ… Added coldstart prompt as comment") return issue_url @@ -347,40 +359,39 @@ def get_repo_context() -> Dict: """Get repository context (owner, repo name) from git remote.""" try: result = subprocess.run( - ['gh', 'repo', 'view', '--json', 'owner,name'], + ["gh", "repo", "view", "--json", "owner,name"], capture_output=True, text=True, - check=True + check=True, ) data = json.loads(result.stdout) - return { - 'owner': data['owner']['login'], - 'repo': data['name'] - } - except Exception as e: + return {"owner": data["owner"]["login"], "repo": data["name"]} + except Exception: # No git remote - ask user or use default - print(f"āš ļø Warning: Could not get repo context from git remote") - print(f" This is expected if repository not yet on GitHub") - print(f" Using default values for now\n") + print("āš ļø Warning: Could not get repo context from git remote") + print(" This is expected if repository not yet on GitHub") + print(" Using default values for now\n") # For agentready, we know the intended location - return {'owner': 'redhat', 'repo': 'agentready'} + return {"owner": "redhat", "repo": "agentready"} -def save_prompt_to_file(item: BacklogItem, prompt: str, output_dir: Path, item_number: int) -> Path: +def save_prompt_to_file( + item: BacklogItem, prompt: str, output_dir: Path, item_number: int +) -> Path: """Save coldstart prompt to markdown file.""" # Create output directory if it doesn't exist output_dir.mkdir(parents=True, exist_ok=True) # Generate filename from item number and title - 
safe_title = re.sub(r'[^\w\s-]', '', item.title.lower()) - safe_title = re.sub(r'[-\s]+', '-', safe_title)[:50] + safe_title = re.sub(r"[^\w\s-]", "", item.title.lower()) + safe_title = re.sub(r"[-\s]+", "-", safe_title)[:50] filename = f"{item_number:02d}-{safe_title}.md" filepath = output_dir / filename # Write prompt to file - with open(filepath, 'w') as f: + with open(filepath, "w") as f: f.write(prompt) return filepath @@ -390,19 +401,19 @@ def main(): """Main script execution.""" # Parse command line args - create_issues = '--create-issues' in sys.argv - process_all = '--all' in sys.argv + create_issues = "--create-issues" in sys.argv + process_all = "--all" in sys.argv # Get repository root repo_root = Path(__file__).parent.parent - backlog_path = repo_root / 'BACKLOG.md' + backlog_path = repo_root / "BACKLOG.md" if not backlog_path.exists(): print(f"āŒ BACKLOG.md not found at {backlog_path}") sys.exit(1) # Create output directory - output_dir = repo_root / '.github' / 'coldstart-prompts' + output_dir = repo_root / ".github" / "coldstart-prompts" # Get repo context repo_context = get_repo_context() @@ -444,20 +455,22 @@ def main(): if issue_url: print(f"āœ… Created issue: {issue_url}\n") else: - print(f"āŒ Failed to create issue\n") + print("āŒ Failed to create issue\n") # Pause after first item unless --all specified if not process_all and idx == 1: print(f"\n{'='*80}") - print(f"āœ… FIRST PROMPT GENERATED") + print("āœ… FIRST PROMPT GENERATED") print(f"{'='*80}\n") print(f"Saved to: {filepath}") - print(f"\nPlease review the prompt file.") - print(f"Once approved, run with --all to process remaining {len(items) - 1} items:") - print(f" python scripts/backlog_to_issues.py --all") + print("\nPlease review the prompt file.") + print( + f"Once approved, run with --all to process remaining {len(items) - 1} items:" + ) + print(" python scripts/backlog_to_issues.py --all") if not create_issues: - print(f"\nTo also create GitHub issues, add --create-issues 
flag:") - print(f" python scripts/backlog_to_issues.py --all --create-issues") + print("\nTo also create GitHub issues, add --create-issues flag:") + print(" python scripts/backlog_to_issues.py --all --create-issues") return # All items processed @@ -466,12 +479,12 @@ def main(): print(f"{'='*80}\n") print(f"Coldstart prompts saved to: {output_dir}/") if create_issues: - print(f"GitHub issues created (check repository)") - print(f"\nNext steps:") + print("GitHub issues created (check repository)") + print("\nNext steps:") print(f" 1. Review generated prompts in {output_dir}/") - print(f" 2. Create GitHub issues manually, or run with --create-issues") - print(f" 3. Start implementing features using the coldstart prompts!") + print(" 2. Create GitHub issues manually, or run with --create-issues") + print(" 3. Start implementing features using the coldstart prompts!") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/src/agentready/assessors/code_quality.py b/src/agentready/assessors/code_quality.py index 2f47fa4c..931ecfe3 100644 --- a/src/agentready/assessors/code_quality.py +++ b/src/agentready/assessors/code_quality.py @@ -1,7 +1,6 @@ """Code quality assessors for complexity, file length, type annotations, and code smells.""" import subprocess -from pathlib import Path from ..models.attribute import Attribute from ..models.finding import Citation, Finding, Remediation @@ -326,7 +325,7 @@ def _assess_python_complexity(self, repository: Repository) -> Finding: self.attribute, reason="No Python code to analyze" ) - except MissingToolError as e: + except MissingToolError: raise # Re-raise to be caught by Scanner except Exception as e: return Finding.error( @@ -352,7 +351,7 @@ def _assess_with_lizard(self, repository: Repository) -> Finding: self.attribute, reason="Lizard analysis not fully implemented" ) - except MissingToolError as e: + except MissingToolError: raise except Exception as e: return Finding.error( diff --git 
a/src/agentready/assessors/documentation.py b/src/agentready/assessors/documentation.py index 27d2c533..b64f91e5 100644 --- a/src/agentready/assessors/documentation.py +++ b/src/agentready/assessors/documentation.py @@ -1,7 +1,5 @@ """Documentation assessor for CLAUDE.md, README, docstrings, and ADRs.""" -from pathlib import Path - from ..models.attribute import Attribute from ..models.finding import Citation, Finding, Remediation from ..models.repository import Repository diff --git a/src/agentready/assessors/repomix.py b/src/agentready/assessors/repomix.py index 2c1255b9..879697ca 100644 --- a/src/agentready/assessors/repomix.py +++ b/src/agentready/assessors/repomix.py @@ -1,7 +1,8 @@ """Repomix configuration assessor.""" from ..models.attribute import Attribute -from ..models.finding import Finding +from ..models.citation import Citation +from ..models.finding import Finding, Remediation from ..models.repository import Repository from ..services.repomix import RepomixService from .base import BaseAssessor @@ -50,58 +51,75 @@ def assess(self, repository: Repository) -> Finding: # Check if Repomix is configured if not service.has_config(): - return Finding.create_fail( - self.attribute, - score=0, + return Finding( + attribute=self.attribute, + status="fail", + score=0.0, + measured_value="not configured", + threshold="configured", evidence=[ "Repomix configuration not found", "Missing repomix.config.json", ], - remediation_steps=[ - "Initialize Repomix: agentready repomix-generate --init", - "Generate context: agentready repomix-generate", - "Add to bootstrap: agentready bootstrap --repomix", - "Set up GitHub Action for automatic updates", - ], - tools=["Repomix", "AgentReady"], - commands=[ - "agentready repomix-generate --init", - "agentready repomix-generate", - ], - examples=[ - "# Initialize Repomix configuration\n" - "agentready repomix-generate --init\n\n" - "# Generate repository context\n" - "agentready repomix-generate\n\n" - "# Check freshness\n" - 
"agentready repomix-generate --check" - ], - citations=[ - { - "title": "Repomix - AI-Friendly Repository Packager", - "url": "https://github.com/yamadashy/repomix", - "type": "tool", - } - ], + remediation=Remediation( + summary="Configure Repomix for AI-friendly context generation", + steps=[ + "Initialize Repomix: agentready repomix-generate --init", + "Generate context: agentready repomix-generate", + "Add to bootstrap: agentready bootstrap --repomix", + "Set up GitHub Action for automatic updates", + ], + tools=["Repomix", "AgentReady"], + commands=[ + "agentready repomix-generate --init", + "agentready repomix-generate", + ], + examples=[ + "# Initialize Repomix configuration\n" + "agentready repomix-generate --init\n\n" + "# Generate repository context\n" + "agentready repomix-generate\n\n" + "# Check freshness\n" + "agentready repomix-generate --check" + ], + citations=[ + Citation( + source="Repomix", + title="Repomix - AI-Friendly Repository Packager", + url="https://github.com/yamadashy/repomix", + relevance="AI-friendly repository context generation tool", + ) + ], + ), + error_message=None, ) # Check if output exists and is fresh output_files = service.get_output_files() if not output_files: - return Finding.create_fail( - self.attribute, - score=50, + return Finding( + attribute=self.attribute, + status="fail", + score=50.0, + measured_value="configured but no output", + threshold="configured with fresh output", evidence=[ "Repomix configuration exists", "No Repomix output files found", ], - remediation_steps=[ - "Generate Repomix output: agentready repomix-generate", - "Commit output if needed for team access", - "Set up GitHub Action for automatic regeneration", - ], - tools=["Repomix"], - commands=["agentready repomix-generate"], + remediation=Remediation( + summary="Generate Repomix output", + steps=[ + "Generate Repomix output: agentready repomix-generate", + "Commit output if needed for team access", + "Set up GitHub Action for automatic 
regeneration", + ], + tools=["Repomix"], + commands=["agentready repomix-generate"], + examples=[], + citations=[], + ), + error_message=None, ) # Check freshness (7 days max age) @@ -109,30 +127,44 @@ def assess(self, repository: Repository) -> Finding: if is_fresh: # All good - config exists, output exists and is fresh - return Finding.create_pass( - self.attribute, - score=100, + return Finding( + attribute=self.attribute, + status="pass", + score=100.0, + measured_value="fresh output", + threshold="output < 7 days old", evidence=[ "Repomix configuration exists", f"{len(output_files)} output file(s) found", message, ], + remediation=None, + error_message=None, ) else: # Config exists, output exists but is stale - return Finding.create_fail( - self.attribute, - score=75, + return Finding( + attribute=self.attribute, + status="fail", + score=75.0, + measured_value="stale output", + threshold="output < 7 days old", evidence=[ "Repomix configuration exists", f"{len(output_files)} output file(s) found", message, ], - remediation_steps=[ - "Regenerate Repomix output: agentready repomix-generate", - "Set up GitHub Action for automatic weekly updates", - "Add to pre-commit hooks for automatic regeneration", - ], - tools=["Repomix"], - commands=["agentready repomix-generate"], + remediation=Remediation( + summary="Regenerate stale Repomix output", + steps=[ + "Regenerate Repomix output: agentready repomix-generate", + "Set up GitHub Action for automatic weekly updates", + "Add to pre-commit hooks for automatic regeneration", + ], + tools=["Repomix"], + commands=["agentready repomix-generate"], + examples=[], + citations=[], + ), + error_message=None, ) diff --git a/src/agentready/assessors/structure.py b/src/agentready/assessors/structure.py index f29dbc3e..c150280c 100644 --- a/src/agentready/assessors/structure.py +++ b/src/agentready/assessors/structure.py @@ -1,7 +1,5 @@ """Structure assessors for project layout and separation of concerns.""" -from pathlib import 
Path - from ..models.attribute import Attribute from ..models.finding import Citation, Finding, Remediation from ..models.repository import Repository diff --git a/src/agentready/assessors/stub_assessors.py b/src/agentready/assessors/stub_assessors.py index c2a380e7..74f81fa3 100644 --- a/src/agentready/assessors/stub_assessors.py +++ b/src/agentready/assessors/stub_assessors.py @@ -4,10 +4,8 @@ enhanced later with more sophisticated detection and scoring logic. """ -from pathlib import Path - from ..models.attribute import Attribute -from ..models.finding import Citation, Finding, Remediation +from ..models.finding import Finding, Remediation from ..models.repository import Repository from .base import BaseAssessor diff --git a/src/agentready/assessors/testing.py b/src/agentready/assessors/testing.py index c3f02d95..926ddd4b 100644 --- a/src/agentready/assessors/testing.py +++ b/src/agentready/assessors/testing.py @@ -1,9 +1,5 @@ """Testing assessors for test coverage, naming conventions, and pre-commit hooks.""" -import re -import subprocess -from pathlib import Path - from ..models.attribute import Attribute from ..models.finding import Citation, Finding, Remediation from ..models.repository import Repository diff --git a/src/agentready/cli/align.py b/src/agentready/cli/align.py new file mode 100644 index 00000000..da074325 --- /dev/null +++ b/src/agentready/cli/align.py @@ -0,0 +1,189 @@ +"""Align command for automated remediation.""" + +import sys +from pathlib import Path + +import click + +from ..models.config import Config +from ..models.repository import Repository +from ..services.fixer_service import FixerService +from ..services.language_detector import LanguageDetector +from ..services.scanner import Scanner + + +def get_certification_level(score: float) -> tuple[str, str]: + """Get certification level and emoji for score. 
+ + Args: + score: Score 0-100 + + Returns: + Tuple of (level_name, emoji) + """ + if score >= 90: + return ("Platinum", "šŸ’Ž") + elif score >= 75: + return ("Gold", "šŸ„‡") + elif score >= 60: + return ("Silver", "🄈") + elif score >= 40: + return ("Bronze", "šŸ„‰") + else: + return ("Needs Improvement", "šŸ“Š") + + +@click.command() +@click.argument("repository", type=click.Path(exists=True), default=".") +@click.option( + "--dry-run", + is_flag=True, + help="Preview changes without applying them", +) +@click.option( + "--attributes", + help="Comma-separated attribute IDs to fix (default: all)", +) +@click.option( + "--interactive", + "-i", + is_flag=True, + help="Confirm each fix before applying", +) +def align(repository, dry_run, attributes, interactive): + """Align repository with best practices by applying automatic fixes. + + Runs assessment, identifies failing attributes, and automatically generates + and applies fixes to improve the repository's agent-ready score. + + REPOSITORY: Path to repository (default: current directory) + """ + repo_path = Path(repository).resolve() + + # Validate git repository + if not (repo_path / ".git").exists(): + click.echo("Error: Not a git repository", err=True) + sys.exit(1) + + click.echo("šŸ”§ AgentReady Align") + click.echo("=" * 60) + click.echo(f"\nRepository: {repo_path}") + if dry_run: + click.echo("Mode: DRY RUN (preview only)\n") + else: + click.echo("Mode: APPLY FIXES\n") + + # Step 1: Run assessment + click.echo("šŸ“Š Running assessment...") + try: + # Create repository model + detector = LanguageDetector(repo_path) + languages = detector.detect_languages() + + repo = Repository( + path=repo_path, + languages=languages, + metadata={}, + ) + + # Load config + config = Config.load_default() + + # Run assessment + scanner = Scanner(config=config) + assessment = scanner.scan(repo) + + current_level, current_emoji = get_certification_level(assessment.overall_score) + + click.echo( + f"Current Score: 
{assessment.overall_score:.1f}/100 ({current_level} {current_emoji})" + ) + click.echo(f"Attributes Assessed: {len(assessment.findings)}") + click.echo( + f"Failing Attributes: {sum(1 for f in assessment.findings if f.status == 'fail')}\n" + ) + + except Exception as e: + click.echo(f"\nError during assessment: {str(e)}", err=True) + sys.exit(1) + + # Step 2: Generate fix plan + click.echo("šŸ” Analyzing fixable issues...") + + attribute_list = None + if attributes: + attribute_list = [a.strip() for a in attributes.split(",")] + + fixer_service = FixerService() + fix_plan = fixer_service.generate_fix_plan(assessment, repo, attribute_list) + + if not fix_plan.fixes: + click.echo("\nāœ… No automatic fixes available.") + click.echo( + "All fixable attributes are passing, or failing attributes require manual remediation." + ) + sys.exit(0) + + # Show fix plan + projected_level, projected_emoji = get_certification_level(fix_plan.projected_score) + + click.echo(f"\nFixes Available: {len(fix_plan.fixes)}") + click.echo(f"Points to Gain: +{fix_plan.points_gained:.1f}") + click.echo( + f"Projected Score: {fix_plan.projected_score:.1f}/100 ({projected_level} {projected_emoji})\n" + ) + + click.echo("Changes to be applied:\n") + for i, fix in enumerate(fix_plan.fixes, 1): + click.echo(f" {i}. [{fix.attribute_id}] {fix.description}") + click.echo(f" {fix.preview()}") + click.echo(f" Points: +{fix.points_gained:.1f}\n") + + # Step 3: Confirm or apply + if dry_run: + click.echo("=" * 60) + click.echo("\nDry run complete! 
Run without --dry-run to apply fixes.") + return + + # Interactive mode: confirm each fix + fixes_to_apply = [] + if interactive: + click.echo("=" * 60) + click.echo("\nInteractive mode: Confirm each fix\n") + for fix in fix_plan.fixes: + if click.confirm(f"Apply fix: {fix.description}?", default=True): + fixes_to_apply.append(fix) + click.echo() + else: + # Confirm all + if not click.confirm("\nApply all fixes?", default=True): + click.echo("Aborted.") + sys.exit(0) + fixes_to_apply = fix_plan.fixes + + if not fixes_to_apply: + click.echo("No fixes selected. Aborted.") + sys.exit(0) + + # Step 4: Apply fixes + click.echo(f"\nšŸ”Ø Applying {len(fixes_to_apply)} fixes...\n") + + results = fixer_service.apply_fixes(fixes_to_apply, dry_run=False) + + # Report results + click.echo("=" * 60) + click.echo(f"\nāœ… Fixes applied: {results['succeeded']}/{len(fixes_to_apply)}") + + if results["failed"] > 0: + click.echo(f"āŒ Fixes failed: {results['failed']}") + click.echo("\nFailures:") + for failure in results["failures"]: + click.echo(f" - {failure}") + + click.echo("\nNext steps:") + click.echo(" 1. Review changes: git status") + click.echo(" 2. Test the changes") + click.echo( + " 3. Commit: git add . && git commit -m 'chore: Apply AgentReady fixes'" + ) + click.echo(" 4. 
Run assessment again: agentready assess .") diff --git a/src/agentready/cli/demo.py b/src/agentready/cli/demo.py index dea04759..c90ad7ba 100644 --- a/src/agentready/cli/demo.py +++ b/src/agentready/cli/demo.py @@ -458,10 +458,14 @@ def demo(language, no_browser, keep_repo): click.echo() # Display score with color based on level - score_color = "green" if overall_score >= 75 else "yellow" if overall_score >= 60 else "red" - click.echo(f" Overall Score: ", nl=False) + score_color = ( + "green" + if overall_score >= 75 + else "yellow" if overall_score >= 60 else "red" + ) + click.echo(" Overall Score: ", nl=False) click.secho(f"{overall_score:.1f}/100", fg=score_color, bold=True) - click.echo(f" Certification: ", nl=False) + click.echo(" Certification: ", nl=False) click.secho(certification_level, fg=score_color, bold=True) click.echo(f" Assessed: {assessed}/25 attributes") click.echo(f" Skipped: {skipped} attributes") @@ -537,7 +541,7 @@ def demo(language, no_browser, keep_repo): click.echo("Next steps:") click.echo(f" • View HTML report: {html_file}") click.echo(f" • View Markdown report: {md_file}") - click.echo(f" • Assess your own repo: agentready assess /path/to/repo") + click.echo(" • Assess your own repo: agentready assess /path/to/repo") click.echo() if keep_repo: diff --git a/src/agentready/cli/main.py b/src/agentready/cli/main.py index 54e0f713..82846c86 100644 --- a/src/agentready/cli/main.py +++ b/src/agentready/cli/main.py @@ -32,6 +32,7 @@ from ..reporters.markdown import MarkdownReporter from ..services.research_loader import ResearchLoader from ..services.scanner import Scanner +from .align import align from .bootstrap import bootstrap from .demo import demo from .learn import learn @@ -303,6 +304,7 @@ def generate_config(): # Register commands +cli.add_command(align) cli.add_command(bootstrap) cli.add_command(demo) cli.add_command(learn) diff --git a/src/agentready/fixers/__init__.py b/src/agentready/fixers/__init__.py new file mode 100644 index 
00000000..9823aea1 --- /dev/null +++ b/src/agentready/fixers/__init__.py @@ -0,0 +1,5 @@ +"""Fixers for automated remediation of failing attributes.""" + +from agentready.fixers.base import BaseFixer + +__all__ = ["BaseFixer"] diff --git a/src/agentready/fixers/base.py b/src/agentready/fixers/base.py new file mode 100644 index 00000000..b03a7547 --- /dev/null +++ b/src/agentready/fixers/base.py @@ -0,0 +1,71 @@ +"""Base fixer interface for automated remediation.""" + +from abc import ABC, abstractmethod +from typing import Optional + +from ..models.finding import Finding +from ..models.fix import Fix +from ..models.repository import Repository + + +class BaseFixer(ABC): + """Abstract base class for all attribute fixers. + + Each fixer knows how to automatically remediate a specific failing attribute + by generating files, modifying configurations, or executing commands. + + Fixers follow the strategy pattern and are stateless for easy testing. + """ + + @property + @abstractmethod + def attribute_id(self) -> str: + """Unique attribute identifier (e.g., 'claude_md_file'). + + Must match the attribute ID from assessors. + """ + pass + + @abstractmethod + def can_fix(self, finding: Finding) -> bool: + """Check if this fixer can fix the given finding. + + Args: + finding: Assessment finding for the attribute + + Returns: + True if this fixer can generate a fix, False otherwise + """ + pass + + @abstractmethod + def generate_fix(self, repository: Repository, finding: Finding) -> Optional[Fix]: + """Generate a fix for the failing attribute. + + Args: + repository: Repository entity with path, languages, metadata + finding: Failing finding to remediate + + Returns: + Fix object if one can be generated, None if cannot be fixed automatically + + Raises: + This method should NOT raise exceptions. Return None on errors. + """ + pass + + def estimate_score_improvement(self, finding: Finding) -> float: + """Estimate score points gained if fix is applied. 
+ + Args: + finding: Failing finding + + Returns: + Estimated points (0-100) that would be gained + + Default implementation: Use attribute default_weight from finding. + """ + if finding.status == "fail" and finding.attribute.default_weight: + # Full weight if currently failing (0 points) + return finding.attribute.default_weight * 100 + return 0.0 diff --git a/src/agentready/fixers/documentation.py b/src/agentready/fixers/documentation.py new file mode 100644 index 00000000..67697791 --- /dev/null +++ b/src/agentready/fixers/documentation.py @@ -0,0 +1,104 @@ +"""Fixers for documentation-related attributes.""" + +from datetime import datetime +from pathlib import Path +from typing import Optional + +from jinja2 import Environment, PackageLoader + +from ..models.finding import Finding +from ..models.fix import FileCreationFix, Fix +from ..models.repository import Repository +from .base import BaseFixer + + +class CLAUDEmdFixer(BaseFixer): + """Fixer for missing CLAUDE.md file.""" + + def __init__(self): + """Initialize with Jinja2 environment.""" + self.env = Environment( + loader=PackageLoader("agentready", "templates/align"), + trim_blocks=True, + lstrip_blocks=True, + ) + + @property + def attribute_id(self) -> str: + """Return attribute ID.""" + return "claude_md_file" + + def can_fix(self, finding: Finding) -> bool: + """Check if CLAUDE.md is missing.""" + return finding.status == "fail" and finding.attribute.id == self.attribute_id + + def generate_fix(self, repository: Repository, finding: Finding) -> Optional[Fix]: + """Generate CLAUDE.md from template.""" + if not self.can_fix(finding): + return None + + # Load template + template = self.env.get_template("CLAUDE.md.j2") + + # Render with repository context + content = template.render( + repo_name=repository.path.name, + current_date=datetime.now().strftime("%Y-%m-%d"), + ) + + # Create fix + return FileCreationFix( + attribute_id=self.attribute_id, + description="Create CLAUDE.md with project 
documentation template", + points_gained=self.estimate_score_improvement(finding), + file_path=Path("CLAUDE.md"), + content=content, + repository_path=repository.path, + ) + + +class GitignoreFixer(BaseFixer): + """Fixer for incomplete .gitignore.""" + + def __init__(self): + """Initialize fixer.""" + self.template_path = ( + Path(__file__).parent.parent + / "templates" + / "align" + / "gitignore_additions.txt" + ) + + @property + def attribute_id(self) -> str: + """Return attribute ID.""" + return "gitignore_completeness" + + def can_fix(self, finding: Finding) -> bool: + """Check if .gitignore can be improved.""" + return finding.status == "fail" and finding.attribute.id == self.attribute_id + + def generate_fix(self, repository: Repository, finding: Finding) -> Optional[Fix]: + """Add missing patterns to .gitignore.""" + if not self.can_fix(finding): + return None + + # Load recommended patterns + if not self.template_path.exists(): + return None + + additions = self.template_path.read_text(encoding="utf-8").splitlines() + + # Import FileModificationFix + from ..models.fix import FileModificationFix + + # Create fix + return FileModificationFix( + attribute_id=self.attribute_id, + description="Add recommended patterns to .gitignore", + points_gained=self.estimate_score_improvement(finding), + file_path=Path(".gitignore"), + additions=additions, + repository_path=repository.path, + append=False, # Smart merge to avoid duplicates + ) diff --git a/src/agentready/fixers/testing.py b/src/agentready/fixers/testing.py new file mode 100644 index 00000000..204a057f --- /dev/null +++ b/src/agentready/fixers/testing.py @@ -0,0 +1,82 @@ +"""Fixers for testing-related attributes.""" + +from pathlib import Path +from typing import Optional + +from jinja2 import Environment, PackageLoader + +from ..models.finding import Finding +from ..models.fix import CommandFix, FileCreationFix, Fix, MultiStepFix +from ..models.repository import Repository +from .base import BaseFixer + + 
class PrecommitHooksFixer(BaseFixer):
    """Fixer that sets up pre-commit hooks (config file + install command)."""

    def __init__(self):
        """Initialize the Jinja2 environment over the bootstrap templates."""
        self.env_bootstrap = Environment(
            loader=PackageLoader("agentready", "templates/bootstrap"),
            trim_blocks=True,
            lstrip_blocks=True,
        )

    @property
    def attribute_id(self) -> str:
        """ID of the attribute this fixer remediates."""
        return "precommit_hooks"

    def can_fix(self, finding: Finding) -> bool:
        """Return True when the finding is a failure for this attribute."""
        return finding.status == "fail" and finding.attribute.id == self.attribute_id

    def generate_fix(self, repository: Repository, finding: Finding) -> Optional[Fix]:
        """Generate a multi-step fix: write the config, then install hooks.

        Args:
            repository: Repository the fix targets.
            finding: Failing finding for the precommit_hooks attribute.

        Returns:
            A MultiStepFix (file creation + `pre-commit install` command),
            or None when the finding is not fixable.
        """
        from jinja2 import TemplateNotFound

        if not self.can_fix(finding):
            return None

        # Pick the language with the largest value in repository.languages
        # (presumably a line or file count — confirm against scanner),
        # defaulting to Python.
        primary_lang = "python"
        if repository.languages:
            primary_lang = max(
                repository.languages, key=repository.languages.get
            ).lower()

        # Prefer a language-specific template; fall back to the Python one
        # only when no template exists for that language. Narrowed from a
        # blanket Exception so template syntax errors still surface.
        try:
            template = self.env_bootstrap.get_template(
                f"precommit-{primary_lang}.yaml.j2"
            )
        except TemplateNotFound:
            template = self.env_bootstrap.get_template("precommit-python.yaml.j2")

        content = template.render()

        file_fix = FileCreationFix(
            attribute_id=self.attribute_id,
            description="Create .pre-commit-config.yaml",
            points_gained=0,  # Points are aggregated on the enclosing MultiStepFix
            file_path=Path(".pre-commit-config.yaml"),
            content=content,
            repository_path=repository.path,
        )

        install_fix = CommandFix(
            attribute_id=self.attribute_id,
            description="Install pre-commit hooks",
            points_gained=0,
            command="pre-commit install",
            working_dir=None,
            repository_path=repository.path,
        )

        return MultiStepFix(
            attribute_id=self.attribute_id,
            description="Set up pre-commit hooks (config + install)",
            points_gained=self.estimate_score_improvement(finding),
            steps=[file_fix, install_fix],
        )
"""Fix models for automated remediation."""

from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional


@dataclass
class Fix(ABC):
    """Base class for automated fixes.

    Attributes:
        attribute_id: ID of the attribute being fixed
        description: Human-readable description of the fix
        points_gained: Estimated points this fix will add to the score
    """

    attribute_id: str
    description: str
    points_gained: float

    @abstractmethod
    def apply(self, dry_run: bool = False) -> bool:
        """Apply the fix to the repository.

        Args:
            dry_run: If True, don't make changes, just validate.

        Returns:
            True if the fix was applied successfully, False otherwise.
        """

    @abstractmethod
    def preview(self) -> str:
        """Return a human-readable description of the changes this fix makes."""


@dataclass
class FileCreationFix(Fix):
    """Fix that creates a new file.

    Attributes:
        file_path: Path to create (relative to repository root)
        content: File content to write
        repository_path: Repository root path
    """

    file_path: Path
    content: str
    repository_path: Path

    def apply(self, dry_run: bool = False) -> bool:
        """Create the file; never overwrite an existing one."""
        target_path = self.repository_path / self.file_path

        # Refuse to clobber a file the user already has.
        if target_path.exists():
            return False

        if not dry_run:
            target_path.parent.mkdir(parents=True, exist_ok=True)
            target_path.write_text(self.content, encoding="utf-8")
        return True

    def preview(self) -> str:
        """Preview file creation with the on-disk (UTF-8) size."""
        size_kb = len(self.content.encode("utf-8")) / 1024
        return f"CREATE {self.file_path} ({size_kb:.1f} KB)"


@dataclass
class FileModificationFix(Fix):
    """Fix that modifies an existing file.

    Attributes:
        file_path: Path to modify (relative to repository root)
        additions: Lines to add to the file
        repository_path: Repository root path
        append: If True, append all additions; if False, smart-merge,
            skipping lines already present in the file
    """

    file_path: Path
    additions: List[str]
    repository_path: Path
    append: bool = True

    def apply(self, dry_run: bool = False) -> bool:
        """Add lines to the file; returns False when the file is missing."""
        target_path = self.repository_path / self.file_path

        # Modification requires an existing file (creation is FileCreationFix).
        if not target_path.exists():
            return False
        if dry_run:
            return True

        existing_text = target_path.read_text(encoding="utf-8")
        seen = set(existing_text.splitlines())

        with target_path.open("a", encoding="utf-8") as f:
            # If the file does not end in a newline, terminate its last line
            # first so the additions don't glue onto it.
            if existing_text and not existing_text.endswith("\n"):
                f.write("\n")
            for line in self.additions:
                if self.append or line not in seen:
                    f.write(line + "\n")
                    # Track written lines so duplicates inside `additions`
                    # are merged away too.
                    seen.add(line)
        return True

    def preview(self) -> str:
        """Preview file modification."""
        return f"MODIFY {self.file_path} (+{len(self.additions)} lines)"


@dataclass
class CommandFix(Fix):
    """Fix that executes a command.

    Attributes:
        command: Command to execute
        working_dir: Directory to run the command in (defaults to repository_path)
        repository_path: Repository root path
    """

    command: str
    working_dir: Optional[Path]
    repository_path: Path

    def apply(self, dry_run: bool = False) -> bool:
        """Run the command; True iff it exits with status 0."""
        if dry_run:
            return True

        import subprocess

        cwd = self.working_dir or self.repository_path
        try:
            # NOTE: shell=True is acceptable only because `command` is built by
            # fixer code, never from untrusted input.
            subprocess.run(
                self.command,
                shell=True,
                cwd=cwd,
                check=True,
                capture_output=True,
                text=True,
            )
        except subprocess.CalledProcessError:
            return False
        return True

    def preview(self) -> str:
        """Preview command execution."""
        return f"RUN {self.command}"


@dataclass
class MultiStepFix(Fix):
    """Fix composed of multiple ordered steps.

    Attributes:
        steps: Ordered list of fixes to apply; stops at the first failure
    """

    steps: List[Fix]

    def apply(self, dry_run: bool = False) -> bool:
        """Apply each step in order; False as soon as one step fails."""
        # all() short-circuits, so later steps are skipped after a failure,
        # matching sequential apply-until-failure semantics.
        return all(step.apply(dry_run) for step in self.steps)

    def preview(self) -> str:
        """Preview all steps as a numbered list."""
        lines = [f"MULTI-STEP FIX ({len(self.steps)} steps):"]
        for i, step in enumerate(self.steps, 1):
            lines.append(f"  {i}. {step.preview()}")
        return "\n".join(lines)
"""Service for orchestrating automated fixes."""

from dataclasses import dataclass
from typing import List, Optional

from ..fixers.base import BaseFixer
from ..fixers.documentation import CLAUDEmdFixer, GitignoreFixer
from ..fixers.testing import PrecommitHooksFixer
from ..models.assessment import Assessment
from ..models.fix import Fix
from ..models.repository import Repository


@dataclass
class FixPlan:
    """Plan for applying fixes to a repository.

    Attributes:
        fixes: List of fixes to apply
        current_score: Current assessment score
        projected_score: Score after applying fixes
        points_gained: Total points that would be gained
    """

    fixes: List[Fix]
    current_score: float
    projected_score: float
    points_gained: float


class FixerService:
    """Orchestrates automated remediation of failing attributes."""

    def __init__(self):
        """Register all available fixers."""
        self.fixers: List[BaseFixer] = [
            CLAUDEmdFixer(),
            GitignoreFixer(),
            PrecommitHooksFixer(),
        ]

    def generate_fix_plan(
        self,
        assessment: Assessment,
        repository: Repository,
        attribute_ids: Optional[List[str]] = None,
    ) -> FixPlan:
        """Generate a plan for fixing failing attributes.

        Args:
            assessment: Current assessment results.
            repository: Repository to fix.
            attribute_ids: Optional list of specific attribute IDs to fix.
                If None, attempts to fix all failing attributes.

        Returns:
            FixPlan with fixes and score projections.
        """
        fixes: List[Fix] = []

        # Consider only failing findings, optionally narrowed to a subset.
        failing_findings = [f for f in assessment.findings if f.status == "fail"]
        if attribute_ids:
            failing_findings = [
                f for f in failing_findings if f.attribute.id in attribute_ids
            ]

        for finding in failing_findings:
            fixer = self._find_fixer(finding.attribute.id)
            if fixer and fixer.can_fix(finding):
                fix = fixer.generate_fix(repository, finding)
                if fix:
                    fixes.append(fix)

        # Project the score, capped at the maximum possible 100.
        points_gained = sum(f.points_gained for f in fixes)
        projected_score = min(100.0, assessment.overall_score + points_gained)

        return FixPlan(
            fixes=fixes,
            current_score=assessment.overall_score,
            projected_score=projected_score,
            points_gained=points_gained,
        )

    def apply_fixes(self, fixes: List[Fix], dry_run: bool = False) -> dict:
        """Apply a list of fixes.

        Args:
            fixes: Fixes to apply.
            dry_run: If True, don't make changes.

        Returns:
            Dict with "succeeded"/"failed" counts and a "failures" list of
            human-readable messages.
        """
        results = {"succeeded": 0, "failed": 0, "failures": []}

        for fix in fixes:
            try:
                if fix.apply(dry_run=dry_run):
                    results["succeeded"] += 1
                else:
                    results["failed"] += 1
                    results["failures"].append(
                        f"{fix.description}: Unable to apply fix"
                    )
            except Exception as e:
                # One broken fix must not abort the rest of the batch.
                results["failed"] += 1
                results["failures"].append(f"{fix.description}: {str(e)}")

        return results

    def _find_fixer(self, attribute_id: str) -> Optional[BaseFixer]:
        """Return the registered fixer for attribute_id, or None."""
        for fixer in self.fixers:
            if fixer.attribute_id == attribute_id:
                return fixer
        return None
decisions] + +## Development + +### Setup + +```bash +# Development environment setup +``` + +### Running Tests + +```bash +# Test commands +``` + +### Code Quality + +```bash +# Linting and formatting commands +``` + +## Contributing + +[Contribution guidelines] + +## Technologies + +- [List key technologies, frameworks, and libraries] + +--- + +**Note for AI Agents**: This file helps AI understand the project structure, conventions, and workflows. diff --git a/src/agentready/templates/align/gitignore_additions.txt b/src/agentready/templates/align/gitignore_additions.txt new file mode 100644 index 00000000..418fcd8a --- /dev/null +++ b/src/agentready/templates/align/gitignore_additions.txt @@ -0,0 +1,69 @@ +# AgentReady recommended patterns + +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# IDEs +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Test coverage +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.pytest_cache/ + +# Logs +*.log + +# Databases +*.db +*.sqlite + +# Node +node_modules/ +package-lock.json +npm-debug.log* +yarn-debug.log* +yarn-error.log* diff --git a/tests/integration/test_scan_workflow.py b/tests/integration/test_scan_workflow.py index 9b6a8085..1e3d1805 100644 --- a/tests/integration/test_scan_workflow.py +++ b/tests/integration/test_scan_workflow.py @@ -2,8 +2,6 @@ from pathlib import Path -import pytest - from agentready.assessors.documentation import CLAUDEmdAssessor, READMEAssessor from agentready.reporters.html import HTMLReporter from agentready.reporters.markdown import MarkdownReporter diff --git a/tests/unit/test_assessors_structure.py b/tests/unit/test_assessors_structure.py index 52500d31..fc51324b 100644 --- 
"""Unit tests for fixers."""

import tempfile
from pathlib import Path

import pytest

from agentready.fixers.documentation import CLAUDEmdFixer, GitignoreFixer
from agentready.models.attribute import Attribute
from agentready.models.finding import Finding, Remediation
from agentready.models.repository import Repository


def _failing_finding(attribute, summary, step, score, measured, threshold):
    """Build a failing Finding with a one-step remediation."""
    remediation = Remediation(
        summary=summary,
        steps=[step],
        tools=[],
        commands=[],
        examples=[],
        citations=[],
    )
    return Finding(
        attribute=attribute,
        status="fail",
        score=score,
        measured_value=measured,
        threshold=threshold,
        evidence=[],
        remediation=remediation,
        error_message=None,
    )


@pytest.fixture
def temp_repo():
    """Yield a throwaway Repository rooted in a temporary directory."""
    with tempfile.TemporaryDirectory() as tmp:
        root = Path(tmp)
        (root / ".git").mkdir()  # minimal marker so the path looks like a repo
        yield Repository(
            path=root,
            name="test-repo",
            url=None,
            branch="main",
            commit_hash="abc123",
            languages={},
            total_files=0,
            total_lines=0,
        )


@pytest.fixture
def claude_md_failing_finding():
    """Failing finding for the claude_md_file attribute."""
    attribute = Attribute(
        id="claude_md_file",
        name="CLAUDE.md File",
        description="Repository has CLAUDE.md",
        category="Documentation",
        tier=1,
        criteria="File exists",
        default_weight=0.10,
    )
    return _failing_finding(
        attribute,
        "Create CLAUDE.md",
        "Create CLAUDE.md file",
        0.0,
        "Not found",
        "Present",
    )


@pytest.fixture
def gitignore_failing_finding():
    """Failing finding for the gitignore_completeness attribute."""
    attribute = Attribute(
        id="gitignore_completeness",
        name="Gitignore Completeness",
        description="Complete .gitignore patterns",
        category="Version Control",
        tier=2,
        criteria=">90% patterns",
        default_weight=0.03,
    )
    return _failing_finding(
        attribute,
        "Improve .gitignore",
        "Add recommended patterns",
        50.0,
        "50% coverage",
        ">90% coverage",
    )


class TestCLAUDEmdFixer:
    """Tests for CLAUDEmdFixer."""

    def test_attribute_id(self):
        """The fixer targets the claude_md_file attribute."""
        assert CLAUDEmdFixer().attribute_id == "claude_md_file"

    def test_can_fix_failing_finding(self, claude_md_failing_finding):
        """A failing finding is fixable."""
        assert CLAUDEmdFixer().can_fix(claude_md_failing_finding) is True

    def test_cannot_fix_passing_finding(self, claude_md_failing_finding):
        """A passing finding is not fixable."""
        claude_md_failing_finding.status = "pass"
        assert CLAUDEmdFixer().can_fix(claude_md_failing_finding) is False

    def test_generate_fix(self, claude_md_failing_finding, temp_repo):
        """The generated fix targets CLAUDE.md with markdown content."""
        fix = CLAUDEmdFixer().generate_fix(temp_repo, claude_md_failing_finding)

        assert fix is not None
        assert fix.attribute_id == "claude_md_file"
        assert fix.file_path == Path("CLAUDE.md")
        assert "# " in fix.content  # contains a markdown header
        assert fix.points_gained > 0

    def test_apply_fix_dry_run(self, claude_md_failing_finding, temp_repo):
        """A dry run reports success without touching disk."""
        fix = CLAUDEmdFixer().generate_fix(temp_repo, claude_md_failing_finding)

        assert fix.apply(dry_run=True) is True
        assert not (temp_repo.path / "CLAUDE.md").exists()

    def test_apply_fix_real(self, claude_md_failing_finding, temp_repo):
        """A real apply writes a non-empty CLAUDE.md."""
        fix = CLAUDEmdFixer().generate_fix(temp_repo, claude_md_failing_finding)

        assert fix.apply(dry_run=False) is True

        target = temp_repo.path / "CLAUDE.md"
        assert target.exists()
        written = target.read_text()
        assert len(written) > 0
        assert "# " in written


class TestGitignoreFixer:
    """Tests for GitignoreFixer."""

    def test_attribute_id(self):
        """The fixer targets the gitignore_completeness attribute."""
        assert GitignoreFixer().attribute_id == "gitignore_completeness"

    def test_can_fix_failing_finding(self, gitignore_failing_finding):
        """A failing finding is fixable."""
        assert GitignoreFixer().can_fix(gitignore_failing_finding) is True

    def test_generate_fix_requires_existing_gitignore(
        self, temp_repo, gitignore_failing_finding
    ):
        """The fix is generated but fails to apply when .gitignore is absent."""
        fix = GitignoreFixer().generate_fix(temp_repo, gitignore_failing_finding)

        assert fix is not None
        assert fix.attribute_id == "gitignore_completeness"
        # Applying must fail: there is no .gitignore to modify.
        assert fix.apply(dry_run=False) is False

    def test_apply_fix_to_existing_gitignore(
        self, temp_repo, gitignore_failing_finding
    ):
        """Applying merges the recommended patterns into .gitignore."""
        gitignore = temp_repo.path / ".gitignore"
        gitignore.write_text("# Existing patterns\n*.log\n")

        fix = GitignoreFixer().generate_fix(temp_repo, gitignore_failing_finding)
        assert fix.apply(dry_run=False) is True

        merged = gitignore.read_text()
        assert "# AgentReady recommended patterns" in merged
        assert "__pycache__/" in merged
assert finding.status == "fail" assert finding.score == 50 assert "exists" in finding.evidence[0].lower() - assert "not found" in finding.evidence[1].lower() + assert "files found" in finding.evidence[1].lower() def test_assess_fresh_output(self, tmp_path): """Test assessment with fresh output.""" - # Create config and output + # Create .git directory, config and output + (tmp_path / ".git").mkdir() (tmp_path / "repomix.config.json").write_text("{}") (tmp_path / "repomix-output.md").write_text("content") - repo = Repository(path=tmp_path, languages={}) + repo = Repository( + path=tmp_path, + name="test-repo", + url=None, + branch="main", + commit_hash="abc123", + languages={}, + total_files=0, + total_lines=0, + ) assessor = RepomixConfigAssessor() finding = assessor.assess(repo) diff --git a/tests/unit/test_security.py b/tests/unit/test_security.py index 62f0e196..a2d638a2 100644 --- a/tests/unit/test_security.py +++ b/tests/unit/test_security.py @@ -1,9 +1,6 @@ """Security tests for AgentReady.""" from datetime import datetime -from pathlib import Path - -import pytest from agentready.models.assessment import Assessment from agentready.models.attribute import Attribute