diff --git a/.github/workflows/beta-release.yml b/.github/workflows/beta-release.yml index f19d3e607a..2802774fbd 100644 --- a/.github/workflows/beta-release.yml +++ b/.github/workflows/beta-release.yml @@ -97,13 +97,21 @@ jobs: - name: Install Rust toolchain (for building native Python packages) uses: dtolnay/rust-toolchain@stable + - name: Cache pip wheel cache (for compiled packages like real_ladybug) + uses: actions/cache@v4 + with: + path: ~/Library/Caches/pip + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-x64-3.12.8-rust + key: python-bundle-${{ runner.os }}-x64-3.12.8-rust-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8-rust- - name: Build application run: cd apps/frontend && npm run build @@ -181,13 +189,21 @@ jobs: - name: Install dependencies run: cd apps/frontend && npm ci + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~/Library/Caches/pip + key: pip-wheel-${{ runner.os }}-arm64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-arm64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-arm64-3.12.8 + key: python-bundle-${{ runner.os }}-arm64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-arm64- + python-bundle-${{ runner.os }}-arm64-3.12.8- - name: Build application run: cd apps/frontend && npm run build @@ -265,13 +281,21 @@ jobs: - name: Install dependencies run: cd apps/frontend && npm ci + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~\AppData\Local\pip\Cache + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-x64-3.12.8 + key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8- - name: Build application run: cd apps/frontend && npm run build @@ -335,13 +359,21 @@ jobs: flatpak install -y --user flathub org.freedesktop.Platform//25.08 org.freedesktop.Sdk//25.08 flatpak install -y --user flathub org.electronjs.Electron2.BaseApp//25.08 + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-x64-3.12.8 + key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8- - name: Build application run: cd apps/frontend && npm run build diff --git a/.github/workflows/prepare-release.yml b/.github/workflows/prepare-release.yml index d50940c188..ac10837861 100644 --- a/.github/workflows/prepare-release.yml +++ b/.github/workflows/prepare-release.yml @@ 
-1,8 +1,10 @@ name: Prepare Release # Triggers when code is pushed to main (e.g., merging develop β†’ main) -# If package.json version is newer than the latest tag, creates a new tag -# which then triggers the release.yml workflow +# If package.json version is newer than the latest tag: +# 1. Validates CHANGELOG.md has an entry for this version (FAILS if missing) +# 2. Extracts release notes from CHANGELOG.md +# 3. Creates a new tag which triggers release.yml on: push: @@ -67,8 +69,122 @@ jobs: echo "⏭️ No release needed (package version not newer than latest tag)" fi - - name: Create and push tag + # CRITICAL: Validate CHANGELOG.md has entry for this version BEFORE creating tag + - name: Validate and extract changelog if: steps.check.outputs.should_release == 'true' + id: changelog + run: | + VERSION="${{ steps.check.outputs.new_version }}" + CHANGELOG_FILE="CHANGELOG.md" + + echo "πŸ” Validating CHANGELOG.md for version $VERSION..." + + if [ ! -f "$CHANGELOG_FILE" ]; then + echo "::error::CHANGELOG.md not found! Please create CHANGELOG.md with release notes." + exit 1 + fi + + # Extract changelog section for this version + # Looks for "## X.Y.Z" header and captures until next "## " or "---" or end + CHANGELOG_CONTENT=$(awk -v ver="$VERSION" ' + BEGIN { found=0; content="" } + /^## / { + if (found) exit + # Match version at start of header (e.g., "## 2.7.3 -" or "## 2.7.3") + if ($2 == ver || $2 ~ "^"ver"[[:space:]]*-") { + found=1 + # Skip the header line itself, we will add our own + next + } + } + /^---$/ { if (found) exit } + found { content = content $0 "\n" } + END { + if (!found) { + print "NOT_FOUND" + exit 1 + } + # Trim leading/trailing whitespace + gsub(/^[[:space:]]+|[[:space:]]+$/, "", content) + print content + } + ' "$CHANGELOG_FILE") + + if [ "$CHANGELOG_CONTENT" = "NOT_FOUND" ] || [ -z "$CHANGELOG_CONTENT" ]; then + echo "" + echo "::error::═══════════════════════════════════════════════════════════════════════" + echo "::error:: CHANGELOG VALIDATION FAILED" + echo "::error::═══════════════════════════════════════════════════════════════════════" + echo "::error::" + echo "::error:: Version $VERSION not found in CHANGELOG.md!" + echo "::error::" + echo "::error:: Before releasing, please update CHANGELOG.md with an entry like:" + echo "::error::" + echo "::error:: ## $VERSION - Your Release Title" + echo "::error::" + echo "::error:: ### ✨ New Features" + echo "::error:: - Feature description" + echo "::error::" + echo "::error:: ### πŸ› Bug Fixes" + echo "::error:: - Fix description" + echo "::error::" + echo "::error::═══════════════════════════════════════════════════════════════════════" + echo "" + + # Also add to job summary for visibility + echo "## ❌ Release Blocked: Missing Changelog" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Version **$VERSION** was not found in CHANGELOG.md." >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "### How to fix:" >> $GITHUB_STEP_SUMMARY + echo "1. Update CHANGELOG.md with release notes for version $VERSION" >> $GITHUB_STEP_SUMMARY + echo "2. Commit and push the changes" >> $GITHUB_STEP_SUMMARY + echo "3. 
The release will automatically retry" >> $GITHUB_STEP_SUMMARY
+            echo "" >> $GITHUB_STEP_SUMMARY
+            echo "### Expected format:" >> $GITHUB_STEP_SUMMARY
+            echo "\`\`\`markdown" >> $GITHUB_STEP_SUMMARY
+            echo "## $VERSION - Release Title" >> $GITHUB_STEP_SUMMARY
+            echo "" >> $GITHUB_STEP_SUMMARY
+            echo "### ✨ New Features" >> $GITHUB_STEP_SUMMARY
+            echo "- Feature description" >> $GITHUB_STEP_SUMMARY
+            echo "" >> $GITHUB_STEP_SUMMARY
+            echo "### πŸ› Bug Fixes" >> $GITHUB_STEP_SUMMARY
+            echo "- Fix description" >> $GITHUB_STEP_SUMMARY
+            echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
+
+            exit 1
+          fi
+
+          echo "βœ… Found changelog entry for version $VERSION"
+          echo ""
+          echo "--- Extracted Release Notes ---"
+          echo "$CHANGELOG_CONTENT"
+          echo "--- End Release Notes ---"
+
+          # Save changelog to file for artifact upload
+          echo "$CHANGELOG_CONTENT" > changelog-extract.md
+
+          # Also save to output (for short changelogs)
+          # Using heredoc for multiline output
+          {
+            echo "content<<EOF"
+            echo "$CHANGELOG_CONTENT"
+            echo "EOF"
+          } >> $GITHUB_OUTPUT
+
+          echo "changelog_valid=true" >> $GITHUB_OUTPUT
+
+      # Upload changelog as artifact for release.yml to use
+      - name: Upload changelog artifact
+        if: steps.check.outputs.should_release == 'true' && steps.changelog.outputs.changelog_valid == 'true'
+        uses: actions/upload-artifact@v4
+        with:
+          name: changelog-${{ steps.check.outputs.new_version }}
+          path: changelog-extract.md
+          retention-days: 1
+
+      - name: Create and push tag
+        if: steps.check.outputs.should_release == 'true' && steps.changelog.outputs.changelog_valid == 'true'
         run: |
           VERSION="${{ steps.check.outputs.new_version }}"
           TAG="v$VERSION"
@@ -85,17 +201,19 @@ jobs:
       - name: Summary
         run: |
-          if [ "${{ steps.check.outputs.should_release }}" = "true" ]; then
+          if [ "${{ steps.check.outputs.should_release }}" = "true" ] && [ "${{ steps.changelog.outputs.changelog_valid }}" = "true" ]; then
             echo "## πŸš€ Release Triggered" >> $GITHUB_STEP_SUMMARY
             echo "" >> $GITHUB_STEP_SUMMARY
             echo "**Version:** v${{ steps.check.outputs.new_version }}" >> $GITHUB_STEP_SUMMARY
             echo "" >> $GITHUB_STEP_SUMMARY
+            echo "βœ… Changelog validated and extracted from CHANGELOG.md" >> $GITHUB_STEP_SUMMARY
+            echo "" >> $GITHUB_STEP_SUMMARY
             echo "The release workflow has been triggered and will:" >> $GITHUB_STEP_SUMMARY
             echo "1. Build binaries for all platforms" >> $GITHUB_STEP_SUMMARY
-            echo "2. Generate changelog from PRs" >> $GITHUB_STEP_SUMMARY
+            echo "2. Use changelog from CHANGELOG.md" >> $GITHUB_STEP_SUMMARY
             echo "3. Create GitHub release" >> $GITHUB_STEP_SUMMARY
             echo "4. 
Update README with new version" >> $GITHUB_STEP_SUMMARY - else + elif [ "${{ steps.check.outputs.should_release }}" = "false" ]; then echo "## ⏭️ No Release Needed" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Package version:** ${{ steps.package.outputs.version }}" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c6b6ddc99c..36f4e13877 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -46,13 +46,21 @@ jobs: - name: Install Rust toolchain (for building native Python packages) uses: dtolnay/rust-toolchain@stable + - name: Cache pip wheel cache (for compiled packages like real_ladybug) + uses: actions/cache@v4 + with: + path: ~/Library/Caches/pip + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-x64-3.12.8-rust + key: python-bundle-${{ runner.os }}-x64-3.12.8-rust-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8-rust- - name: Build application run: cd apps/frontend && npm run build @@ -93,6 +101,8 @@ jobs: path: | apps/frontend/dist/*.dmg apps/frontend/dist/*.zip + apps/frontend/dist/*.yml + apps/frontend/dist/*.blockmap # Apple Silicon build on ARM64 runner for native compilation build-macos-arm64: @@ -123,13 +133,21 @@ jobs: - name: Install dependencies run: cd apps/frontend && npm ci + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~/Library/Caches/pip + key: pip-wheel-${{ runner.os }}-arm64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-arm64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-arm64-3.12.8 + key: python-bundle-${{ runner.os }}-arm64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-arm64- + python-bundle-${{ runner.os }}-arm64-3.12.8- - name: Build application run: cd apps/frontend && npm run build @@ -170,6 +188,8 @@ jobs: path: | apps/frontend/dist/*.dmg apps/frontend/dist/*.zip + apps/frontend/dist/*.yml + apps/frontend/dist/*.blockmap build-windows: runs-on: windows-latest @@ -200,13 +220,21 @@ jobs: - name: Install dependencies run: cd apps/frontend && npm ci + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~\AppData\Local\pip\Cache + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-x64-3.12.8 + key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8- - name: Build application run: cd apps/frontend && npm run build @@ -224,6 +252,8 @@ jobs: name: windows-builds path: | apps/frontend/dist/*.exe + apps/frontend/dist/*.yml + apps/frontend/dist/*.blockmap build-linux: runs-on: ubuntu-latest @@ -261,13 +291,21 @@ jobs: flatpak install -y --user flathub org.freedesktop.Platform//25.08 org.freedesktop.Sdk//25.08 flatpak install -y --user flathub 
org.electronjs.Electron2.BaseApp//25.08 + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-x64-3.12.8 + key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8- - name: Build application run: cd apps/frontend && npm run build @@ -285,6 +323,8 @@ jobs: apps/frontend/dist/*.AppImage apps/frontend/dist/*.deb apps/frontend/dist/*.flatpak + apps/frontend/dist/*.yml + apps/frontend/dist/*.blockmap create-release: needs: [build-macos-intel, build-macos-arm64, build-windows, build-linux] @@ -304,16 +344,30 @@ jobs: - name: Flatten and validate artifacts run: | mkdir -p release-assets - find dist -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) -exec cp {} release-assets/ \; + find dist -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" -o -name "*.yml" -o -name "*.blockmap" \) -exec cp {} release-assets/ \; + + # Validate that installer files exist (not just manifests) + installer_count=$(find release-assets -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) | wc -l) + if [ "$installer_count" -eq 0 ]; then + echo "::error::No installer artifacts found! Expected .dmg, .zip, .exe, .AppImage, .deb, or .flatpak files." + exit 1 + fi + + echo "Found $installer_count installer(s):" + find release-assets -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) -exec basename {} \; - # Validate that at least one artifact was copied - artifact_count=$(find release-assets -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) | wc -l) - if [ "$artifact_count" -eq 0 ]; then - echo "::error::No build artifacts found! Expected .dmg, .zip, .exe, .AppImage, .deb, or .flatpak files." + # Validate that electron-updater manifest files are present (required for auto-updates) + yml_count=$(find release-assets -type f -name "*.yml" | wc -l) + if [ "$yml_count" -eq 0 ]; then + echo "::error::No update manifest (.yml) files found! Auto-update architecture detection will not work." 
exit 1 fi - echo "Found $artifact_count artifact(s):" + echo "Found $yml_count manifest file(s):" + find release-assets -type f -name "*.yml" -exec basename {} \; + + echo "" + echo "All release assets:" ls -la release-assets/ - name: Generate checksums @@ -473,23 +527,78 @@ jobs: cat release-assets/checksums.sha256 >> $GITHUB_STEP_SUMMARY echo "\`\`\`" >> $GITHUB_STEP_SUMMARY - - name: Generate changelog - if: ${{ github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.dry_run != true) }} + - name: Extract changelog from CHANGELOG.md + if: ${{ github.event_name == 'push' }} id: changelog - uses: release-drafter/release-drafter@v6 - with: - config-name: release-drafter.yml - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Extract version from tag (v2.7.2 -> 2.7.2) + VERSION=${GITHUB_REF_NAME#v} + CHANGELOG_FILE="CHANGELOG.md" + + echo "πŸ“‹ Extracting release notes for version $VERSION from CHANGELOG.md..." + + if [ ! -f "$CHANGELOG_FILE" ]; then + echo "::warning::CHANGELOG.md not found, using minimal release notes" + echo "body=Release v$VERSION" >> $GITHUB_OUTPUT + exit 0 + fi + + # Extract changelog section for this version + # Looks for "## X.Y.Z" header and captures until next "## " or "---" + CHANGELOG_CONTENT=$(awk -v ver="$VERSION" ' + BEGIN { found=0; content="" } + /^## / { + if (found) exit + # Match version at start of header (e.g., "## 2.7.3 -" or "## 2.7.3") + if ($2 == ver || $2 ~ "^"ver"[[:space:]]*-") { + found=1 + next + } + } + /^---$/ { if (found) exit } + found { content = content $0 "\n" } + END { + if (!found) { + print "NOT_FOUND" + exit 0 + } + # Trim leading/trailing whitespace + gsub(/^[[:space:]]+|[[:space:]]+$/, "", content) + print content + } + ' "$CHANGELOG_FILE") + + if [ "$CHANGELOG_CONTENT" = "NOT_FOUND" ] || [ -z "$CHANGELOG_CONTENT" ]; then + echo "::warning::Version $VERSION not found in CHANGELOG.md, using minimal release notes" + CHANGELOG_CONTENT="Release v$VERSION + +See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md) for details." 
+          fi
+
+          echo "βœ… Extracted changelog content"
+
+          # Save to file first (more reliable for multiline)
+          echo "$CHANGELOG_CONTENT" > changelog-body.md
+
+          # Use file-based output for multiline content
+          {
+            echo "body<<EOF"
+            cat changelog-body.md
+            echo "EOF"
+          } >> $GITHUB_OUTPUT

       - name: Create Release
-        if: ${{ github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.dry_run != true) }}
+        if: ${{ github.event_name == 'push' }}
         uses: softprops/action-gh-release@v2
         with:
           body: |
             ${{ steps.changelog.outputs.body }}
+            ---
+            ${{ steps.virustotal.outputs.vt_results }}
+
+            **Full Changelog**: https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md
           files: release-assets/*
           draft: false
           prerelease: ${{ contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}
@@ -500,7 +609,8 @@ jobs:
   update-readme:
     needs: [create-release]
     runs-on: ubuntu-latest
-    if: ${{ github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.dry_run != true) }}
+    # Only update README on actual releases (tag push), not dry runs
+    if: ${{ github.event_name == 'push' }}
     permissions:
       contents: write
     steps:
diff --git a/.gitignore b/.gitignore
index 7f53e4c59a..8c06000cc1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -163,3 +163,4 @@ _bmad-output/
 .claude/
 /docs
 OPUS_ANALYSIS_AND_IDEAS.md
+/.github/agents
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f67b77c813..0f996bccc2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,5 +1,6 @@
 repos:
   # Version sync - propagate root package.json version to all files
+  # NOTE: Skip in worktrees - version sync modifies root files which don't exist in worktree
   - repo: local
     hooks:
       - id: version-sync
@@ -8,6 +9,12 @@ repos:
         args:
           - -c
           - |
+            # Skip in worktrees - .git is a file pointing to main repo, not a directory
+            # Version sync modifies root-level files that may not exist in worktree context
+            if [ -f ".git" ]; then
+              echo "Skipping version-sync in worktree (root files not accessible)"
+              exit 0
+            fi
             VERSION=$(node -p "require('./package.json').version")

             if [ -n "$VERSION" ]; then
@@ -81,6 +88,7 @@ repos:
   # Python tests (apps/backend/) - skip slow/integration tests for pre-commit speed
   # Tests to skip: graphiti (external deps), merge_file_tracker/service_orchestrator/worktree/workspace (Windows path/git issues)
+  # NOTE: Skip this hook in worktrees (where .git is a file, not a directory)
   - repo: local
     hooks:
       - id: pytest
@@ -89,6 +97,12 @@ repos:
         args:
           - -c
           - |
+            # Skip in worktrees - .git is a file pointing to main repo, not a directory
+            # This prevents path resolution issues with ../../tests/ in worktree context
+            if [ -f ".git" ]; then
+              echo "Skipping pytest in worktree (path resolution would fail)"
+              exit 0
+            fi
             cd apps/backend
             if [ -f ".venv/bin/pytest" ]; then
               PYTEST_CMD=".venv/bin/pytest"
@@ -113,18 +127,37 @@ repos:
         pass_filenames: false

   # Frontend linting (apps/frontend/)
+  # NOTE: These hooks check for worktree context to avoid npm/node_modules issues
   - repo: local
     hooks:
       - id: eslint
         name: ESLint
-        entry: bash -c 'cd apps/frontend && npm run lint'
+        entry: bash
+        args:
+          - -c
+          - |
+            # Skip in worktrees if node_modules doesn't exist (dependencies not installed)
+            if [ -f ".git" ] && [ ! 
-d "apps/frontend/node_modules" ]; then + echo "Skipping ESLint in worktree (node_modules not found)" + exit 0 + fi + cd apps/frontend && npm run lint language: system files: ^apps/frontend/.*\.(ts|tsx|js|jsx)$ pass_filenames: false - id: typecheck name: TypeScript Check - entry: bash -c 'cd apps/frontend && npm run typecheck' + entry: bash + args: + - -c + - | + # Skip in worktrees if node_modules doesn't exist (dependencies not installed) + if [ -f ".git" ] && [ ! -d "apps/frontend/node_modules" ]; then + echo "Skipping TypeScript check in worktree (node_modules not found)" + exit 0 + fi + cd apps/frontend && npm run typecheck language: system files: ^apps/frontend/.*\.(ts|tsx)$ pass_filenames: false diff --git a/CHANGELOG.md b/CHANGELOG.md index 2fb1a26e82..22c43eb8da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,283 @@ +## 2.7.2 - Stability & Performance Enhancements + +### ✨ New Features + +- Added refresh button to Kanban board for manually reloading tasks + +- Terminal dropdown with built-in and external options in task review + +- Centralized CLI tool path management with customizable settings + +- Files tab in task details panel for better file organization + +- Enhanced PR review page with filtering capabilities + +- GitLab integration support + +- Automated PR review with follow-up support and structured outputs + +- UI scale feature with 75-200% range for accessibility + +- Python 3.12 bundled with packaged Electron app + +- OpenRouter support as LLM/embedding provider + +- Internationalization (i18n) system for multi-language support + +- Flatpak packaging support for Linux + +- Path-aware AI merge resolution with device code streaming + +### πŸ› οΈ Improvements + +- Improved terminal experience with persistent state when switching projects + +- Enhanced PR review with structured outputs and fork support + +- Better UX for display and scaling changes + +- Convert synchronous I/O to async operations in worktree handlers + +- Enhanced logs for commit linting stage + +- Remove top navigation bars for cleaner UI + +- Enhanced PR detail area visual design + +- Improved CLI tool detection with more language support + +- Added iOS/Swift project detection + +- Optimize performance by removing projectTabs from useEffect dependencies + +- Improved Python detection and version validation for compatibility + +### πŸ› Bug Fixes + +- Fixed CI Python setup and PR status gate checks + +- Fixed cross-platform CLI path detection and clearing in settings + +- Preserve original task description after spec creation + +- Fixed learning loop to retrieve patterns and gotchas from memory + +- Resolved frontend lag and updated dependencies + +- Fixed Content-Security-Policy to allow external HTTPS images + +- Fixed PR review isolation by using temporary worktree + +- Fixed Homebrew Python detection to prefer versioned Python over system python3 + +- Added support for Bun 1.2.0+ lock file format detection + +- Fixed infinite re-render loop in task selection + +- Fixed infinite loop in task detail merge preview loading + +- Resolved Windows EINVAL error when opening worktree in VS Code + +- Fixed fallback to prevent tasks stuck in ai_review status + +- Fixed SDK permissions to include spec_dir + +- Added --base-branch argument support to spec_runner + +- Allow Windows to run CC PR Reviewer + +- Fixed model selection to respect task_metadata.json + +- Improved GitHub PR review by passing repo parameter explicitly + +- Fixed electron-log imports with .js extension + +- Fixed Swift detection 
order in project analyzer + +- Prevent TaskEditDialog from unmounting when opened + +- Fixed subprocess handling for Python paths with spaces + +- Fixed file system race conditions and unused variables in security scanning + +- Resolved Python detection and backend packaging issues + +- Fixed version-specific links in README and pre-commit hooks + +- Fixed task status persistence reverting on refresh + +- Proper semver comparison for pre-release versions + +- Use virtual environment Python for all services to fix dotenv errors + +- Fixed explicit Windows System32 tar path for builds + +- Added augmented PATH environment to all GitHub CLI calls + +- Use PowerShell for tar extraction on Windows + +- Added --force-local flag to tar on Windows + +- Stop tracking spec files in git + +- Fixed GitHub API calls with explicit GET method for comment fetches + +- Support archiving tasks across all worktree locations + +- Validated backend source path before using it + +- Resolved spawn Python ENOENT error on Linux + +- Fixed CodeQL alerts for uncontrolled command line + +- Resolved GitHub follow-up review API issues + +- Fixed relative path normalization to POSIX format + +- Accepted bug_fix workflow_type alias during planning + +- Added global spec numbering lock to prevent collisions + +- Fixed ideation status sync + +- Stopped running process when task status changes away from in_progress + +- Removed legacy path from auto-claude source detection + +- Resolved Python environment race condition + +--- + +## What's Changed + +- fix(ci): add Python setup to beta-release and fix PR status gate checks (#565) by @Andy in c2148bb9 +- fix: detect and clear cross-platform CLI paths in settings (#535) by @Andy in 29e45505 +- fix(ui): preserve original task description after spec creation (#536) by @Andy in 7990dcb4 +- fix(memory): fix learning loop to retrieve patterns and gotchas (#530) by @Andy in f58c2578 +- fix: resolve frontend lag and update dependencies (#526) by @Andy in 30f7951a +- feat(kanban): add refresh button to manually reload tasks (#548) by @Adryan Serage in 252242f9 +- fix(csp): allow external HTTPS images in Content-Security-Policy (#549) by @Michael Ludlow in 3db02c5d +- fix(pr-review): use temporary worktree for PR review isolation (#532) by @Andy in 344ec65e +- fix: prefer versioned Homebrew Python over system python3 (#494) by @Navid in 8d58dd6f +- fix(detection): support bun.lock text format for Bun 1.2.0+ (#525) by @Andy in 4da8cd66 +- chore: bump version to 2.7.2-beta.12 (#460) by @Andy in 8e5c11ac +- Fix/windows issues (#471) by @Andy in 72106109 +- fix(ci): add Rust toolchain for Intel Mac builds (#459) by @Andy in 52a4fcc6 +- fix: create spec.md during roadmap-to-task conversion (#446) by @Mulaveesala Pranaveswar in fb6b7fc6 +- fix(pr-review): treat LOW-only findings as ready to merge (#455) by @Andy in 0f9c5b84 +- Fix/2.7.2 beta12 (#424) by @Andy in 5d8ede23 +- feat: remove top bars (#386) by @VinΓ­cius Santos in da31b687 +- fix: prevent infinite re-render loop in task selection useEffect (#442) by @Abe Diaz in 2effa535 +- fix: accept Python 3.12+ in install-backend.js (#443) by @Abe Diaz in c15bb311 +- fix: infinite loop in useTaskDetail merge preview loading (#444) by @Abe Diaz in 203a970a +- fix(windows): resolve EINVAL error when opening worktree in VS Code (#434) by @VinΓ­cius Santos in 3c0708b7 +- feat(frontend): Add Files tab to task details panel (#430) by @Mitsu in 666794b5 +- refactor: remove deprecated TaskDetailPanel component (#432) by @Mitsu in ac8dfcac +- 
fix(ui): add fallback to prevent tasks stuck in ai_review status (#397) by @Michael Ludlow in 798ca79d +- feat: Enhance the look of the PR Detail area (#427) by @Alex in bdb01549 +- ci: remove conventional commits PR title validation workflow by @AndyMik90 in 515b73b5 +- fix(client): add spec_dir to SDK permissions (#429) by @Mitsu in 88c76059 +- fix(spec_runner): add --base-branch argument support (#428) by @Mitsu in 62a75515 +- feat: enhance pr review page to include PRs filters (#423) by @Alex in 717fba04 +- feat: add gitlab integration (#254) by @Mitsu in 0a571d3a +- fix: Allow windows to run CC PR Reviewer (#406) by @Alex in 2f662469 +- fix(model): respect task_metadata.json model selection (#415) by @Andy in e7e6b521 +- feat(build): add Flatpak packaging support for Linux (#404) by @Mitsu in 230de5fc +- fix(github): pass repo parameter to GHClient for explicit PR resolution (#413) by @Andy in 4bdf7a0c +- chore(ci): remove redundant CLA GitHub Action workflow by @AndyMik90 in a39ea49d +- fix(frontend): add .js extension to electron-log/main imports by @AndyMik90 in 9aef0dd0 +- fix: 2.7.2 bug fixes and improvements (#388) by @Andy in 05131217 +- fix(analyzer): move Swift detection before Ruby detection (#401) by @Michael Ludlow in 321c9712 +- fix(ui): prevent TaskEditDialog from unmounting when opened (#395) by @Michael Ludlow in 98b12ed8 +- fix: improve CLI tool detection and add Claude CLI path settings (#393) by @Joe in aaa83131 +- feat(analyzer): add iOS/Swift project detection (#389) by @Michael Ludlow in 68548e33 +- fix(github): improve PR review with structured outputs and fork support (#363) by @Andy in 7751588e +- fix(ideation): update progress calculation to include just-completed ideation type (#381) by @Illia Filippov in 8b4ce58c +- Fixes failing spec - "gh CLI Check Handler - should return installed: true when gh CLI is found" (#370) by @Ian in bc220645 +- fix: Memory Status card respects configured embedding provider (#336) (#373) by @Michael Ludlow in db0cbea3 +- fix: fixed version-specific links in readme and pre-commit hook that updates them (#378) by @Ian in 0ca2e3f6 +- docs: add security research documentation (#361) by @Brian in 2d3b7fb4 +- fix/Improving UX for Display/Scaling Changes (#332) by @Kevin Rajan in 9bbdef09 +- fix(perf): remove projectTabs from useEffect deps to fix re-render loop (#362) by @Michael Ludlow in 753dc8bb +- fix(security): invalidate profile cache when file is created/modified (#355) by @Michael Ludlow in 20f20fa3 +- fix(subprocess): handle Python paths with spaces (#352) by @Michael Ludlow in eabe7c7d +- fix: Resolve pre-commit hook failures with version sync, pytest path, ruff version, and broken quality-dco workflow (#334) by @Ian in 1fa7a9c7 +- fix(terminal): preserve terminal state when switching projects (#358) by @Andy in 7881b2d1 +- fix(analyzer): add C#/Java/Swift/Kotlin project files to security hash (#351) by @Michael Ludlow in 4e71361b +- fix: make backend tests pass on Windows (#282) by @Oluwatosin Oyeladun in 4dcc5afa +- fix(ui): close parent modal when Edit dialog opens (#354) by @Michael Ludlow in e9782db0 +- chore: bump version to 2.7.2-beta.10 by @AndyMik90 in 40d04d7c +- feat: add terminal dropdown with inbuilt and external options in task review (#347) by @JoshuaRileyDev in fef07c95 +- refactor: remove deprecated code across backend and frontend (#348) by @Mitsu in 9d43abed +- feat: centralize CLI tool path management (#341) by @HSSAINI Saad in d51f4562 +- refactor(components): remove deprecated TaskDetailPanel re-export 
(#344) by @Mitsu in 787667e9 +- chore: Refactor/kanban realtime status sync (#249) by @souky-byte in 9734b70b +- refactor(settings): remove deprecated ProjectSettings modal and hooks (#343) by @Mitsu in fec6b9f3 +- perf: convert synchronous I/O to async operations in worktree handlers (#337) by @JoshuaRileyDev in d3a63b09 +- feat: bump version (#329) by @Alex in 50e3111a +- fix(ci): remove version bump to fix branch protection conflict (#325) by @Michael Ludlow in 8a80b1d5 +- fix(tasks): sync status to worktree implementation plan to prevent reset (#243) (#323) by @Alex in cb6b2165 +- fix(ci): add auto-updater manifest files and version auto-update (#317) by @Michael Ludlow in 661e47c3 +- fix(project): fix task status persistence reverting on refresh (#246) (#318) by @Michael Ludlow in e80ef79d +- fix(updater): proper semver comparison for pre-release versions (#313) by @Michael Ludlow in e1b0f743 +- fix(python): use venv Python for all services to fix dotenv errors (#311) by @Alex in 92c6f278 +- chore(ci): cancel in-progress runs (#302) by @Oluwatosin Oyeladun in 1c142273 +- fix(build): use explicit Windows System32 tar path (#308) by @Andy in c0a02a45 +- fix(github): add augmented PATH env to all gh CLI calls by @AndyMik90 in 086429cb +- fix(build): use PowerShell for tar extraction on Windows by @AndyMik90 in d9fb8f29 +- fix(build): add --force-local flag to tar on Windows (#303) by @Andy in d0b0b3df +- fix: stop tracking spec files in git (#295) by @Andy in 937a60f8 +- Fix/2.7.2 fixes (#300) by @Andy in 7a51cbd5 +- feat(merge,oauth): add path-aware AI merge resolution and device code streaming (#296) by @Andy in 26beefe3 +- feat: enhance the logs for the commit linting stage (#293) by @Alex in 8416f307 +- fix(github): add explicit GET method to gh api comment fetches (#294) by @Andy in 217249c8 +- fix(frontend): support archiving tasks across all worktree locations (#286) by @Andy in 8bb3df91 +- Potential fix for code scanning alert no. 224: Uncontrolled command line (#285) by @Andy in 5106c6e9 +- fix(frontend): validate backend source path before using it (#287) by @Andy in 3ff61274 +- feat(python): bundle Python 3.12 with packaged Electron app (#284) by @Andy in 7f19c2e1 +- fix: resolve spawn python ENOENT error on Linux by using getAugmentedEnv() (#281) by @Todd W. 
Bucy in d98e2830 +- fix(ci): add write permissions to beta-release update-version job by @AndyMik90 in 0b874d4b +- chore(deps): bump @xterm/xterm from 5.5.0 to 6.0.0 in /apps/frontend (#270) by @dependabot[bot] in 50dd1078 +- fix(github): resolve follow-up review API issues by @AndyMik90 in f1cc5a09 +- fix(security): resolve CodeQL file system race conditions and unused variables (#277) by @Andy in b005fa5c +- fix(ci): use correct electron-builder arch flags (#278) by @Andy in d79f2da4 +- chore(deps): bump jsdom from 26.1.0 to 27.3.0 in /apps/frontend (#268) by @dependabot[bot] in 5ac566e2 +- chore(deps): bump typescript-eslint in /apps/frontend (#269) by @dependabot[bot] in f49d4817 +- fix(ci): use develop branch for dry-run builds in beta-release workflow (#276) by @Andy in 1e1d7d9b +- fix: accept bug_fix workflow_type alias during planning (#240) by @Daniel Frey in e74a3dff +- fix(paths): normalize relative paths to posix (#239) by @Daniel Frey in 6ac8250b +- chore(deps): bump @electron/rebuild in /apps/frontend (#271) by @dependabot[bot] in a2cee694 +- chore(deps): bump vitest from 4.0.15 to 4.0.16 in /apps/frontend (#272) by @dependabot[bot] in d4cad80a +- feat(github): add automated PR review with follow-up support (#252) by @Andy in 596e9513 +- ci: implement enterprise-grade PR quality gates and security scanning (#266) by @Alex in d42041c5 +- fix: update path resolution for ollama_model_detector.py in memory handlers (#263) by @delyethan in a3f87540 +- feat: add i18n internationalization system (#248) by @Mitsu in f8438112 +- Revert "Feat/Auto Fix Github issues and do extensive AI PR reviews (#250)" (#251) by @Andy in 5e8c5308 +- Feat/Auto Fix Github issues and do extensive AI PR reviews (#250) by @Andy in 348de6df +- fix: resolve Python detection and backend packaging issues (#241) by @HSSAINI Saad in 0f7d6e05 +- fix: add future annotations import to discovery.py (#229) by @Joris Slagter in 5ccdb6ab +- Fix/ideation status sync (#212) by @souky-byte in 6ec8549f +- fix(core): add global spec numbering lock to prevent collisions (#209) by @Andy in 53527293 +- feat: Add OpenRouter as LLM/embedding provider (#162) by @Fernando Possebon in 02bef954 +- fix: Add Python 3.10+ version validation and GitHub Actions Python setup (#180 #167) (#208) by @Fernando Possebon in f168bdc3 +- fix(ci): correct welcome workflow PR message (#206) by @Andy in e3eec68a +- Feat/beta release (#193) by @Andy in 407a0bee +- feat/beta-release (#190) by @Andy in 8f766ad1 +- fix/PRs from old main setup to apps structure (#185) by @Andy in ced2ad47 +- fix: hide status badge when execution phase badge is showing (#154) by @Andy in 05f5d303 +- feat: Add UI scale feature with 75-200% range (#125) by @Enes CingΓΆz in 6951251b +- fix(task): stop running process when task status changes away from in_progress by @AndyMik90 in 30e7536b +- Fix/linear 400 error by @Andy in 220faf0f +- fix: remove legacy path from auto-claude source detection (#148) by @Joris Slagter in f96c6301 +- fix: resolve Python environment race condition (#142) by @Joris Slagter in ebd8340d +- Feat: Ollama download progress tracking with new apps structure (#141) by @rayBlock in df779530 +- Feature/apps restructure v2.7.2 (#138) by @Andy in 0adaddac +- docs: Add Git Flow branching strategy to CONTRIBUTING.md by @AndyMik90 in 91f7051d + +## Thanks to all contributors + +@Andy, @Adryan Serage, @Michael Ludlow, @Navid, @Mulaveesala Pranaveswar, @VinΓ­cius Santos, @Abe Diaz, @Mitsu, @Alex, @AndyMik90, @Joe, @Illia Filippov, @Ian, @Brian, @Kevin Rajan, 
@Oluwatosin Oyeladun, @JoshuaRileyDev, @HSSAINI Saad, @souky-byte, @Todd W. Bucy, @dependabot[bot], @Daniel Frey, @delyethan, @Joris Slagter, @Fernando Possebon, @Enes CingΓΆz, @rayBlock + ## 2.7.1 - Build Pipeline Enhancements ### πŸ› οΈ Improvements diff --git a/README.md b/README.md index d22c5216a2..b5c6f60cef 100644 --- a/README.md +++ b/README.md @@ -4,11 +4,9 @@ ![Auto Claude Kanban Board](.github/assets/Auto-Claude-Kanban.png) - -[![Version](https://img.shields.io/badge/version-2.7.2-blue?style=flat-square)](https://github.com/AndyMik90/Auto-Claude/releases/tag/v2.7.2) - [![License](https://img.shields.io/badge/license-AGPL--3.0-green?style=flat-square)](./agpl-3.0.txt) [![Discord](https://img.shields.io/badge/Discord-Join%20Community-5865F2?style=flat-square&logo=discord&logoColor=white)](https://discord.gg/KCXaPBr4Dj) +[![YouTube](https://img.shields.io/badge/YouTube-Subscribe-FF0000?style=flat-square&logo=youtube&logoColor=white)](https://www.youtube.com/@AndreMikalsen) [![CI](https://img.shields.io/github/actions/workflow/status/AndyMik90/Auto-Claude/ci.yml?branch=main&style=flat-square&label=CI)](https://github.com/AndyMik90/Auto-Claude/actions) --- @@ -24,11 +22,11 @@ | Platform | Download | |----------|----------| -| **Windows** | [Auto-Claude-2.7.1-win32-x64.exe](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-win32-x64.exe) | -| **macOS (Apple Silicon)** | [Auto-Claude-2.7.1-darwin-arm64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-darwin-arm64.dmg) | -| **macOS (Intel)** | [Auto-Claude-2.7.1-darwin-x64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-darwin-x64.dmg) | -| **Linux** | [Auto-Claude-2.7.1-linux-x86_64.AppImage](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-linux-x86_64.AppImage) | -| **Linux (Debian)** | [Auto-Claude-2.7.1-linux-amd64.deb](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-linux-amd64.deb) | +| **Windows** | [Auto-Claude-2.7.2-win32-x64.exe](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-win32-x64.exe) | +| **macOS (Apple Silicon)** | [Auto-Claude-2.7.2-darwin-arm64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-darwin-arm64.dmg) | +| **macOS (Intel)** | [Auto-Claude-2.7.2-darwin-x64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-darwin-x64.dmg) | +| **Linux** | [Auto-Claude-2.7.2-linux-x86_64.AppImage](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-linux-x86_64.AppImage) | +| **Linux (Debian)** | [Auto-Claude-2.7.2-linux-amd64.deb](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-linux-amd64.deb) | ### Beta Release @@ -59,7 +57,6 @@ - **Claude Pro/Max subscription** - [Get one here](https://claude.ai/upgrade) - **Claude Code CLI** - `npm install -g @anthropic-ai/claude-code` - **Git repository** - Your project must be initialized as a git repo -- **Python 3.12+** - Required for the backend and Memory Layer --- @@ -148,113 +145,11 @@ See [guides/CLI-USAGE.md](guides/CLI-USAGE.md) for complete CLI documentation. --- -## Configuration +## Development -Create `apps/backend/.env` from the example: +Want to build from source or contribute? See [CONTRIBUTING.md](CONTRIBUTING.md) for complete development setup instructions. 
-```bash -cp apps/backend/.env.example apps/backend/.env -``` - -| Variable | Required | Description | -|----------|----------|-------------| -| `CLAUDE_CODE_OAUTH_TOKEN` | Yes | OAuth token from `claude setup-token` | -| `GRAPHITI_ENABLED` | No | Enable Memory Layer for cross-session context | -| `AUTO_BUILD_MODEL` | No | Override the default Claude model | -| `GITLAB_TOKEN` | No | GitLab Personal Access Token for GitLab integration | -| `GITLAB_INSTANCE_URL` | No | GitLab instance URL (defaults to gitlab.com) | -| `LINEAR_API_KEY` | No | Linear API key for task sync | - ---- - -## Building from Source - -For contributors and development: - -```bash -# Clone the repository -git clone https://github.com/AndyMik90/Auto-Claude.git -cd Auto-Claude - -# Install all dependencies -npm run install:all - -# Run in development mode -npm run dev - -# Or build and run -npm start -``` - -**System requirements for building:** -- Node.js 24+ -- Python 3.12+ -- npm 10+ - -**Installing dependencies by platform:** - -
-<details>
-<summary>Windows</summary>
-
-```bash
-winget install Python.Python.3.12
-winget install OpenJS.NodeJS.LTS
-```
-
-</details>
- -
-<details>
-<summary>macOS</summary>
-
-```bash
-brew install python@3.12 node@24
-```
-
-</details>
- -
-<details>
-<summary>Linux (Ubuntu/Debian)</summary>
-
-```bash
-sudo apt install python3.12 python3.12-venv
-curl -fsSL https://deb.nodesource.com/setup_24.x | sudo -E bash -
-sudo apt install -y nodejs
-```
-
-</details>
- -
-<details>
-<summary>Linux (Fedora)</summary>
-
-```bash
-sudo dnf install python3.12 nodejs npm
-```
-
-</details>
- -See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed development setup. - -### Building Flatpak - -To build the Flatpak package, you need additional dependencies: - -```bash -# Fedora/RHEL -sudo dnf install flatpak-builder - -# Ubuntu/Debian -sudo apt install flatpak-builder - -# Install required Flatpak runtimes -flatpak install flathub org.freedesktop.Platform//25.08 org.freedesktop.Sdk//25.08 -flatpak install flathub org.electronjs.Electron2.BaseApp//25.08 - -# Build the Flatpak -cd apps/frontend -npm run package:flatpak -``` - -The Flatpak will be created in `apps/frontend/dist/`. +For Linux-specific builds (Flatpak, AppImage), see [guides/linux.md](guides/linux.md). --- @@ -284,7 +179,7 @@ All releases are: | `npm run package:mac` | Package for macOS | | `npm run package:win` | Package for Windows | | `npm run package:linux` | Package for Linux | -| `npm run package:flatpak` | Package as Flatpak | +| `npm run package:flatpak` | Package as Flatpak (see [guides/linux.md](guides/linux.md)) | | `npm run lint` | Run linter | | `npm test` | Run frontend tests | | `npm run test:backend` | Run backend tests | @@ -316,3 +211,11 @@ We welcome contributions! Please read [CONTRIBUTING.md](CONTRIBUTING.md) for: Auto Claude is free to use. If you modify and distribute it, or run it as a service, your code must also be open source under AGPL-3.0. Commercial licensing available for closed-source use cases. + +--- + +## Star History + +[![GitHub Repo stars](https://img.shields.io/github/stars/AndyMik90/Auto-Claude?style=social)](https://github.com/AndyMik90/Auto-Claude/stargazers) + +[![Star History Chart](https://api.star-history.com/svg?repos=AndyMik90/Auto-Claude&type=Date)](https://star-history.com/#AndyMik90/Auto-Claude&Date) diff --git a/RELEASE.md b/RELEASE.md index d7f6eb10dd..21d0e6b53d 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -69,9 +69,38 @@ This will: - Update `apps/frontend/package.json` - Update `package.json` (root) - Update `apps/backend/__init__.py` +- Check if `CHANGELOG.md` has an entry for the new version (warns if missing) - Create a commit with message `chore: bump version to X.Y.Z` -### Step 2: Push and Create PR +### Step 2: Update CHANGELOG.md (REQUIRED) + +**IMPORTANT: The release will fail if CHANGELOG.md doesn't have an entry for the new version.** + +Add release notes to `CHANGELOG.md` at the top of the file: + +```markdown +## 2.8.0 - Your Release Title + +### ✨ New Features +- Feature description + +### πŸ› οΈ Improvements +- Improvement description + +### πŸ› Bug Fixes +- Fix description + +--- +``` + +Then amend the version bump commit: + +```bash +git add CHANGELOG.md +git commit --amend --no-edit +``` + +### Step 3: Push and Create PR ```bash # Push your branch @@ -81,24 +110,25 @@ git push origin your-branch gh pr create --base main --title "Release v2.8.0" ``` -### Step 3: Merge to Main +### Step 4: Merge to Main Once the PR is approved and merged to `main`, GitHub Actions will automatically: 1. **Detect the version bump** (`prepare-release.yml`) -2. **Create a git tag** (e.g., `v2.8.0`) -3. **Trigger the release workflow** (`release.yml`) -4. **Build binaries** for all platforms: +2. **Validate CHANGELOG.md** has an entry for the new version (FAILS if missing) +3. **Extract release notes** from CHANGELOG.md +4. **Create a git tag** (e.g., `v2.8.0`) +5. **Trigger the release workflow** (`release.yml`) +6. 
**Build binaries** for all platforms:
    - macOS Intel (x64) - code signed & notarized
    - macOS Apple Silicon (arm64) - code signed & notarized
    - Windows (NSIS installer) - code signed
    - Linux (AppImage + .deb)
-5. **Generate changelog** from merged PRs (using release-drafter)
-6. **Scan binaries** with VirusTotal
-7. **Create GitHub release** with all artifacts
-8. **Update README** with new version badge and download links
+7. **Scan binaries** with VirusTotal
+8. **Create GitHub release** with release notes from CHANGELOG.md
+9. **Update README** with new version badge and download links

-### Step 4: Verify
+### Step 5: Verify

 After merging, check:
 - [GitHub Actions](https://github.com/AndyMik90/Auto-Claude/actions) - ensure all workflows pass
@@ -113,28 +143,49 @@ We follow [Semantic Versioning](https://semver.org/):
 - **MINOR** (0.X.0): New features, backwards compatible
 - **PATCH** (0.0.X): Bug fixes, backwards compatible

-## Changelog Generation
+## Changelog Management
+
+Release notes are managed in `CHANGELOG.md` and used for GitHub releases.
+
+### Changelog Format

-Changelogs are automatically generated from merged PRs using [Release Drafter](https://github.com/release-drafter/release-drafter).
+Each version entry in `CHANGELOG.md` should follow this format:

-### PR Labels for Changelog Categories
+```markdown
+## X.Y.Z - Release Title

-| Label | Category |
-|-------|----------|
-| `feature`, `enhancement` | New Features |
-| `bug`, `fix` | Bug Fixes |
-| `improvement`, `refactor` | Improvements |
-| `documentation` | Documentation |
-| (any other) | Other Changes |
+### ✨ New Features
+- Feature description with context

-**Tip:** Add appropriate labels to your PRs for better changelog organization.
+### πŸ› οΈ Improvements
+- Improvement description
+
+### πŸ› Bug Fixes
+- Fix description
+
+---
+```
+
+### Changelog Validation
+
+The release workflow **validates** that `CHANGELOG.md` has an entry for the version being released:
+
+- If the entry is **missing**, the release is **blocked** with a clear error message
+- If the entry **exists**, its content is used for the GitHub release notes
+
+### Writing Good Release Notes
+
+- **Be specific**: Instead of "Fixed bug", write "Fixed crash when opening large files"
+- **Group by impact**: Features first, then improvements, then fixes
+- **Credit contributors**: Mention contributors for significant changes
+- **Link issues**: Reference GitHub issues where relevant (e.g., "Fixes #123")

 ## Workflows

 | Workflow | Trigger | Purpose |
 |----------|---------|---------|
-| `prepare-release.yml` | Push to `main` | Detects version bump, creates tag |
-| `release.yml` | Tag `v*` pushed | Builds binaries, creates release |
+| `prepare-release.yml` | Push to `main` | Detects version bump, **validates CHANGELOG.md**, creates tag |
+| `release.yml` | Tag `v*` pushed | Builds binaries, extracts changelog, creates release |
 | `validate-version.yml` | Tag `v*` pushed | Validates tag matches package.json |
 | `update-readme` (in release.yml) | After release | Updates README with new version |
@@ -153,6 +204,22 @@ Changelogs are automatically generated from merged PRs using [Release Drafter](https://github.com/release-drafter/release-drafter).
 git diff HEAD~1 --name-only | grep package.json
 ```

+### Release blocked: Missing changelog entry
+
+If you see "CHANGELOG VALIDATION FAILED" in the workflow:
+
+1. The `prepare-release.yml` workflow detected that `CHANGELOG.md` has no entry for the new version
+2. **Fix**: Add an entry to `CHANGELOG.md` with the format `## X.Y.Z - Title`
+3. 
Commit and push the changelog update +4. The workflow will automatically retry when the changes are pushed to `main` + +```bash +# Add changelog entry, then: +git add CHANGELOG.md +git commit -m "docs: add changelog for vX.Y.Z" +git push origin main +``` + ### Build failed after tag was created - The release won't be published if builds fail diff --git a/apps/backend/.env.example b/apps/backend/.env.example index b481cf5b7d..5f0f91db6d 100644 --- a/apps/backend/.env.example +++ b/apps/backend/.env.example @@ -108,6 +108,32 @@ # If not set, will auto-detect from git remote # GITLAB_PROJECT=mygroup/myproject +# ============================================================================= +# AZURE DEVOPS INTEGRATION (OPTIONAL) +# ============================================================================= +# Enable Azure DevOps integration for work items and pull requests. +# Supports both Azure DevOps Services (cloud) and Azure DevOps Server (on-prem). +# +# Authentication: Personal Access Token (PAT) +# Create at: https://dev.azure.com/{org}/_usersSettings/tokens +# Required scopes: Code (Read & Write), Work Items (Read & Write) + +# Azure DevOps Organization (REQUIRED for ADO) +# ADO_ORGANIZATION=myorganization + +# Azure DevOps Project (REQUIRED for ADO) +# ADO_PROJECT=myproject + +# Azure DevOps Repository Name (OPTIONAL - defaults to project name) +# ADO_REPO_NAME=myrepo + +# Azure DevOps Personal Access Token (REQUIRED for ADO) +# ADO_PAT=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + +# Azure DevOps Instance URL (OPTIONAL - defaults to Azure DevOps Services) +# For Azure DevOps Server (on-prem): ADO_INSTANCE_URL=https://devops.yourcompany.com +# ADO_INSTANCE_URL=https://dev.azure.com + # ============================================================================= # UI SETTINGS (OPTIONAL) # ============================================================================= diff --git a/apps/backend/agents/README.md b/apps/backend/agents/README.md index 1cf2b2fb81..85253eae26 100644 --- a/apps/backend/agents/README.md +++ b/apps/backend/agents/README.md @@ -26,7 +26,7 @@ auto-claude/agents/ ### `utils.py` (3.6 KB) - Git operations: `get_latest_commit()`, `get_commit_count()` - Plan management: `load_implementation_plan()`, `find_subtask_in_plan()`, `find_phase_for_subtask()` -- Workspace sync: `sync_plan_to_source()` +- Workspace sync: `sync_spec_to_source()` ### `memory.py` (13 KB) - Dual-layer memory system (Graphiti primary, file-based fallback) @@ -73,7 +73,7 @@ from agents import ( # Utilities get_latest_commit, load_implementation_plan, - sync_plan_to_source, + sync_spec_to_source, ) ``` diff --git a/apps/backend/agents/__init__.py b/apps/backend/agents/__init__.py index 37dae174c4..4eed468607 100644 --- a/apps/backend/agents/__init__.py +++ b/apps/backend/agents/__init__.py @@ -14,6 +14,10 @@ Uses lazy imports to avoid circular dependencies. 
""" +# Explicit import required by CodeQL static analysis +# (CodeQL doesn't recognize __getattr__ dynamic exports) +from .utils import sync_spec_to_source + __all__ = [ # Main API "run_autonomous_agent", @@ -32,7 +36,7 @@ "load_implementation_plan", "find_subtask_in_plan", "find_phase_for_subtask", - "sync_plan_to_source", + "sync_spec_to_source", # Constants "AUTO_CONTINUE_DELAY_SECONDS", "HUMAN_INTERVENTION_FILE", @@ -77,7 +81,7 @@ def __getattr__(name): "get_commit_count", "get_latest_commit", "load_implementation_plan", - "sync_plan_to_source", + "sync_spec_to_source", ): from .utils import ( find_phase_for_subtask, @@ -85,7 +89,7 @@ def __getattr__(name): get_commit_count, get_latest_commit, load_implementation_plan, - sync_plan_to_source, + sync_spec_to_source, ) return locals()[name] diff --git a/apps/backend/agents/coder.py b/apps/backend/agents/coder.py index 39d43b30a0..863aef1c7d 100644 --- a/apps/backend/agents/coder.py +++ b/apps/backend/agents/coder.py @@ -7,6 +7,7 @@ import asyncio import logging +import os from pathlib import Path from core.client import create_client @@ -37,6 +38,7 @@ ) from prompts import is_first_run from recovery import RecoveryManager +from security.constants import PROJECT_DIR_ENV_VAR from task_logger import ( LogPhase, get_task_logger, @@ -62,7 +64,7 @@ get_commit_count, get_latest_commit, load_implementation_plan, - sync_plan_to_source, + sync_spec_to_source, ) logger = logging.getLogger(__name__) @@ -90,6 +92,10 @@ async def run_autonomous_agent( verbose: Whether to show detailed output source_spec_dir: Original spec directory in main project (for syncing from worktree) """ + # Set environment variable for security hooks to find the correct project directory + # This is needed because os.getcwd() may return the wrong directory in worktree mode + os.environ[PROJECT_DIR_ENV_VAR] = str(project_dir.resolve()) + # Initialize recovery manager (handles memory persistence) recovery_manager = RecoveryManager(spec_dir, project_dir) @@ -404,7 +410,7 @@ async def run_autonomous_agent( print_status("Linear notified of stuck subtask", "info") elif is_planning_phase and source_spec_dir: # After planning phase, sync the newly created implementation plan back to source - if sync_plan_to_source(spec_dir, source_spec_dir): + if sync_spec_to_source(spec_dir, source_spec_dir): print_status("Implementation plan synced to main project", "success") # Handle session status diff --git a/apps/backend/agents/session.py b/apps/backend/agents/session.py index 89a5d5d48c..263bf17efb 100644 --- a/apps/backend/agents/session.py +++ b/apps/backend/agents/session.py @@ -40,7 +40,7 @@ get_commit_count, get_latest_commit, load_implementation_plan, - sync_plan_to_source, + sync_spec_to_source, ) logger = logging.getLogger(__name__) @@ -82,7 +82,7 @@ async def post_session_processing( print(muted("--- Post-Session Processing ---")) # Sync implementation plan back to source (for worktree mode) - if sync_plan_to_source(spec_dir, source_spec_dir): + if sync_spec_to_source(spec_dir, source_spec_dir): print_status("Implementation plan synced to main project", "success") # Check if implementation plan was updated @@ -445,8 +445,9 @@ async def run_agent_session( result_content = getattr(block, "content", "") is_error = getattr(block, "is_error", False) - # Check if command was blocked by security hook - if "blocked" in str(result_content).lower(): + # Check if this is an error (not just content containing "blocked") + if is_error and "blocked" in str(result_content).lower(): + # Actual 
blocked command by security hook debug_error( "session", f"Tool BLOCKED: {current_tool}", diff --git a/apps/backend/agents/tools_pkg/tools/memory.py b/apps/backend/agents/tools_pkg/tools/memory.py index ac361ab78c..b5367663e9 100644 --- a/apps/backend/agents/tools_pkg/tools/memory.py +++ b/apps/backend/agents/tools_pkg/tools/memory.py @@ -4,9 +4,16 @@ Tools for recording and retrieving session memory, including discoveries, gotchas, and patterns. + +Dual-storage approach: +- File-based: Always available, works offline, spec-specific +- LadybugDB: When Graphiti is enabled, also saves to graph database for + cross-session retrieval and Memory UI display """ +import asyncio import json +import logging from datetime import datetime, timezone from pathlib import Path from typing import Any @@ -19,6 +26,108 @@ SDK_TOOLS_AVAILABLE = False tool = None +logger = logging.getLogger(__name__) + + +async def _save_to_graphiti_async( + spec_dir: Path, + project_dir: Path, + save_type: str, + data: dict, +) -> bool: + """ + Save data to Graphiti/LadybugDB (async implementation). + + Args: + spec_dir: Spec directory for GraphitiMemory initialization + project_dir: Project root directory + save_type: Type of save - 'discovery', 'gotcha', or 'pattern' + data: Data to save + + Returns: + True if save succeeded, False otherwise + """ + try: + # Check if Graphiti is enabled + from graphiti_config import is_graphiti_enabled + + if not is_graphiti_enabled(): + return False + + from integrations.graphiti.queries_pkg.graphiti import GraphitiMemory + + memory = GraphitiMemory(spec_dir, project_dir) + try: + if save_type == "discovery": + # Save as codebase discovery + # Format: {file_path: description} + result = await memory.save_codebase_discoveries( + {data["file_path"]: data["description"]} + ) + elif save_type == "gotcha": + # Save as gotcha + gotcha_text = data["gotcha"] + if data.get("context"): + gotcha_text += f" (Context: {data['context']})" + result = await memory.save_gotcha(gotcha_text) + elif save_type == "pattern": + # Save as pattern + result = await memory.save_pattern(data["pattern"]) + else: + result = False + return result + finally: + await memory.close() + + except ImportError as e: + logger.debug(f"Graphiti not available for memory tools: {e}") + return False + except Exception as e: + logger.warning(f"Failed to save to Graphiti: {e}") + return False + + +def _save_to_graphiti_sync( + spec_dir: Path, + project_dir: Path, + save_type: str, + data: dict, +) -> bool: + """ + Save data to Graphiti/LadybugDB (synchronous wrapper for sync contexts only). + + NOTE: This should only be called from synchronous code. For async callers, + use _save_to_graphiti_async() directly to ensure proper resource cleanup. + + Args: + spec_dir: Spec directory for GraphitiMemory initialization + project_dir: Project root directory + save_type: Type of save - 'discovery', 'gotcha', or 'pattern' + data: Data to save + + Returns: + True if save succeeded, False otherwise + """ + try: + # Check if we're already in an async context + try: + asyncio.get_running_loop() + # We're in an async context - caller should use _save_to_graphiti_async + # Log a warning and return False to avoid the resource leak bug + logger.warning( + "_save_to_graphiti_sync called from async context. " + "Use _save_to_graphiti_async instead for proper cleanup." 
+ ) + return False + except RuntimeError: + # No running loop - safe to create one + return asyncio.run( + _save_to_graphiti_async(spec_dir, project_dir, save_type, data) + ) + except Exception as e: + logger.warning(f"Failed to save to Graphiti: {e}") + return False + def create_memory_tools(spec_dir: Path, project_dir: Path) -> list: """ @@ -45,7 +154,7 @@ def create_memory_tools(spec_dir: Path, project_dir: Path) -> list: {"file_path": str, "description": str, "category": str}, ) async def record_discovery(args: dict[str, Any]) -> dict[str, Any]: - """Record a discovery to the codebase map.""" + """Record a discovery to the codebase map (file + Graphiti).""" file_path = args["file_path"] description = args["description"] category = args.get("category", "general") @@ -54,8 +163,10 @@ async def record_discovery(args: dict[str, Any]) -> dict[str, Any]: memory_dir.mkdir(exist_ok=True) codebase_map_file = memory_dir / "codebase_map.json" + saved_to_graphiti = False try: + # PRIMARY: Save to file-based storage (always works) # Load existing map or create new if codebase_map_file.exists(): with open(codebase_map_file) as f: @@ -77,11 +188,23 @@ async def record_discovery(args: dict[str, Any]) -> dict[str, Any]: with open(codebase_map_file, "w") as f: json.dump(codebase_map, f, indent=2) + # SECONDARY: Also save to Graphiti/LadybugDB (for Memory UI) + saved_to_graphiti = await _save_to_graphiti_async( + spec_dir, + project_dir, + "discovery", + { + "file_path": file_path, + "description": f"[{category}] {description}", + }, + ) + + storage_note = " (also saved to memory graph)" if saved_to_graphiti else "" return { "content": [ { "type": "text", - "text": f"Recorded discovery for '{file_path}': {description}", + "text": f"Recorded discovery for '{file_path}': {description}{storage_note}", } ] } @@ -102,7 +225,7 @@ async def record_discovery(args: dict[str, Any]) -> dict[str, Any]: {"gotcha": str, "context": str}, ) async def record_gotcha(args: dict[str, Any]) -> dict[str, Any]: - """Record a gotcha to session memory.""" + """Record a gotcha to session memory (file + Graphiti).""" gotcha = args["gotcha"] context = args.get("context", "") @@ -110,8 +233,10 @@ async def record_gotcha(args: dict[str, Any]) -> dict[str, Any]: memory_dir.mkdir(exist_ok=True) gotchas_file = memory_dir / "gotchas.md" + saved_to_graphiti = False try: + # PRIMARY: Save to file-based storage (always works) timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M") entry = f"\n## [{timestamp}]\n{gotcha}" @@ -126,7 +251,20 @@ async def record_gotcha(args: dict[str, Any]) -> dict[str, Any]: ) f.write(entry) - return {"content": [{"type": "text", "text": f"Recorded gotcha: {gotcha}"}]} + # SECONDARY: Also save to Graphiti/LadybugDB (for Memory UI) + saved_to_graphiti = await _save_to_graphiti_async( + spec_dir, + project_dir, + "gotcha", + {"gotcha": gotcha, "context": context}, + ) + + storage_note = " (also saved to memory graph)" if saved_to_graphiti else "" + return { + "content": [ + {"type": "text", "text": f"Recorded gotcha: {gotcha}{storage_note}"} + ] + } except Exception as e: return { diff --git a/apps/backend/agents/utils.py b/apps/backend/agents/utils.py index 8ce33c9224..cc56cde2b9 100644 --- a/apps/backend/agents/utils.py +++ b/apps/backend/agents/utils.py @@ -23,9 +23,10 @@ def get_latest_commit(project_dir: Path) -> str | None: capture_output=True, text=True, check=True, + timeout=10, ) return result.stdout.strip() - except subprocess.CalledProcessError: + except (subprocess.CalledProcessError, 
subprocess.TimeoutExpired): return None @@ -38,9 +39,10 @@ def get_commit_count(project_dir: Path) -> int: capture_output=True, text=True, check=True, + timeout=10, ) return int(result.stdout.strip()) - except (subprocess.CalledProcessError, ValueError): + except (subprocess.CalledProcessError, subprocess.TimeoutExpired, ValueError): return 0 @@ -74,16 +76,32 @@ def find_phase_for_subtask(plan: dict, subtask_id: str) -> dict | None: return None -def sync_plan_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool: +def sync_spec_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool: """ - Sync implementation_plan.json from worktree back to source spec directory. - - When running in isolated mode (worktrees), the agent updates the implementation - plan inside the worktree. This function syncs those changes back to the main - project's spec directory so the frontend/UI can see the progress. + Sync ALL spec files from worktree back to source spec directory. + + When running in isolated mode (worktrees), the agent creates and updates + many files inside the worktree's spec directory. This function syncs ALL + of them back to the main project's spec directory. + + IMPORTANT: Since .auto-claude/ is gitignored, this sync happens to the + local filesystem regardless of what branch the user is on. The worktree + may be on a different branch (e.g., auto-claude/093-task), but the sync + target is always the main project's .auto-claude/specs/ directory. + + Files synced (all files in spec directory): + - implementation_plan.json - Task status and subtask completion + - build-progress.txt - Session-by-session progress notes + - task_logs.json - Execution logs + - review_state.json - QA review state + - critique_report.json - Spec critique findings + - suggested_commit_message.txt - Commit suggestions + - REGRESSION_TEST_REPORT.md - Test regression report + - spec.md, context.json, etc. 
- Original spec files (for completeness) + - memory/ directory - Codebase map, patterns, gotchas, session insights Args: - spec_dir: Current spec directory (may be inside worktree) + spec_dir: Current spec directory (inside worktree) source_spec_dir: Original spec directory in main project (outside worktree) Returns: @@ -100,17 +118,68 @@ def sync_plan_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool: if spec_dir_resolved == source_spec_dir_resolved: return False # Same directory, no sync needed - # Sync the implementation plan - plan_file = spec_dir / "implementation_plan.json" - if not plan_file.exists(): - return False + synced_any = False - source_plan_file = source_spec_dir / "implementation_plan.json" + # Ensure source directory exists + source_spec_dir.mkdir(parents=True, exist_ok=True) try: - shutil.copy2(plan_file, source_plan_file) - logger.debug(f"Synced implementation plan to source: {source_plan_file}") - return True + # Sync all files and directories from worktree spec to source spec + for item in spec_dir.iterdir(): + # Skip symlinks to prevent path traversal attacks + if item.is_symlink(): + logger.warning(f"Skipping symlink during sync: {item.name}") + continue + + source_item = source_spec_dir / item.name + + if item.is_file(): + # Copy file (preserves timestamps) + shutil.copy2(item, source_item) + logger.debug(f"Synced {item.name} to source") + synced_any = True + + elif item.is_dir(): + # Recursively sync directory + _sync_directory(item, source_item) + synced_any = True + except Exception as e: - logger.warning(f"Failed to sync implementation plan to source: {e}") - return False + logger.warning(f"Failed to sync spec directory to source: {e}") + + return synced_any + + +def _sync_directory(source_dir: Path, target_dir: Path) -> None: + """ + Recursively sync a directory from source to target. 
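+
+ Illustrative call (hypothetical variable names; this mirrors how
+ sync_spec_to_source delegates the memory/ subdirectory):
+
+ _sync_directory(worktree_spec_dir / "memory", source_spec_dir / "memory")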
+ + Args: + source_dir: Source directory (in worktree) + target_dir: Target directory (in main project) + """ + # Create target directory if needed + target_dir.mkdir(parents=True, exist_ok=True) + + for item in source_dir.iterdir(): + # Skip symlinks to prevent path traversal attacks + if item.is_symlink(): + logger.warning( + f"Skipping symlink during sync: {source_dir.name}/{item.name}" + ) + continue + + target_item = target_dir / item.name + + if item.is_file(): + shutil.copy2(item, target_item) + logger.debug(f"Synced {source_dir.name}/{item.name} to source") + elif item.is_dir(): + # Recurse into subdirectories + _sync_directory(item, target_item) + + +# Keep the old name as an alias for backward compatibility +def sync_plan_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool: + """Alias for sync_spec_to_source for backward compatibility.""" + return sync_spec_to_source(spec_dir, source_spec_dir) diff --git a/apps/backend/analysis/insight_extractor.py b/apps/backend/analysis/insight_extractor.py index 75974d6b59..7b461afbae 100644 --- a/apps/backend/analysis/insight_extractor.py +++ b/apps/backend/analysis/insight_extractor.py @@ -387,12 +387,40 @@ async def run_insight_extraction( # Collect the response response_text = "" + message_count = 0 + text_blocks_found = 0 + async for msg in client.receive_response(): msg_type = type(msg).__name__ + message_count += 1 + if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): - response_text += block.text + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): + text_blocks_found += 1 + if block.text: # Only add non-empty text + response_text += block.text + else: + logger.debug( + f"Found empty TextBlock in response (block #{text_blocks_found})" + ) + + # Log response collection summary + logger.debug( + f"Insight extraction response: {message_count} messages, " + f"{text_blocks_found} text blocks, {len(response_text)} chars collected" + ) + + # Validate we received content before parsing + if not response_text.strip(): + logger.warning( + f"Insight extraction returned empty response. " + f"Messages received: {message_count}, TextBlocks found: {text_blocks_found}. " + f"This may indicate the AI model did not respond with text content." 
+ ) + return None # Parse JSON from response return parse_insights(response_text) @@ -415,6 +443,11 @@ def parse_insights(response_text: str) -> dict | None: # Try to extract JSON from the response text = response_text.strip() + # Early validation - check for empty response + if not text: + logger.warning("Cannot parse insights: response text is empty") + return None + # Handle markdown code blocks if text.startswith("```"): # Remove code block markers @@ -422,17 +455,26 @@ def parse_insights(response_text: str) -> dict | None: # Remove first line (```json or ```) if lines[0].startswith("```"): lines = lines[1:] - # Remove last line if it's `` + # Remove last line if it's ``` if lines and lines[-1].strip() == "```": lines = lines[:-1] - text = "\n".join(lines) + text = "\n".join(lines).strip() + + # Check again after removing code blocks + if not text: + logger.warning( + "Cannot parse insights: response contained only markdown code block markers with no content" + ) + return None try: insights = json.loads(text) # Validate structure if not isinstance(insights, dict): - logger.warning("Insights is not a dict") + logger.warning( + f"Insights is not a dict, got type: {type(insights).__name__}" + ) return None # Ensure required keys exist with defaults @@ -446,7 +488,13 @@ def parse_insights(response_text: str) -> dict | None: except json.JSONDecodeError as e: logger.warning(f"Failed to parse insights JSON: {e}") - logger.debug(f"Response text was: {text[:500]}") + # Show more context in the error message + preview_length = min(500, len(text)) + logger.warning( + f"Response text preview (first {preview_length} chars): {text[:preview_length]}" + ) + if len(text) > preview_length: + logger.warning(f"... (total length: {len(text)} chars)") return None diff --git a/apps/backend/cli/batch_commands.py b/apps/backend/cli/batch_commands.py index 28a82ea90a..959df5eeac 100644 --- a/apps/backend/cli/batch_commands.py +++ b/apps/backend/cli/batch_commands.py @@ -6,6 +6,8 @@ """ import json +import shutil +import subprocess from pathlib import Path from ui import highlight, print_status @@ -184,7 +186,7 @@ def handle_batch_cleanup_command(project_dir: str, dry_run: bool = True) -> bool True if successful """ specs_dir = Path(project_dir) / ".auto-claude" / "specs" - worktrees_dir = Path(project_dir) / ".worktrees" + worktrees_dir = Path(project_dir) / ".auto-claude" / "worktrees" / "tasks" if not specs_dir.exists(): print_status("No specs directory found", "info") @@ -209,8 +211,56 @@ def handle_batch_cleanup_command(project_dir: str, dry_run: bool = True) -> bool print(f" - {spec_name}") wt_path = worktrees_dir / spec_name if wt_path.exists(): - print(f" └─ .worktrees/{spec_name}/") + print(f" └─ .auto-claude/worktrees/tasks/{spec_name}/") print() print("Run with --no-dry-run to actually delete") + else: + # Actually delete specs and worktrees + deleted_count = 0 + for spec_name in completed: + spec_path = specs_dir / spec_name + wt_path = worktrees_dir / spec_name + + # Remove worktree first (if exists) + if wt_path.exists(): + try: + result = subprocess.run( + ["git", "worktree", "remove", "--force", str(wt_path)], + cwd=project_dir, + capture_output=True, + text=True, + timeout=30, + ) + if result.returncode == 0: + print_status(f"Removed worktree: {spec_name}", "success") + else: + # Fallback: remove directory manually if git fails + shutil.rmtree(wt_path, ignore_errors=True) + print_status( + f"Removed worktree directory: {spec_name}", "success" + ) + except subprocess.TimeoutExpired: + # Timeout: 
fall back to manual removal + shutil.rmtree(wt_path, ignore_errors=True) + print_status( + f"Worktree removal timed out, removed directory: {spec_name}", + "warning", + ) + except Exception as e: + print_status( + f"Failed to remove worktree {spec_name}: {e}", "warning" + ) + + # Remove spec directory + if spec_path.exists(): + try: + shutil.rmtree(spec_path) + print_status(f"Removed spec: {spec_name}", "success") + deleted_count += 1 + except Exception as e: + print_status(f"Failed to remove spec {spec_name}: {e}", "error") + + print() + print_status(f"Cleaned up {deleted_count} spec(s)", "info") return True diff --git a/apps/backend/cli/build_commands.py b/apps/backend/cli/build_commands.py index 19dc17ca6b..ad5766ac54 100644 --- a/apps/backend/cli/build_commands.py +++ b/apps/backend/cli/build_commands.py @@ -79,7 +79,7 @@ def handle_build_command( base_branch: Base branch for worktree creation (default: current branch) """ # Lazy imports to avoid loading heavy modules - from agent import run_autonomous_agent, sync_plan_to_source + from agent import run_autonomous_agent, sync_spec_to_source from debug import ( debug, debug_info, @@ -274,7 +274,7 @@ def handle_build_command( # Sync implementation plan to main project after QA # This ensures the main project has the latest status (human_review) - if sync_plan_to_source(spec_dir, source_spec_dir): + if sync_spec_to_source(spec_dir, source_spec_dir): debug_info( "run.py", "Implementation plan synced to main project after QA" ) diff --git a/apps/backend/cli/utils.py b/apps/backend/cli/utils.py index f18954654a..b996902027 100644 --- a/apps/backend/cli/utils.py +++ b/apps/backend/cli/utils.py @@ -15,7 +15,46 @@ sys.path.insert(0, str(_PARENT_DIR)) from core.auth import get_auth_token, get_auth_token_source -from dotenv import load_dotenv + + +def import_dotenv(): + """ + Import and return load_dotenv with helpful error message if not installed. + + This centralized function ensures consistent error messaging across all + runner scripts when python-dotenv is not available. + + Returns: + The load_dotenv function + + Raises: + SystemExit: If dotenv cannot be imported, with helpful installation instructions. + """ + try: + from dotenv import load_dotenv as _load_dotenv + + return _load_dotenv + except ImportError: + sys.exit( + "Error: Required Python package 'python-dotenv' is not installed.\n" + "\n" + "This usually means you're not using the virtual environment.\n" + "\n" + "To fix this:\n" + "1. From the 'apps/backend/' directory, activate the venv:\n" + " source .venv/bin/activate # Linux/macOS\n" + " .venv\\Scripts\\activate # Windows\n" + "\n" + "2. 
Or install dependencies directly:\n" + " pip install python-dotenv\n" + " pip install -r requirements.txt\n" + "\n" + f"Current Python: {sys.executable}\n" + ) + + +# Load .env with helpful error if dependencies not installed +load_dotenv = import_dotenv() from graphiti_config import get_graphiti_status from linear_integration import LinearManager from linear_updater import is_linear_enabled @@ -28,8 +67,8 @@ muted, ) -# Configuration -DEFAULT_MODEL = "claude-opus-4-5-20251101" +# Configuration - uses shorthand that resolves via API Profile if configured +DEFAULT_MODEL = "sonnet" # Changed from "opus" (fix #433) def setup_environment() -> Path: @@ -82,7 +121,7 @@ def find_spec(project_dir: Path, spec_identifier: str) -> Path | None: return spec_folder # Check worktree specs (for merge-preview, merge, review, discard operations) - worktree_base = project_dir / ".worktrees" + worktree_base = project_dir / ".auto-claude" / "worktrees" / "tasks" if worktree_base.exists(): # Try exact match in worktree worktree_spec = ( diff --git a/apps/backend/cli/workspace_commands.py b/apps/backend/cli/workspace_commands.py index 5e3d68a5aa..88c772feb4 100644 --- a/apps/backend/cli/workspace_commands.py +++ b/apps/backend/cli/workspace_commands.py @@ -22,6 +22,7 @@ get_merge_base, is_lock_file, ) +from core.worktree import WorktreeManager from debug import debug_warning from ui import ( Icons, @@ -67,6 +68,7 @@ def _detect_default_branch(project_dir: Path) -> str: cwd=project_dir, capture_output=True, text=True, + timeout=5, ) if result.returncode == 0: return env_branch @@ -78,6 +80,7 @@ def _detect_default_branch(project_dir: Path) -> str: cwd=project_dir, capture_output=True, text=True, + timeout=5, ) if result.returncode == 0: return branch @@ -90,18 +93,32 @@ def _get_changed_files_from_git( worktree_path: Path, base_branch: str = "main" ) -> list[str]: """ - Get list of changed files from git diff between base branch and HEAD. + Get list of files changed by the task (not files changed on base branch). + + Uses merge-base to accurately identify only the files modified in the worktree, + not files that changed on the base branch since the worktree was created. 
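+
+ Roughly equivalent git commands (illustrative):
+
+ base=$(git merge-base main HEAD)
+ git diff --name-only "$base"..HEAD
+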
Args: worktree_path: Path to the worktree base_branch: Base branch to compare against (default: main) Returns: - List of changed file paths + List of changed file paths (task changes only) """ try: + # First, get the merge-base (the point where the worktree branched) + merge_base_result = subprocess.run( + ["git", "merge-base", base_branch, "HEAD"], + cwd=worktree_path, + capture_output=True, + text=True, + check=True, + ) + merge_base = merge_base_result.stdout.strip() + + # Use two-dot diff from merge-base to get only task's changes result = subprocess.run( - ["git", "diff", "--name-only", f"{base_branch}...HEAD"], + ["git", "diff", "--name-only", f"{merge_base}..HEAD"], cwd=worktree_path, capture_output=True, text=True, @@ -113,10 +130,10 @@ def _get_changed_files_from_git( # Log the failure before trying fallback debug_warning( "workspace_commands", - f"git diff (three-dot) failed: returncode={e.returncode}, " + f"git diff with merge-base failed: returncode={e.returncode}, " f"stderr={e.stderr.strip() if e.stderr else 'N/A'}", ) - # Fallback: try without the three-dot notation + # Fallback: try direct two-arg diff (less accurate but works) try: result = subprocess.run( ["git", "diff", "--name-only", base_branch, "HEAD"], @@ -131,7 +148,7 @@ def _get_changed_files_from_git( # Log the failure before returning empty list debug_warning( "workspace_commands", - f"git diff (two-arg) failed: returncode={e.returncode}, " + f"git diff (fallback) failed: returncode={e.returncode}, " f"stderr={e.stderr.strip() if e.stderr else 'N/A'}", ) return [] @@ -600,6 +617,13 @@ def handle_merge_preview_command( changed_files=all_changed_files[:10], # Log first 10 ) + # NOTE: We intentionally do NOT have a fast path here. + # Even if commits_behind == 0 (main hasn't moved), we still need to: + # 1. Call refresh_from_git() to update evolution data for this task + # 2. Call preview_merge() to detect potential conflicts with OTHER parallel tasks + # that may be tracked in the evolution data but haven't been merged yet. + # Skipping semantic analysis when commits_behind == 0 would miss these conflicts. + debug(MODULE, "Initializing MergeOrchestrator for preview...") # Initialize the orchestrator @@ -805,3 +829,109 @@ def handle_merge_preview_command( "pathMappedAIMergeCount": 0, }, } + + +def cleanup_old_worktrees_command( + project_dir: Path, days: int = 30, dry_run: bool = False +) -> dict: + """ + Clean up old worktrees that haven't been modified in the specified number of days. + + Args: + project_dir: Project root directory + days: Number of days threshold (default: 30) + dry_run: If True, only show what would be removed (default: False) + + Returns: + Dictionary with cleanup results + """ + try: + manager = WorktreeManager(project_dir) + + removed, failed = manager.cleanup_old_worktrees( + days_threshold=days, dry_run=dry_run + ) + + return { + "success": True, + "removed": removed, + "failed": failed, + "dry_run": dry_run, + "days_threshold": days, + } + + except Exception as e: + return { + "success": False, + "error": str(e), + "removed": [], + "failed": [], + } + + +def worktree_summary_command(project_dir: Path) -> dict: + """ + Get a summary of all worktrees with age information. 
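+
+ Illustrative return shape (keys match the code below; values are made up):
+
+ {
+ "success": True,
+ "total_worktrees": 3,
+ "categories": {"recent": [...], "week_old": [...], "month_old": [...],
+ "very_old": [...], "unknown_age": [...]},
+ "warning": None,
+ }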
+ + Args: + project_dir: Project root directory + + Returns: + Dictionary with worktree summary data + """ + try: + manager = WorktreeManager(project_dir) + + # Print to console for CLI usage + manager.print_worktree_summary() + + # Also return data for programmatic access + worktrees = manager.list_all_worktrees() + warning = manager.get_worktree_count_warning() + + # Categorize by age + recent = [] + week_old = [] + month_old = [] + very_old = [] + unknown_age = [] + + for info in worktrees: + data = { + "spec_name": info.spec_name, + "days_since_last_commit": info.days_since_last_commit, + "commit_count": info.commit_count, + } + + if info.days_since_last_commit is None: + unknown_age.append(data) + elif info.days_since_last_commit < 7: + recent.append(data) + elif info.days_since_last_commit < 30: + week_old.append(data) + elif info.days_since_last_commit < 90: + month_old.append(data) + else: + very_old.append(data) + + return { + "success": True, + "total_worktrees": len(worktrees), + "categories": { + "recent": recent, + "week_old": week_old, + "month_old": month_old, + "very_old": very_old, + "unknown_age": unknown_age, + }, + "warning": warning, + } + + except Exception as e: + return { + "success": False, + "error": str(e), + "total_worktrees": 0, + "categories": {}, + "warning": None, + } diff --git a/apps/backend/commit_message.py b/apps/backend/commit_message.py index 0518f20fba..b90242590c 100644 --- a/apps/backend/commit_message.py +++ b/apps/backend/commit_message.py @@ -231,7 +231,9 @@ async def _call_claude(prompt: str) -> str: msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): response_text += block.text logger.info(f"Generated commit message: {len(response_text)} chars") diff --git a/apps/backend/core/agent.py b/apps/backend/core/agent.py index 8b2cc8d540..6d9ffe3702 100644 --- a/apps/backend/core/agent.py +++ b/apps/backend/core/agent.py @@ -39,7 +39,7 @@ run_followup_planner, save_session_memory, save_session_to_graphiti, - sync_plan_to_source, + sync_spec_to_source, ) # Ensure all exports are available at module level @@ -57,7 +57,7 @@ "load_implementation_plan", "find_subtask_in_plan", "find_phase_for_subtask", - "sync_plan_to_source", + "sync_spec_to_source", "AUTO_CONTINUE_DELAY_SECONDS", "HUMAN_INTERVENTION_FILE", ] diff --git a/apps/backend/core/auth.py b/apps/backend/core/auth.py index be105e1ff9..ce105a0caf 100644 --- a/apps/backend/core/auth.py +++ b/apps/backend/core/auth.py @@ -23,12 +23,21 @@ # Environment variables to pass through to SDK subprocess # NOTE: ANTHROPIC_API_KEY is intentionally excluded to prevent silent API billing SDK_ENV_VARS = [ + # API endpoint configuration "ANTHROPIC_BASE_URL", "ANTHROPIC_AUTH_TOKEN", + # Model overrides (from API Profile custom model mappings) + "ANTHROPIC_MODEL", + "ANTHROPIC_DEFAULT_HAIKU_MODEL", + "ANTHROPIC_DEFAULT_SONNET_MODEL", + "ANTHROPIC_DEFAULT_OPUS_MODEL", + # SDK behavior configuration "NO_PROXY", "DISABLE_TELEMETRY", "DISABLE_COST_WARNINGS", "API_TIMEOUT_MS", + # Windows-specific: Git Bash path for Claude Code CLI + "CLAUDE_CODE_GIT_BASH_PATH", ] @@ -208,6 +217,85 @@ def require_auth_token() -> str: return token +def _find_git_bash_path() -> str | None: + """ + Find git-bash (bash.exe) path on Windows. 
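+
+ Typical derivation (illustrative install path):
+
+ C:\Program Files\Git\cmd\git.exe -> C:\Program Files\Git\bin\bash.exe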
+ + Uses 'where git' to find git.exe, then derives bash.exe location from it. + Git for Windows installs bash.exe in the 'bin' directory alongside git.exe + or in the parent 'bin' directory when git.exe is in 'cmd'. + + Returns: + Full path to bash.exe if found, None otherwise + """ + if platform.system() != "Windows": + return None + + # If already set in environment, use that + existing = os.environ.get("CLAUDE_CODE_GIT_BASH_PATH") + if existing and os.path.exists(existing): + return existing + + git_path = None + + # Method 1: Use 'where' command to find git.exe + try: + # Use where.exe explicitly for reliability + result = subprocess.run( + ["where.exe", "git"], + capture_output=True, + text=True, + timeout=5, + shell=False, + ) + + if result.returncode == 0 and result.stdout.strip(): + git_paths = result.stdout.strip().splitlines() + if git_paths: + git_path = git_paths[0].strip() + except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError): + # Intentionally suppress errors - best-effort detection with fallback to common paths + pass + + # Method 2: Check common installation paths if 'where' didn't work + if not git_path: + common_git_paths = [ + os.path.expandvars(r"%PROGRAMFILES%\Git\cmd\git.exe"), + os.path.expandvars(r"%PROGRAMFILES%\Git\bin\git.exe"), + os.path.expandvars(r"%PROGRAMFILES(X86)%\Git\cmd\git.exe"), + os.path.expandvars(r"%LOCALAPPDATA%\Programs\Git\cmd\git.exe"), + ] + for path in common_git_paths: + if os.path.exists(path): + git_path = path + break + + if not git_path: + return None + + # Derive bash.exe location from git.exe location + # Git for Windows structure: + # C:\...\Git\cmd\git.exe -> bash.exe is at C:\...\Git\bin\bash.exe + # C:\...\Git\bin\git.exe -> bash.exe is at C:\...\Git\bin\bash.exe + # C:\...\Git\mingw64\bin\git.exe -> bash.exe is at C:\...\Git\bin\bash.exe + git_dir = os.path.dirname(git_path) + git_parent = os.path.dirname(git_dir) + git_grandparent = os.path.dirname(git_parent) + + # Check common bash.exe locations relative to git installation + possible_bash_paths = [ + os.path.join(git_parent, "bin", "bash.exe"), # cmd -> bin + os.path.join(git_dir, "bash.exe"), # If git.exe is in bin + os.path.join(git_grandparent, "bin", "bash.exe"), # mingw64/bin -> bin + ] + + for bash_path in possible_bash_paths: + if os.path.exists(bash_path): + return bash_path + + return None + + def get_sdk_env_vars() -> dict[str, str]: """ Get environment variables to pass to SDK. @@ -215,6 +303,8 @@ def get_sdk_env_vars() -> dict[str, str]: Collects relevant env vars (ANTHROPIC_BASE_URL, etc.) that should be passed through to the claude-agent-sdk subprocess. + On Windows, auto-detects CLAUDE_CODE_GIT_BASH_PATH if not already set. 
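+
+ Example result (illustrative values only):
+
+ {"ANTHROPIC_BASE_URL": "https://proxy.example.com",
+ "CLAUDE_CODE_GIT_BASH_PATH": "C:\\Program Files\\Git\\bin\\bash.exe"}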
+ Returns: Dict of env var name -> value for non-empty vars """ @@ -223,6 +313,14 @@ def get_sdk_env_vars() -> dict[str, str]: value = os.environ.get(var) if value: env[var] = value + + # On Windows, auto-detect git-bash path if not already set + # Claude Code CLI requires bash.exe to run on Windows + if platform.system() == "Windows" and "CLAUDE_CODE_GIT_BASH_PATH" not in env: + bash_path = _find_git_bash_path() + if bash_path: + env["CLAUDE_CODE_GIT_BASH_PATH"] = bash_path + return env diff --git a/apps/backend/core/client.py b/apps/backend/core/client.py index 3d8dbe8de6..c50978ee17 100644 --- a/apps/backend/core/client.py +++ b/apps/backend/core/client.py @@ -16,6 +16,7 @@ import json import logging import os +import platform import threading import time from pathlib import Path @@ -488,6 +489,12 @@ def create_client( # Collect env vars to pass to SDK (ANTHROPIC_BASE_URL, etc.) sdk_env = get_sdk_env_vars() + # Debug: Log git-bash path detection on Windows + if "CLAUDE_CODE_GIT_BASH_PATH" in sdk_env: + logger.info(f"Git Bash path found: {sdk_env['CLAUDE_CODE_GIT_BASH_PATH']}") + elif platform.system() == "Windows": + logger.warning("Git Bash path not detected on Windows!") + # Check if Linear integration is enabled linear_enabled = is_linear_enabled() linear_api_key = os.environ.get("LINEAR_API_KEY", "") @@ -742,6 +749,9 @@ def create_client( "settings": str(settings_file.resolve()), "env": sdk_env, # Pass ANTHROPIC_BASE_URL etc. to subprocess "max_thinking_tokens": max_thinking_tokens, # Extended thinking budget + # Enable file checkpointing to track file read/write state across tool calls + # This prevents "File has not been read yet" errors in recovery sessions + "enable_file_checkpointing": True, } # Add structured output format if specified diff --git a/apps/backend/core/phase_event.py b/apps/backend/core/phase_event.py index a86321cf02..acc034605b 100644 --- a/apps/backend/core/phase_event.py +++ b/apps/backend/core/phase_event.py @@ -52,4 +52,8 @@ def emit_phase( print(f"{PHASE_MARKER_PREFIX}{json.dumps(payload, default=str)}", flush=True) except (OSError, UnicodeEncodeError) as e: if _DEBUG: - print(f"[phase_event] emit failed: {e}", file=sys.stderr, flush=True) + try: + sys.stderr.write(f"[phase_event] emit failed: {e}\n") + sys.stderr.flush() + except (OSError, UnicodeEncodeError): + pass # Truly silent on complete I/O failure diff --git a/apps/backend/core/workspace.py b/apps/backend/core/workspace.py index ddfd49059b..1f85d083c1 100644 --- a/apps/backend/core/workspace.py +++ b/apps/backend/core/workspace.py @@ -4,7 +4,7 @@ ============================================= Handles workspace isolation through Git worktrees, where each spec -gets its own isolated worktree in .worktrees/{spec-name}/. +gets its own isolated worktree in .auto-claude/worktrees/tasks/{spec-name}/. 
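+
+For example, a spec named "042-fix-login" (hypothetical) gets the worktree
+.auto-claude/worktrees/tasks/042-fix-login/ on branch auto-claude/042-fix-login.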
This module has been refactored for better maintainability: - Models and enums: workspace/models.py @@ -90,12 +90,18 @@ def is_debug_enabled(): from core.workspace.git_utils import ( detect_file_renames as _detect_file_renames, ) +from core.workspace.git_utils import ( + get_binary_file_content_from_ref as _get_binary_file_content_from_ref, +) from core.workspace.git_utils import ( get_changed_files_from_branch as _get_changed_files_from_branch, ) from core.workspace.git_utils import ( get_file_content_from_ref as _get_file_content_from_ref, ) +from core.workspace.git_utils import ( + is_binary_file as _is_binary_file, +) from core.workspace.git_utils import ( is_lock_file as _is_lock_file, ) @@ -773,28 +779,44 @@ def _resolve_git_conflicts_with_ai( print(muted(f" Copying {len(new_files)} new file(s) first (dependencies)...")) for file_path, status in new_files: try: - content = _get_file_content_from_ref( - project_dir, spec_branch, file_path - ) - if content is not None: - # Apply path mapping - write to new location if file was renamed - target_file_path = _apply_path_mapping(file_path, path_mappings) - target_path = project_dir / target_file_path - target_path.parent.mkdir(parents=True, exist_ok=True) - target_path.write_text(content, encoding="utf-8") - subprocess.run( - ["git", "add", target_file_path], - cwd=project_dir, - capture_output=True, + # Apply path mapping - write to new location if file was renamed + target_file_path = _apply_path_mapping(file_path, path_mappings) + target_path = project_dir / target_file_path + target_path.parent.mkdir(parents=True, exist_ok=True) + + # Handle binary files differently - use bytes instead of text + if _is_binary_file(file_path): + binary_content = _get_binary_file_content_from_ref( + project_dir, spec_branch, file_path + ) + if binary_content is not None: + target_path.write_bytes(binary_content) + subprocess.run( + ["git", "add", target_file_path], + cwd=project_dir, + capture_output=True, + ) + resolved_files.append(target_file_path) + debug(MODULE, f"Copied new binary file: {file_path}") + else: + content = _get_file_content_from_ref( + project_dir, spec_branch, file_path ) - resolved_files.append(target_file_path) - if target_file_path != file_path: - debug( - MODULE, - f"Copied new file with path mapping: {file_path} -> {target_file_path}", + if content is not None: + target_path.write_text(content, encoding="utf-8") + subprocess.run( + ["git", "add", target_file_path], + cwd=project_dir, + capture_output=True, ) - else: - debug(MODULE, f"Copied new file: {file_path}") + resolved_files.append(target_file_path) + if target_file_path != file_path: + debug( + MODULE, + f"Copied new file with path mapping: {file_path} -> {target_file_path}", + ) + else: + debug(MODULE, f"Copied new file: {file_path}") except Exception as e: debug_warning(MODULE, f"Could not copy new file {file_path}: {e}") @@ -1118,24 +1140,44 @@ def _resolve_git_conflicts_with_ai( ) else: # Modified without path change - simple copy - content = _get_file_content_from_ref( - project_dir, spec_branch, file_path - ) - if content is not None: - target_path = project_dir / target_file_path - target_path.parent.mkdir(parents=True, exist_ok=True) - target_path.write_text(content, encoding="utf-8") - subprocess.run( - ["git", "add", target_file_path], - cwd=project_dir, - capture_output=True, + # Check if binary file to use correct read/write method + target_path = project_dir / target_file_path + target_path.parent.mkdir(parents=True, exist_ok=True) + + if 
_is_binary_file(file_path): + binary_content = _get_binary_file_content_from_ref( + project_dir, spec_branch, file_path + ) + if binary_content is not None: + target_path.write_bytes(binary_content) + subprocess.run( + ["git", "add", target_file_path], + cwd=project_dir, + capture_output=True, + ) + resolved_files.append(target_file_path) + if target_file_path != file_path: + debug( + MODULE, + f"Merged binary with path mapping: {file_path} -> {target_file_path}", + ) + else: + content = _get_file_content_from_ref( + project_dir, spec_branch, file_path ) - resolved_files.append(target_file_path) - if target_file_path != file_path: - debug( - MODULE, - f"Merged with path mapping: {file_path} -> {target_file_path}", + if content is not None: + target_path.write_text(content, encoding="utf-8") + subprocess.run( + ["git", "add", target_file_path], + cwd=project_dir, + capture_output=True, ) + resolved_files.append(target_file_path) + if target_file_path != file_path: + debug( + MODULE, + f"Merged with path mapping: {file_path} -> {target_file_path}", + ) except Exception as e: print(muted(f" Warning: Could not process {file_path}: {e}")) @@ -1431,7 +1473,9 @@ async def _merge_file_with_ai_async( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): response_text += block.text if response_text: diff --git a/apps/backend/core/workspace/__init__.py b/apps/backend/core/workspace/__init__.py index e5b5ac711a..db278769ea 100644 --- a/apps/backend/core/workspace/__init__.py +++ b/apps/backend/core/workspace/__init__.py @@ -4,7 +4,7 @@ ============================= Handles workspace isolation through Git worktrees, where each spec -gets its own isolated worktree in .worktrees/{spec-name}/. +gets its own isolated worktree in .auto-claude/worktrees/tasks/{spec-name}/. 
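+
+For example (illustrative usage; project_dir and the spec name are placeholders):
+
+ from core.workspace import get_existing_build_worktree
+ worktree = get_existing_build_worktree(project_dir, "042-fix-login")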
This package provides: - Workspace setup and configuration @@ -62,6 +62,7 @@ MAX_SYNTAX_FIX_RETRIES, MERGE_LOCK_TIMEOUT, _create_conflict_file_with_git, + _get_binary_file_content_from_ref, _get_changed_files_from_branch, _get_file_content_from_ref, _is_binary_file, @@ -70,6 +71,7 @@ _is_process_running, _validate_merged_syntax, create_conflict_file_with_git, + get_binary_file_content_from_ref, get_changed_files_from_branch, get_current_branch, get_existing_build_worktree, @@ -117,6 +119,7 @@ "get_current_branch", "get_existing_build_worktree", "get_file_content_from_ref", + "get_binary_file_content_from_ref", "get_changed_files_from_branch", "is_process_running", "is_binary_file", diff --git a/apps/backend/core/workspace/finalization.py b/apps/backend/core/workspace/finalization.py index 3078f2f8a2..a398391f84 100644 --- a/apps/backend/core/workspace/finalization.py +++ b/apps/backend/core/workspace/finalization.py @@ -169,7 +169,15 @@ def handle_workspace_choice( if staging_path: print(highlight(f" cd {staging_path}")) else: - print(highlight(f" cd {project_dir}/.worktrees/{spec_name}")) + worktree_path = get_existing_build_worktree(project_dir, spec_name) + if worktree_path: + print(highlight(f" cd {worktree_path}")) + else: + print( + highlight( + f" cd {project_dir}/.auto-claude/worktrees/tasks/{spec_name}" + ) + ) # Show likely test/run commands if staging_path: @@ -232,7 +240,15 @@ def handle_workspace_choice( if staging_path: print(highlight(f" cd {staging_path}")) else: - print(highlight(f" cd {project_dir}/.worktrees/{spec_name}")) + worktree_path = get_existing_build_worktree(project_dir, spec_name) + if worktree_path: + print(highlight(f" cd {worktree_path}")) + else: + print( + highlight( + f" cd {project_dir}/.auto-claude/worktrees/tasks/{spec_name}" + ) + ) print() print("When you're ready to add it:") print(highlight(f" python auto-claude/run.py --spec {spec_name} --merge")) diff --git a/apps/backend/core/workspace/git_utils.py b/apps/backend/core/workspace/git_utils.py index c027c4a426..18084c2988 100644 --- a/apps/backend/core/workspace/git_utils.py +++ b/apps/backend/core/workspace/git_utils.py @@ -33,6 +33,7 @@ } BINARY_EXTENSIONS = { + # Images ".png", ".jpg", ".jpeg", @@ -41,6 +42,11 @@ ".webp", ".bmp", ".svg", + ".tiff", + ".tif", + ".heic", + ".heif", + # Documents ".pdf", ".doc", ".docx", @@ -48,32 +54,63 @@ ".xlsx", ".ppt", ".pptx", + # Archives ".zip", ".tar", ".gz", ".rar", ".7z", + ".bz2", + ".xz", + ".zst", + # Executables and libraries ".exe", ".dll", ".so", ".dylib", ".bin", + ".msi", + ".app", + # WebAssembly + ".wasm", + # Audio ".mp3", - ".mp4", ".wav", + ".ogg", + ".flac", + ".aac", + ".m4a", + # Video + ".mp4", ".avi", ".mov", ".mkv", + ".webm", + ".wmv", + ".flv", + # Fonts ".woff", ".woff2", ".ttf", ".otf", ".eot", + # Compiled code ".pyc", ".pyo", ".class", ".o", ".obj", + # Data files + ".dat", + ".db", + ".sqlite", + ".sqlite3", + # Other binary formats + ".cur", + ".ani", + ".pbm", + ".pgm", + ".ppm", } # Merge lock timeout in seconds @@ -222,10 +259,16 @@ def get_existing_build_worktree(project_dir: Path, spec_name: str) -> Path | Non Returns: Path to the worktree if it exists for this spec, None otherwise """ - # Per-spec worktree path: .worktrees/{spec-name}/ - worktree_path = project_dir / ".worktrees" / spec_name - if worktree_path.exists(): - return worktree_path + # New path first + new_path = project_dir / ".auto-claude" / "worktrees" / "tasks" / spec_name + if new_path.exists(): + return new_path + + # Legacy fallback + legacy_path = 
project_dir / ".worktrees" / spec_name + if legacy_path.exists(): + return legacy_path + return None @@ -244,6 +287,25 @@ def get_file_content_from_ref( return None +def get_binary_file_content_from_ref( + project_dir: Path, ref: str, file_path: str +) -> bytes | None: + """Get binary file content from a git ref (branch, commit, etc.). + + Unlike get_file_content_from_ref, this returns raw bytes without + text decoding, suitable for binary files like images, audio, etc. + """ + result = subprocess.run( + ["git", "show", f"{ref}:{file_path}"], + cwd=project_dir, + capture_output=True, + text=False, # Return bytes, not text + ) + if result.returncode == 0: + return result.stdout + return None + + def get_changed_files_from_branch( project_dir: Path, base_branch: str, @@ -516,5 +578,6 @@ def create_conflict_file_with_git( _is_lock_file = is_lock_file _validate_merged_syntax = validate_merged_syntax _get_file_content_from_ref = get_file_content_from_ref +_get_binary_file_content_from_ref = get_binary_file_content_from_ref _get_changed_files_from_branch = get_changed_files_from_branch _create_conflict_file_with_git = create_conflict_file_with_git diff --git a/apps/backend/core/workspace/models.py b/apps/backend/core/workspace/models.py index cc94413e54..92d2178c95 100644 --- a/apps/backend/core/workspace/models.py +++ b/apps/backend/core/workspace/models.py @@ -249,7 +249,7 @@ def get_next_spec_number(self) -> int: max_number = max(max_number, self._scan_specs_dir(main_specs_dir)) # 2. Scan all worktree specs - worktrees_dir = self.project_dir / ".worktrees" + worktrees_dir = self.project_dir / ".auto-claude" / "worktrees" / "tasks" if worktrees_dir.exists(): for worktree in worktrees_dir.iterdir(): if worktree.is_dir(): diff --git a/apps/backend/core/workspace/setup.py b/apps/backend/core/workspace/setup.py index b5b825722b..6ae33f43b7 100644 --- a/apps/backend/core/workspace/setup.py +++ b/apps/backend/core/workspace/setup.py @@ -13,6 +13,7 @@ from pathlib import Path from merge import FileTimelineTracker +from security.constants import ALLOWLIST_FILENAME, PROFILE_FILENAME from ui import ( Icons, MenuOption, @@ -267,6 +268,43 @@ def setup_workspace( f"Environment files copied: {', '.join(copied_env_files)}", "success" ) + # Copy security configuration files if they exist + # Note: Unlike env files, security files always overwrite to ensure + # the worktree uses the same security rules as the main project. + # This prevents security bypasses through stale worktree configs. + security_files = [ + ALLOWLIST_FILENAME, + PROFILE_FILENAME, + ] + security_files_copied = [] + + for filename in security_files: + source_file = project_dir / filename + if source_file.is_file(): + target_file = worktree_info.path / filename + try: + shutil.copy2(source_file, target_file) + security_files_copied.append(filename) + except (OSError, PermissionError) as e: + debug_warning(MODULE, f"Failed to copy {filename}: {e}") + print_status( + f"Warning: Could not copy {filename} to worktree", "warning" + ) + + if security_files_copied: + print_status( + f"Security config copied: {', '.join(security_files_copied)}", "success" + ) + + # Ensure .auto-claude/ is in the worktree's .gitignore + # This is critical because the worktree inherits .gitignore from the base branch, + # which may not have .auto-claude/ if that change wasn't committed/pushed. + # Without this, spec files would be committed to the worktree's branch. 
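+ # ensure_gitignore_entry(repo_dir, entry) appends the entry to the repo's
+ # .gitignore if it is missing and returns True only when it actually added
+ # it (behavior inferred from its usage below).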
+ from init import ensure_gitignore_entry + + if ensure_gitignore_entry(worktree_info.path, ".auto-claude/"): + debug(MODULE, "Added .auto-claude/ to worktree's .gitignore") + # Copy spec files to worktree if provided localized_spec_dir = None if source_spec_dir and source_spec_dir.exists(): diff --git a/apps/backend/core/worktree.py b/apps/backend/core/worktree.py index ab3b89e3b3..eb23a6db93 100644 --- a/apps/backend/core/worktree.py +++ b/apps/backend/core/worktree.py @@ -4,7 +4,7 @@ ============================================= Each spec gets its own worktree: -- Worktree path: .worktrees/{spec-name}/ +- Worktree path: .auto-claude/worktrees/tasks/{spec-name}/ - Branch name: auto-claude/{spec-name} This allows: @@ -20,6 +20,7 @@ import shutil import subprocess from dataclasses import dataclass +from datetime import datetime from pathlib import Path @@ -42,20 +43,22 @@ class WorktreeInfo: files_changed: int = 0 additions: int = 0 deletions: int = 0 + last_commit_date: datetime | None = None + days_since_last_commit: int | None = None class WorktreeManager: """ Manages per-spec Git worktrees. - Each spec gets its own worktree in .worktrees/{spec-name}/ with + Each spec gets its own worktree in .auto-claude/worktrees/tasks/{spec-name}/ with a corresponding branch auto-claude/{spec-name}. """ def __init__(self, project_dir: Path, base_branch: str | None = None): self.project_dir = project_dir self.base_branch = base_branch or self._detect_base_branch() - self.worktrees_dir = project_dir / ".worktrees" + self.worktrees_dir = project_dir / ".auto-claude" / "worktrees" / "tasks" self._merge_lock = asyncio.Lock() def _detect_base_branch(self) -> str: @@ -124,17 +127,37 @@ def _get_current_branch(self) -> str: return result.stdout.strip() def _run_git( - self, args: list[str], cwd: Path | None = None + self, args: list[str], cwd: Path | None = None, timeout: int = 60 ) -> subprocess.CompletedProcess: - """Run a git command and return the result.""" - return subprocess.run( - ["git"] + args, - cwd=cwd or self.project_dir, - capture_output=True, - text=True, - encoding="utf-8", - errors="replace", - ) + """Run a git command and return the result. + + Args: + args: Git command arguments (without 'git' prefix) + cwd: Working directory for the command + timeout: Command timeout in seconds (default: 60) + + Returns: + CompletedProcess with command results. On timeout, returns a + CompletedProcess with returncode=-1 and timeout error in stderr. 
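+
+ Example (illustrative):
+
+ result = self._run_git(["status", "--porcelain"], timeout=10)
+ dirty = bool(result.stdout.strip())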
+ """ + try: + return subprocess.run( + ["git"] + args, + cwd=cwd or self.project_dir, + capture_output=True, + text=True, + encoding="utf-8", + errors="replace", + timeout=timeout, + ) + except subprocess.TimeoutExpired: + # Return a failed result on timeout instead of raising + return subprocess.CompletedProcess( + args=["git"] + args, + returncode=-1, + stdout="", + stderr=f"Command timed out after {timeout} seconds", + ) def _unstage_gitignored_files(self) -> None: """ @@ -194,13 +217,24 @@ def _unstage_gitignored_files(self) -> None: def setup(self) -> None: """Create worktrees directory if needed.""" - self.worktrees_dir.mkdir(exist_ok=True) + self.worktrees_dir.mkdir(parents=True, exist_ok=True) # ==================== Per-Spec Worktree Methods ==================== def get_worktree_path(self, spec_name: str) -> Path: - """Get the worktree path for a spec.""" - return self.worktrees_dir / spec_name + """Get the worktree path for a spec (checks new and legacy locations).""" + # New path first + new_path = self.worktrees_dir / spec_name + if new_path.exists(): + return new_path + + # Legacy fallback (.worktrees/ instead of .auto-claude/worktrees/tasks/) + legacy_path = self.project_dir / ".worktrees" / spec_name + if legacy_path.exists(): + return legacy_path + + # Return new path as default for creation + return new_path def get_branch_name(self, spec_name: str) -> str: """Get the branch name for a spec.""" @@ -261,6 +295,8 @@ def _get_worktree_stats(self, spec_name: str) -> dict: "files_changed": 0, "additions": 0, "deletions": 0, + "last_commit_date": None, + "days_since_last_commit": None, } if not worktree_path.exists(): @@ -273,6 +309,52 @@ def _get_worktree_stats(self, spec_name: str) -> dict: if result.returncode == 0: stats["commit_count"] = int(result.stdout.strip() or "0") + # Last commit date (most recent commit in this worktree) + result = self._run_git( + ["log", "-1", "--format=%cd", "--date=iso"], cwd=worktree_path + ) + if result.returncode == 0 and result.stdout.strip(): + try: + # Parse ISO date format: "2026-01-04 00:25:25 +0100" + date_str = result.stdout.strip() + # Convert git format to ISO format for fromisoformat() + # "2026-01-04 00:25:25 +0100" -> "2026-01-04T00:25:25+01:00" + parts = date_str.rsplit(" ", 1) + if len(parts) == 2: + date_part, tz_part = parts + # Convert timezone format: "+0100" -> "+01:00" + if len(tz_part) == 5 and ( + tz_part.startswith("+") or tz_part.startswith("-") + ): + tz_formatted = f"{tz_part[:3]}:{tz_part[3:]}" + iso_str = f"{date_part.replace(' ', 'T')}{tz_formatted}" + last_commit_date = datetime.fromisoformat(iso_str) + stats["last_commit_date"] = last_commit_date + # Use timezone-aware now() for accurate comparison + now_aware = datetime.now(last_commit_date.tzinfo) + stats["days_since_last_commit"] = ( + now_aware - last_commit_date + ).days + else: + # Fallback for unexpected timezone format + last_commit_date = datetime.strptime( + parts[0], "%Y-%m-%d %H:%M:%S" + ) + stats["last_commit_date"] = last_commit_date + stats["days_since_last_commit"] = ( + datetime.now() - last_commit_date + ).days + else: + # No timezone in output + last_commit_date = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S") + stats["last_commit_date"] = last_commit_date + stats["days_since_last_commit"] = ( + datetime.now() - last_commit_date + ).days + except (ValueError, TypeError) as e: + # If parsing fails, silently continue without date info + pass + # Diff stats result = self._run_git( ["diff", "--shortstat", f"{self.base_branch}...HEAD"], 
cwd=worktree_path @@ -327,9 +409,33 @@ def create_worktree(self, spec_name: str) -> WorktreeInfo: # Delete branch if it exists (from previous attempt) self._run_git(["branch", "-D", branch_name]) - # Create worktree with new branch from base + # Fetch latest from remote to ensure we have the most up-to-date code + # GitHub/remote is the source of truth, not the local branch + fetch_result = self._run_git(["fetch", "origin", self.base_branch]) + if fetch_result.returncode != 0: + print( + f"Warning: Could not fetch {self.base_branch} from origin: {fetch_result.stderr}" + ) + print("Falling back to local branch...") + + # Determine the start point for the worktree + # Prefer origin/{base_branch} (remote) over local branch to ensure we have latest code + remote_ref = f"origin/{self.base_branch}" + start_point = self.base_branch # Default to local branch + + # Check if remote ref exists and use it as the source of truth + check_remote = self._run_git(["rev-parse", "--verify", remote_ref]) + if check_remote.returncode == 0: + start_point = remote_ref + print(f"Creating worktree from remote: {remote_ref}") + else: + print( + f"Remote ref {remote_ref} not found, using local branch: {self.base_branch}" + ) + + # Create worktree with new branch from the start point (remote preferred) result = self._run_git( - ["worktree", "add", "-b", branch_name, str(worktree_path), self.base_branch] + ["worktree", "add", "-b", branch_name, str(worktree_path), start_point] ) if result.returncode != 0: @@ -475,17 +581,27 @@ def commit_in_worktree(self, spec_name: str, message: str) -> bool: # ==================== Listing & Discovery ==================== def list_all_worktrees(self) -> list[WorktreeInfo]: - """List all spec worktrees.""" + """List all spec worktrees (includes legacy .worktrees/ location).""" worktrees = [] - - if not self.worktrees_dir.exists(): - return worktrees - - for item in self.worktrees_dir.iterdir(): - if item.is_dir(): - info = self.get_worktree_info(item.name) - if info: - worktrees.append(info) + seen_specs = set() + + # Check new location first + if self.worktrees_dir.exists(): + for item in self.worktrees_dir.iterdir(): + if item.is_dir(): + info = self.get_worktree_info(item.name) + if info: + worktrees.append(info) + seen_specs.add(item.name) + + # Check legacy location (.worktrees/) + legacy_dir = self.project_dir / ".worktrees" + if legacy_dir.exists(): + for item in legacy_dir.iterdir(): + if item.is_dir() and item.name not in seen_specs: + info = self.get_worktree_info(item.name) + if info: + worktrees.append(info) return worktrees @@ -587,81 +703,187 @@ def get_test_commands(self, spec_name: str) -> list[str]: return commands - # ==================== Backward Compatibility ==================== - # These methods provide backward compatibility with the old single-worktree API + def has_uncommitted_changes(self, spec_name: str | None = None) -> bool: + """Check if there are uncommitted changes.""" + cwd = None + if spec_name: + worktree_path = self.get_worktree_path(spec_name) + if worktree_path.exists(): + cwd = worktree_path + result = self._run_git(["status", "--porcelain"], cwd=cwd) + return bool(result.stdout.strip()) - def get_staging_path(self) -> Path | None: - """ - Backward compatibility: Get path to any existing spec worktree. - Prefer using get_worktree_path(spec_name) instead. 
- """ - worktrees = self.list_all_worktrees() - if worktrees: - return worktrees[0].path - return None + # ==================== Worktree Cleanup Methods ==================== - def get_staging_info(self) -> WorktreeInfo | None: - """ - Backward compatibility: Get info about any existing spec worktree. - Prefer using get_worktree_info(spec_name) instead. + def get_old_worktrees( + self, days_threshold: int = 30, include_stats: bool = False + ) -> list[WorktreeInfo] | list[str]: """ - worktrees = self.list_all_worktrees() - if worktrees: - return worktrees[0] - return None + Find worktrees that haven't been modified in the specified number of days. - def merge_staging(self, delete_after: bool = True) -> bool: - """ - Backward compatibility: Merge first found worktree. - Prefer using merge_worktree(spec_name) instead. - """ - worktrees = self.list_all_worktrees() - if worktrees: - return self.merge_worktree(worktrees[0].spec_name, delete_after) - return False + Args: + days_threshold: Number of days without activity to consider a worktree old (default: 30) + include_stats: If True, return full WorktreeInfo objects; if False, return just spec names - def remove_staging(self, delete_branch: bool = True) -> None: - """ - Backward compatibility: Remove first found worktree. - Prefer using remove_worktree(spec_name) instead. + Returns: + List of old worktrees (either WorktreeInfo objects or spec names based on include_stats) """ - worktrees = self.list_all_worktrees() - if worktrees: - self.remove_worktree(worktrees[0].spec_name, delete_branch) + old_worktrees = [] - def get_or_create_staging(self, spec_name: str) -> WorktreeInfo: - """ - Backward compatibility: Alias for get_or_create_worktree. - """ - return self.get_or_create_worktree(spec_name) + for worktree_info in self.list_all_worktrees(): + # Skip if we can't determine age + if worktree_info.days_since_last_commit is None: + continue + + if worktree_info.days_since_last_commit >= days_threshold: + if include_stats: + old_worktrees.append(worktree_info) + else: + old_worktrees.append(worktree_info.spec_name) + + return old_worktrees - def staging_exists(self) -> bool: + def cleanup_old_worktrees( + self, days_threshold: int = 30, dry_run: bool = False + ) -> tuple[list[str], list[str]]: """ - Backward compatibility: Check if any spec worktree exists. - Prefer using worktree_exists(spec_name) instead. + Remove worktrees that haven't been modified in the specified number of days. 
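+
+ Example (illustrative):
+
+ removed, failed = manager.cleanup_old_worktrees(days_threshold=60, dry_run=True)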
+ + Args: + days_threshold: Number of days without activity to consider a worktree old (default: 30) + dry_run: If True, only report what would be removed without actually removing + + Returns: + Tuple of (removed_specs, failed_specs) containing spec names """ - return len(self.list_all_worktrees()) > 0 + old_worktrees = self.get_old_worktrees( + days_threshold=days_threshold, include_stats=True + ) + + if not old_worktrees: + print(f"No worktrees found older than {days_threshold} days.") + return ([], []) - def commit_in_staging(self, message: str) -> bool: + removed = [] + failed = [] + + if dry_run: + print(f"\n[DRY RUN] Would remove {len(old_worktrees)} old worktrees:") + for info in old_worktrees: + print( + f" - {info.spec_name} (last activity: {info.days_since_last_commit} days ago)" + ) + return ([], []) + + print(f"\nRemoving {len(old_worktrees)} old worktrees...") + for info in old_worktrees: + try: + self.remove_worktree(info.spec_name, delete_branch=True) + removed.append(info.spec_name) + print( + f" βœ“ Removed {info.spec_name} (last activity: {info.days_since_last_commit} days ago)" + ) + except Exception as e: + failed.append(info.spec_name) + print(f" βœ— Failed to remove {info.spec_name}: {e}") + + if removed: + print(f"\nSuccessfully removed {len(removed)} worktree(s).") + if failed: + print(f"Failed to remove {len(failed)} worktree(s).") + + return (removed, failed) + + def get_worktree_count_warning( + self, warning_threshold: int = 10, critical_threshold: int = 20 + ) -> str | None: """ - Backward compatibility: Commit in first found worktree. - Prefer using commit_in_worktree(spec_name, message) instead. + Check worktree count and return a warning message if threshold is exceeded. + + Args: + warning_threshold: Number of worktrees to trigger a warning (default: 10) + critical_threshold: Number of worktrees to trigger a critical warning (default: 20) + + Returns: + Warning message string if threshold exceeded, None otherwise """ worktrees = self.list_all_worktrees() - if worktrees: - return self.commit_in_worktree(worktrees[0].spec_name, message) - return False + count = len(worktrees) + + if count >= critical_threshold: + old_worktrees = self.get_old_worktrees(days_threshold=30) + old_count = len(old_worktrees) + return ( + f"CRITICAL: {count} worktrees detected! " + f"Consider cleaning up old worktrees ({old_count} are 30+ days old). " + f"Run cleanup to remove stale worktrees." + ) + elif count >= warning_threshold: + old_worktrees = self.get_old_worktrees(days_threshold=30) + old_count = len(old_worktrees) + return ( + f"WARNING: {count} worktrees detected. " + f"{old_count} are 30+ days old and may be safe to clean up." 
+            )
 
-    def has_uncommitted_changes(self, in_staging: bool = False) -> bool:
-        """Check if there are uncommitted changes."""
+        return None
+
+    def print_worktree_summary(self) -> None:
+        """Print a summary of all worktrees with age information."""
         worktrees = self.list_all_worktrees()
-        if in_staging and worktrees:
-            cwd = worktrees[0].path
-        else:
-            cwd = None
-        result = self._run_git(["status", "--porcelain"], cwd=cwd)
-        return bool(result.stdout.strip())
+        if not worktrees:
+            print("No worktrees found.")
+            return
 
-# Keep STAGING_WORKTREE_NAME for backward compatibility in imports
-STAGING_WORKTREE_NAME = "auto-claude"
+        print(f"\n{'=' * 80}")
+        print(f"Worktree Summary ({len(worktrees)} total)")
+        print(f"{'=' * 80}\n")
+
+        # Group by age
+        recent = []  # < 7 days
+        week_old = []  # 7-30 days
+        month_old = []  # 30-90 days
+        very_old = []  # > 90 days
+        unknown_age = []
+
+        for info in worktrees:
+            if info.days_since_last_commit is None:
+                unknown_age.append(info)
+            elif info.days_since_last_commit < 7:
+                recent.append(info)
+            elif info.days_since_last_commit < 30:
+                week_old.append(info)
+            elif info.days_since_last_commit < 90:
+                month_old.append(info)
+            else:
+                very_old.append(info)
+
+        def print_group(title: str, items: list[WorktreeInfo]):
+            if not items:
+                return
+            print(f"{title} ({len(items)}):")
+            for info in sorted(items, key=lambda x: x.spec_name):
+                age_str = (
+                    f"{info.days_since_last_commit}d ago"
+                    if info.days_since_last_commit is not None
+                    else "unknown"
+                )
+                print(f"  - {info.spec_name} (last activity: {age_str})")
+            print()
+
+        print_group("Recent (< 7 days)", recent)
+        print_group("Week Old (7-30 days)", week_old)
+        print_group("Month Old (30-90 days)", month_old)
+        print_group("Very Old (> 90 days)", very_old)
+        print_group("Unknown Age", unknown_age)
+
+        # Print cleanup suggestions
+        if month_old or very_old:
+            total_old = len(month_old) + len(very_old)
+            print(f"{'=' * 80}")
+            print(
+                f"💡 Suggestion: {total_old} worktree(s) are 30+ days old and may be safe to clean up."
+ ) + print(" Review these worktrees and run cleanup if no longer needed.") + print(f"{'=' * 80}\n") diff --git a/apps/backend/ideation/config.py b/apps/backend/ideation/config.py index 9f650b78da..0f56a893d3 100644 --- a/apps/backend/ideation/config.py +++ b/apps/backend/ideation/config.py @@ -25,7 +25,7 @@ def __init__( include_roadmap_context: bool = True, include_kanban_context: bool = True, max_ideas_per_type: int = 5, - model: str = "claude-opus-4-5-20251101", + model: str = "sonnet", # Changed from "opus" (fix #433) thinking_level: str = "medium", refresh: bool = False, append: bool = False, diff --git a/apps/backend/ideation/generator.py b/apps/backend/ideation/generator.py index 4e3005040e..dcd347041b 100644 --- a/apps/backend/ideation/generator.py +++ b/apps/backend/ideation/generator.py @@ -17,7 +17,7 @@ sys.path.insert(0, str(Path(__file__).parent.parent)) from client import create_client -from phase_config import get_thinking_budget +from phase_config import get_thinking_budget, resolve_model_id from ui import print_status # Ideation types @@ -56,7 +56,7 @@ def __init__( self, project_dir: Path, output_dir: Path, - model: str = "claude-opus-4-5-20251101", + model: str = "sonnet", # Changed from "opus" (fix #433) thinking_level: str = "medium", max_ideas_per_type: int = 5, ): @@ -94,7 +94,7 @@ async def run_agent( client = create_client( self.project_dir, self.output_dir, - self.model, + resolve_model_id(self.model), max_thinking_tokens=self.thinking_budget, ) @@ -187,7 +187,7 @@ async def run_recovery_agent( client = create_client( self.project_dir, self.output_dir, - self.model, + resolve_model_id(self.model), max_thinking_tokens=self.thinking_budget, ) diff --git a/apps/backend/ideation/runner.py b/apps/backend/ideation/runner.py index 1e1537037a..c20d41f839 100644 --- a/apps/backend/ideation/runner.py +++ b/apps/backend/ideation/runner.py @@ -41,7 +41,7 @@ def __init__( include_roadmap_context: bool = True, include_kanban_context: bool = True, max_ideas_per_type: int = 5, - model: str = "claude-opus-4-5-20251101", + model: str = "sonnet", # Changed from "opus" (fix #433) thinking_level: str = "medium", refresh: bool = False, append: bool = False, diff --git a/apps/backend/ideation/types.py b/apps/backend/ideation/types.py index 7180f1e0f0..c2c391d630 100644 --- a/apps/backend/ideation/types.py +++ b/apps/backend/ideation/types.py @@ -31,6 +31,6 @@ class IdeationConfig: include_roadmap_context: bool = True include_kanban_context: bool = True max_ideas_per_type: int = 5 - model: str = "claude-opus-4-5-20251101" + model: str = "sonnet" # Changed from "opus" (fix #433) refresh: bool = False append: bool = False # If True, preserve existing ideas when merging diff --git a/apps/backend/init.py b/apps/backend/init.py index c6aee373d4..5f1962b44e 100644 --- a/apps/backend/init.py +++ b/apps/backend/init.py @@ -6,6 +6,32 @@ from pathlib import Path +# All entries that should be added to .gitignore for auto-claude projects +AUTO_CLAUDE_GITIGNORE_ENTRIES = [ + ".auto-claude/", + ".auto-claude-security.json", + ".auto-claude-status", + ".claude_settings.json", + ".worktrees/", + ".security-key", + "logs/security/", +] + + +def _entry_exists_in_gitignore(lines: list[str], entry: str) -> bool: + """Check if an entry already exists in gitignore (handles trailing slash variations).""" + entry_normalized = entry.rstrip("/") + for line in lines: + line_stripped = line.strip() + # Match both "entry" and "entry/" + if ( + line_stripped == entry + or line_stripped == entry_normalized + or 
line_stripped == entry_normalized + "/" + ): + return True + return False + def ensure_gitignore_entry(project_dir: Path, entry: str = ".auto-claude/") -> bool: """ @@ -27,17 +53,8 @@ def ensure_gitignore_entry(project_dir: Path, entry: str = ".auto-claude/") -> b content = gitignore_path.read_text() lines = content.splitlines() - # Check if entry already exists (exact match or with trailing newline variations) - entry_normalized = entry.rstrip("/") - for line in lines: - line_stripped = line.strip() - # Match both ".auto-claude" and ".auto-claude/" - if ( - line_stripped == entry - or line_stripped == entry_normalized - or line_stripped == entry_normalized + "/" - ): - return False # Already exists + if _entry_exists_in_gitignore(lines, entry): + return False # Already exists # Entry doesn't exist, append it # Ensure file ends with newline before adding our entry @@ -59,11 +76,58 @@ def ensure_gitignore_entry(project_dir: Path, entry: str = ".auto-claude/") -> b return True +def ensure_all_gitignore_entries(project_dir: Path) -> list[str]: + """ + Ensure all auto-claude related entries exist in the project's .gitignore file. + + Creates .gitignore if it doesn't exist. + + Args: + project_dir: The project root directory + + Returns: + List of entries that were added (empty if all already existed) + """ + gitignore_path = project_dir / ".gitignore" + added_entries: list[str] = [] + + # Read existing content or start fresh + if gitignore_path.exists(): + content = gitignore_path.read_text() + lines = content.splitlines() + else: + content = "" + lines = [] + + # Find entries that need to be added + entries_to_add = [ + entry + for entry in AUTO_CLAUDE_GITIGNORE_ENTRIES + if not _entry_exists_in_gitignore(lines, entry) + ] + + if not entries_to_add: + return [] + + # Build the new content to append + # Ensure file ends with newline before adding our entries + if content and not content.endswith("\n"): + content += "\n" + + content += "\n# Auto Claude generated files\n" + for entry in entries_to_add: + content += entry + "\n" + added_entries.append(entry) + + gitignore_path.write_text(content) + return added_entries + + def init_auto_claude_dir(project_dir: Path) -> tuple[Path, bool]: """ Initialize the .auto-claude directory for a project. - Creates the directory if needed and ensures it's in .gitignore. + Creates the directory if needed and ensures all auto-claude files are in .gitignore. 
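+
+    Example (illustrative)::
+
+        auto_claude_dir, updated = init_auto_claude_dir(Path("/path/to/project"))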
Args: project_dir: The project root directory @@ -78,16 +142,18 @@ def init_auto_claude_dir(project_dir: Path) -> tuple[Path, bool]: dir_created = not auto_claude_dir.exists() auto_claude_dir.mkdir(parents=True, exist_ok=True) - # Ensure .auto-claude is in .gitignore (only on first creation) + # Ensure all auto-claude entries are in .gitignore (only on first creation) gitignore_updated = False if dir_created: - gitignore_updated = ensure_gitignore_entry(project_dir, ".auto-claude/") + added = ensure_all_gitignore_entries(project_dir) + gitignore_updated = len(added) > 0 else: # Even if dir exists, check gitignore on first run # Use a marker file to track if we've already checked marker = auto_claude_dir / ".gitignore_checked" if not marker.exists(): - gitignore_updated = ensure_gitignore_entry(project_dir, ".auto-claude/") + added = ensure_all_gitignore_entries(project_dir) + gitignore_updated = len(added) > 0 marker.touch() return auto_claude_dir, gitignore_updated @@ -109,3 +175,36 @@ def get_auto_claude_dir(project_dir: Path, ensure_exists: bool = True) -> Path: return auto_claude_dir return Path(project_dir) / ".auto-claude" + + +def repair_gitignore(project_dir: Path) -> list[str]: + """ + Repair an existing project's .gitignore to include all auto-claude entries. + + This is useful for projects created before all entries were being added, + or when gitignore entries were manually removed. + + Also resets the .gitignore_checked marker to allow future updates. + + Args: + project_dir: The project root directory + + Returns: + List of entries that were added (empty if all already existed) + """ + project_dir = Path(project_dir) + auto_claude_dir = project_dir / ".auto-claude" + + # Remove the marker file so future checks will also run + marker = auto_claude_dir / ".gitignore_checked" + if marker.exists(): + marker.unlink() + + # Add all missing entries + added = ensure_all_gitignore_entries(project_dir) + + # Re-create the marker + if auto_claude_dir.exists(): + marker.touch() + + return added diff --git a/apps/backend/integrations/graphiti/config.py b/apps/backend/integrations/graphiti/config.py index f2af6fd32f..4dbbc3e61e 100644 --- a/apps/backend/integrations/graphiti/config.py +++ b/apps/backend/integrations/graphiti/config.py @@ -622,10 +622,23 @@ def get_graphiti_status() -> dict: status["errors"] = errors # Errors are informational - embedder is optional (keyword search fallback) - # Available if is_valid() returns True (just needs enabled flag) - status["available"] = config.is_valid() - if not status["available"]: + # CRITICAL FIX: Actually verify packages are importable before reporting available + # Don't just check config.is_valid() - actually try to import the module + if not config.is_valid(): status["reason"] = errors[0] if errors else "Configuration invalid" + return status + + # Try importing the required Graphiti packages + try: + # Attempt to import the main graphiti_memory module + import graphiti_core # noqa: F401 + from graphiti_core.driver.falkordb_driver import FalkorDriver # noqa: F401 + + # If we got here, packages are importable + status["available"] = True + except ImportError as e: + status["available"] = False + status["reason"] = f"Graphiti packages not installed: {e}" return status diff --git a/apps/backend/integrations/linear/updater.py b/apps/backend/integrations/linear/updater.py index d102642fab..02d3880cfc 100644 --- a/apps/backend/integrations/linear/updater.py +++ b/apps/backend/integrations/linear/updater.py @@ -118,6 +118,7 @@ def 
_create_linear_client() -> ClaudeSDKClient: get_sdk_env_vars, require_auth_token, ) + from phase_config import resolve_model_id require_auth_token() # Raises ValueError if no token found ensure_claude_code_oauth_token() @@ -130,7 +131,7 @@ def _create_linear_client() -> ClaudeSDKClient: return ClaudeSDKClient( options=ClaudeAgentOptions( - model="claude-haiku-4-5", # Fast & cheap model for simple API calls + model=resolve_model_id("haiku"), # Resolves via API Profile if configured system_prompt="You are a Linear API assistant. Execute the requested Linear operation precisely.", allowed_tools=LINEAR_TOOLS, mcp_servers={ diff --git a/apps/backend/merge/__init__.py b/apps/backend/merge/__init__.py index 99dc35d269..7ac715a964 100644 --- a/apps/backend/merge/__init__.py +++ b/apps/backend/merge/__init__.py @@ -9,7 +9,7 @@ traditional merge conflicts. Components: -- SemanticAnalyzer: Tree-sitter based semantic change extraction +- SemanticAnalyzer: Regex-based semantic change extraction - ConflictDetector: Rule-based conflict detection and compatibility analysis - AutoMerger: Deterministic merge strategies (no AI needed) - AIResolver: Minimal-context AI resolution for ambiguous conflicts diff --git a/apps/backend/merge/ai_resolver/claude_client.py b/apps/backend/merge/ai_resolver/claude_client.py index 77229043c5..40e118f923 100644 --- a/apps/backend/merge/ai_resolver/claude_client.py +++ b/apps/backend/merge/ai_resolver/claude_client.py @@ -82,7 +82,9 @@ async def _run_merge() -> str: msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): response_text += block.text logger.info(f"AI merge response: {len(response_text)} chars") diff --git a/apps/backend/merge/file_evolution/modification_tracker.py b/apps/backend/merge/file_evolution/modification_tracker.py index b4cc281ae6..115a175ac7 100644 --- a/apps/backend/merge/file_evolution/modification_tracker.py +++ b/apps/backend/merge/file_evolution/modification_tracker.py @@ -87,8 +87,8 @@ def record_modification( # Get or create evolution if rel_path not in evolutions: - logger.warning(f"File {rel_path} not being tracked") - # Note: We could auto-create here, but for now return None + # Debug level: this is expected for files not in baseline (e.g., from main's changes) + logger.debug(f"File {rel_path} not in evolution tracking - skipping") return None evolution = evolutions.get(rel_path) @@ -157,9 +157,21 @@ def refresh_from_git( ) try: - # Get list of files changed in the worktree vs target branch + # Get the merge-base to accurately identify task-only changes + # Using two-dot diff (merge-base..HEAD) returns only files changed by the task, + # not files changed on the target branch since divergence + merge_base_result = subprocess.run( + ["git", "merge-base", target_branch, "HEAD"], + cwd=worktree_path, + capture_output=True, + text=True, + check=True, + ) + merge_base = merge_base_result.stdout.strip() + + # Get list of files changed in the worktree since the merge-base result = subprocess.run( - ["git", "diff", "--name-only", f"{target_branch}...HEAD"], + ["git", "diff", "--name-only", f"{merge_base}..HEAD"], cwd=worktree_path, capture_output=True, text=True, @@ -175,54 +187,82 @@ def refresh_from_git( else changed_files, ) + processed_count = 0 for file_path in changed_files: - # Get the diff for 
this file - diff_result = subprocess.run( - ["git", "diff", f"{target_branch}...HEAD", "--", file_path], - cwd=worktree_path, - capture_output=True, - text=True, - check=True, - ) - - # Get content before (from target branch) and after (current) try: - show_result = subprocess.run( - ["git", "show", f"{target_branch}:{file_path}"], + # Get the diff for this file (using merge-base for accurate task-only diff) + diff_result = subprocess.run( + ["git", "diff", f"{merge_base}..HEAD", "--", file_path], cwd=worktree_path, capture_output=True, text=True, check=True, ) - old_content = show_result.stdout - except subprocess.CalledProcessError: - # File is new - old_content = "" - current_file = worktree_path / file_path - if current_file.exists(): + # Get content before (from merge-base - the point where task branched) try: - new_content = current_file.read_text(encoding="utf-8") - except UnicodeDecodeError: - new_content = current_file.read_text( - encoding="utf-8", errors="replace" + show_result = subprocess.run( + ["git", "show", f"{merge_base}:{file_path}"], + cwd=worktree_path, + capture_output=True, + text=True, + check=True, ) - else: - # File was deleted - new_content = "" - - # Record the modification - self.record_modification( - task_id=task_id, - file_path=file_path, - old_content=old_content, - new_content=new_content, - evolutions=evolutions, - raw_diff=diff_result.stdout, - ) + old_content = show_result.stdout + except subprocess.CalledProcessError: + # File is new + old_content = "" + + current_file = worktree_path / file_path + if current_file.exists(): + try: + new_content = current_file.read_text(encoding="utf-8") + except UnicodeDecodeError: + new_content = current_file.read_text( + encoding="utf-8", errors="replace" + ) + else: + # File was deleted + new_content = "" + + # Auto-create FileEvolution entry if not already tracked + # This handles retroactive tracking when capture_baselines wasn't called + rel_path = self.storage.get_relative_path(file_path) + if rel_path not in evolutions: + evolutions[rel_path] = FileEvolution( + file_path=rel_path, + baseline_commit=merge_base, + baseline_captured_at=datetime.now(), + baseline_content_hash=compute_content_hash(old_content), + baseline_snapshot_path="", # Not storing baseline file + task_snapshots=[], + ) + debug( + MODULE, + f"Auto-created evolution entry for {rel_path}", + baseline_commit=merge_base[:8], + ) + + # Record the modification + self.record_modification( + task_id=task_id, + file_path=file_path, + old_content=old_content, + new_content=new_content, + evolutions=evolutions, + raw_diff=diff_result.stdout, + ) + processed_count += 1 + + except subprocess.CalledProcessError as e: + # Log error but continue with remaining files + logger.warning( + f"Failed to process {file_path} in refresh_from_git: {e}" + ) + continue logger.info( - f"Refreshed {len(changed_files)} files from worktree for task {task_id}" + f"Refreshed {processed_count}/{len(changed_files)} files from worktree for task {task_id}" ) except subprocess.CalledProcessError as e: @@ -248,35 +288,23 @@ def mark_task_completed( def _detect_target_branch(self, worktree_path: Path) -> str: """ - Detect the target branch to compare against for a worktree. + Detect the base branch to compare against for a worktree. - This finds the branch that the worktree was created from by looking - at the merge-base between the worktree and common branch names. 
+        This finds the branch that the worktree was created FROM by looking
+        for common branch names (main, master, develop) that have a valid
+        merge-base with the worktree.
+
+        Note: We don't use upstream tracking because that returns the worktree's
+        own branch (e.g., origin/auto-claude/...) rather than the base branch.
 
         Args:
             worktree_path: Path to the worktree
 
         Returns:
-            The detected target branch name, defaults to 'main' if detection fails
+            The detected base branch name; defaults to 'main', or 'HEAD~10' as a last resort
         """
-        # Try to get the upstream tracking branch
-        try:
-            result = subprocess.run(
-                ["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"],
-                cwd=worktree_path,
-                capture_output=True,
-                text=True,
-            )
-            if result.returncode == 0 and result.stdout.strip():
-                upstream = result.stdout.strip()
-                # Extract branch name from origin/branch format
-                if "/" in upstream:
-                    return upstream.split("/", 1)[1]
-                return upstream
-        except subprocess.CalledProcessError:
-            pass
-
         # Try common branch names and find which one has a valid merge-base
+        # This is the reliable way to find what branch the worktree diverged from
         for branch in ["main", "master", "develop"]:
             try:
                 result = subprocess.run(
@@ -286,14 +314,39 @@ def _detect_target_branch(self, worktree_path: Path) -> str:
                     text=True,
                 )
                 if result.returncode == 0:
+                    debug(
+                        MODULE,
+                        f"Detected base branch: {branch}",
+                        worktree_path=str(worktree_path),
+                    )
                     return branch
             except subprocess.CalledProcessError:
                 continue
 
-        # Default to main
+        # Before defaulting to 'main', verify it exists
+        # This handles non-standard projects that use trunk, production, etc.
+        try:
+            result = subprocess.run(
+                ["git", "rev-parse", "--verify", "main"],
+                cwd=worktree_path,
+                capture_output=True,
+                text=True,
+            )
+            if result.returncode == 0:
+                debug_warning(
+                    MODULE,
+                    "Could not find merge-base with standard branches, defaulting to 'main'",
+                    worktree_path=str(worktree_path),
+                )
+                return "main"
+        except subprocess.CalledProcessError:
+            pass
+
+        # Last resort: use HEAD~10 as a fallback comparison point
+        # This allows modification tracking even on non-standard branch setups
         debug_warning(
             MODULE,
-            "Could not detect target branch, defaulting to 'main'",
+            "No standard base branch found, modification tracking may be limited",
             worktree_path=str(worktree_path),
         )
-        return "main"
+        return "HEAD~10"
diff --git a/apps/backend/merge/file_merger.py b/apps/backend/merge/file_merger.py
index 1038055554..7fc3c35dc7 100644
--- a/apps/backend/merge/file_merger.py
+++ b/apps/backend/merge/file_merger.py
@@ -19,6 +19,35 @@
 from .types import ChangeType, SemanticChange, TaskSnapshot
 
+
+def detect_line_ending(content: str) -> str:
+    """
+    Detect line ending style in content using priority-based detection.
+
+    Uses a priority order (CRLF > CR > LF) to detect the line ending style.
+    CRLF is checked first because it contains LF, so presence of any CRLF
+    indicates Windows-style endings. This approach is fast and works well
+    for files that consistently use one style.
+
+    Note: This returns the first detected style by priority, not the most
+    frequent style. For files with mixed line endings, consider normalizing
+    to a single style before processing.
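+
+    Example (illustrative)::
+
+        >>> detect_line_ending("a\\r\\nb\\nc")
+        '\\r\\n'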
+ + Args: + content: File content to analyze + + Returns: + The detected line ending string: "\\r\\n", "\\r", or "\\n" + """ + # Check for CRLF first (Windows) - must check before LF since CRLF contains LF + if "\r\n" in content: + return "\r\n" + # Check for CR (classic Mac, rare but possible) + if "\r" in content: + return "\r" + # Default to LF (Unix/modern Mac) + return "\n" + + def apply_single_task_changes( baseline: str, snapshot: TaskSnapshot, @@ -35,7 +64,16 @@ def apply_single_task_changes( Returns: Modified content with changes applied """ - content = baseline + # Detect line ending style before normalizing + original_line_ending = detect_line_ending(baseline) + + # Normalize to LF for consistent matching with regex_analyzer output + # The regex_analyzer normalizes content to LF when extracting content_before/after, + # so we must also normalize baseline to ensure replace() matches correctly + content = baseline.replace("\r\n", "\n").replace("\r", "\n") + + # Use LF for internal processing + line_ending = "\n" for change in snapshot.semantic_changes: if change.content_before and change.content_after: @@ -45,13 +83,19 @@ def apply_single_task_changes( # Addition - need to determine where to add if change.change_type == ChangeType.ADD_IMPORT: # Add import at top - lines = content.split("\n") + lines = content.splitlines() import_end = find_import_end(lines, file_path) lines.insert(import_end, change.content_after) - content = "\n".join(lines) + content = line_ending.join(lines) elif change.change_type == ChangeType.ADD_FUNCTION: # Add function at end (before exports) - content += f"\n\n{change.content_after}" + content += f"{line_ending}{line_ending}{change.content_after}" + + # Restore original line ending style if it was CRLF + if original_line_ending == "\r\n": + content = content.replace("\n", "\r\n") + elif original_line_ending == "\r": + content = content.replace("\n", "\r") return content @@ -72,7 +116,16 @@ def combine_non_conflicting_changes( Returns: Combined content with all changes applied """ - content = baseline + # Detect line ending style before normalizing + original_line_ending = detect_line_ending(baseline) + + # Normalize to LF for consistent matching with regex_analyzer output + # The regex_analyzer normalizes content to LF when extracting content_before/after, + # so we must also normalize baseline to ensure replace() matches correctly + content = baseline.replace("\r\n", "\n").replace("\r", "\n") + + # Use LF for internal processing + line_ending = "\n" # Group changes by type for proper ordering imports: list[SemanticChange] = [] @@ -96,13 +149,13 @@ def combine_non_conflicting_changes( # Add imports if imports: - lines = content.split("\n") + lines = content.splitlines() import_end = find_import_end(lines, file_path) for imp in imports: if imp.content_after and imp.content_after not in content: lines.insert(import_end, imp.content_after) import_end += 1 - content = "\n".join(lines) + content = line_ending.join(lines) # Apply modifications for mod in modifications: @@ -112,15 +165,21 @@ def combine_non_conflicting_changes( # Add functions for func in functions: if func.content_after: - content += f"\n\n{func.content_after}" + content += f"{line_ending}{line_ending}{func.content_after}" # Apply other changes for change in other: if change.content_after and not change.content_before: - content += f"\n{change.content_after}" + content += f"{line_ending}{change.content_after}" elif change.content_before and change.content_after: content = 
content.replace(change.content_before, change.content_after) + # Restore original line ending style if it was CRLF + if original_line_ending == "\r\n": + content = content.replace("\n", "\r\n") + elif original_line_ending == "\r": + content = content.replace("\n", "\r") + return content diff --git a/apps/backend/merge/git_utils.py b/apps/backend/merge/git_utils.py index 92bfd40f7b..6868d0d015 100644 --- a/apps/backend/merge/git_utils.py +++ b/apps/backend/merge/git_utils.py @@ -27,28 +27,19 @@ def find_worktree(project_dir: Path, task_id: str) -> Path | None: Returns: Path to the worktree, or None if not found """ - # Check common locations - worktrees_dir = project_dir / ".worktrees" - if worktrees_dir.exists(): - # Look for worktree with task_id in name - for entry in worktrees_dir.iterdir(): + # Check new path first + new_worktrees_dir = project_dir / ".auto-claude" / "worktrees" / "tasks" + if new_worktrees_dir.exists(): + for entry in new_worktrees_dir.iterdir(): if entry.is_dir() and task_id in entry.name: return entry - # Try git worktree list - try: - result = subprocess.run( - ["git", "worktree", "list", "--porcelain"], - cwd=project_dir, - capture_output=True, - text=True, - check=True, - ) - for line in result.stdout.split("\n"): - if line.startswith("worktree ") and task_id in line: - return Path(line.split(" ", 1)[1]) - except subprocess.CalledProcessError: - pass + # Legacy fallback for backwards compatibility + legacy_worktrees_dir = project_dir / ".worktrees" + if legacy_worktrees_dir.exists(): + for entry in legacy_worktrees_dir.iterdir(): + if entry.is_dir() and task_id in entry.name: + return entry return None diff --git a/apps/backend/merge/semantic_analysis/__init__.py b/apps/backend/merge/semantic_analysis/__init__.py index e06d039969..0f4cc099c4 100644 --- a/apps/backend/merge/semantic_analysis/__init__.py +++ b/apps/backend/merge/semantic_analysis/__init__.py @@ -1,12 +1,10 @@ """ -Semantic analyzer package for AST-based code analysis. +Semantic analyzer package for code analysis. This package provides modular semantic analysis capabilities: - models.py: Data structures for extracted elements -- python_analyzer.py: Python-specific AST extraction -- js_analyzer.py: JavaScript/TypeScript-specific AST extraction - comparison.py: Element comparison and change classification -- regex_analyzer.py: Fallback regex-based analysis +- regex_analyzer.py: Regex-based analysis for code changes """ from .models import ExtractedElement diff --git a/apps/backend/merge/semantic_analysis/js_analyzer.py b/apps/backend/merge/semantic_analysis/js_analyzer.py deleted file mode 100644 index 048d03acba..0000000000 --- a/apps/backend/merge/semantic_analysis/js_analyzer.py +++ /dev/null @@ -1,157 +0,0 @@ -""" -JavaScript/TypeScript-specific semantic analysis using tree-sitter. -""" - -from __future__ import annotations - -from collections.abc import Callable - -from .models import ExtractedElement - -try: - from tree_sitter import Node -except ImportError: - Node = None - - -def extract_js_elements( - node: Node, - elements: dict[str, ExtractedElement], - get_text: Callable[[Node], str], - get_line: Callable[[int], int], - ext: str, - parent: str | None = None, -) -> None: - """ - Extract structural elements from JavaScript/TypeScript AST. 
- - Args: - node: The tree-sitter node to extract from - elements: Dictionary to populate with extracted elements - get_text: Function to extract text from a node - get_line: Function to convert byte position to line number - ext: File extension (.js, .jsx, .ts, .tsx) - parent: Parent element name for nested elements - """ - for child in node.children: - if child.type == "import_statement": - text = get_text(child) - # Try to extract the source module - source_node = child.child_by_field_name("source") - if source_node: - source = get_text(source_node).strip("'\"") - elements[f"import:{source}"] = ExtractedElement( - element_type="import", - name=source, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=text, - ) - - elif child.type in {"function_declaration", "function"}: - name_node = child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - full_name = f"{parent}.{name}" if parent else name - elements[f"function:{full_name}"] = ExtractedElement( - element_type="function", - name=full_name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - parent=parent, - ) - - elif child.type == "arrow_function": - # Arrow functions are usually assigned to variables - # We'll catch these via variable declarations - pass - - elif child.type in {"lexical_declaration", "variable_declaration"}: - # const/let/var declarations - for declarator in child.children: - if declarator.type == "variable_declarator": - name_node = declarator.child_by_field_name("name") - value_node = declarator.child_by_field_name("value") - if name_node: - name = get_text(name_node) - content = get_text(child) - - # Check if it's a function (arrow function or function expression) - is_function = False - if value_node and value_node.type in { - "arrow_function", - "function", - }: - is_function = True - elements[f"function:{name}"] = ExtractedElement( - element_type="function", - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=content, - parent=parent, - ) - else: - elements[f"variable:{name}"] = ExtractedElement( - element_type="variable", - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=content, - parent=parent, - ) - - elif child.type == "class_declaration": - name_node = child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - elements[f"class:{name}"] = ExtractedElement( - element_type="class", - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - ) - # Recurse into class body - body = child.child_by_field_name("body") - if body: - extract_js_elements( - body, elements, get_text, get_line, ext, parent=name - ) - - elif child.type == "method_definition": - name_node = child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - full_name = f"{parent}.{name}" if parent else name - elements[f"method:{full_name}"] = ExtractedElement( - element_type="method", - name=full_name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - parent=parent, - ) - - elif child.type == "export_statement": - # Recurse into exports to find the actual declaration - extract_js_elements(child, elements, get_text, get_line, ext, parent) - - # TypeScript specific - elif child.type in {"interface_declaration", "type_alias_declaration"}: - name_node = 
child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - elem_type = "interface" if "interface" in child.type else "type" - elements[f"{elem_type}:{name}"] = ExtractedElement( - element_type=elem_type, - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - ) - - # Recurse into statement blocks - elif child.type in {"program", "statement_block", "class_body"}: - extract_js_elements(child, elements, get_text, get_line, ext, parent) diff --git a/apps/backend/merge/semantic_analysis/python_analyzer.py b/apps/backend/merge/semantic_analysis/python_analyzer.py deleted file mode 100644 index def71a943b..0000000000 --- a/apps/backend/merge/semantic_analysis/python_analyzer.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -Python-specific semantic analysis using tree-sitter. -""" - -from __future__ import annotations - -from collections.abc import Callable - -from .models import ExtractedElement - -try: - from tree_sitter import Node -except ImportError: - Node = None - - -def extract_python_elements( - node: Node, - elements: dict[str, ExtractedElement], - get_text: Callable[[Node], str], - get_line: Callable[[int], int], - parent: str | None = None, -) -> None: - """ - Extract structural elements from Python AST. - - Args: - node: The tree-sitter node to extract from - elements: Dictionary to populate with extracted elements - get_text: Function to extract text from a node - get_line: Function to convert byte position to line number - parent: Parent element name for nested elements - """ - for child in node.children: - if child.type == "import_statement": - # import x, y - text = get_text(child) - # Extract module names - for name_node in child.children: - if name_node.type == "dotted_name": - name = get_text(name_node) - elements[f"import:{name}"] = ExtractedElement( - element_type="import", - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=text, - ) - - elif child.type == "import_from_statement": - # from x import y, z - text = get_text(child) - module = None - for sub in child.children: - if sub.type == "dotted_name": - module = get_text(sub) - break - if module: - elements[f"import_from:{module}"] = ExtractedElement( - element_type="import_from", - name=module, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=text, - ) - - elif child.type == "function_definition": - name_node = child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - full_name = f"{parent}.{name}" if parent else name - elements[f"function:{full_name}"] = ExtractedElement( - element_type="function", - name=full_name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - parent=parent, - ) - - elif child.type == "class_definition": - name_node = child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - elements[f"class:{name}"] = ExtractedElement( - element_type="class", - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - ) - # Recurse into class body for methods - body = child.child_by_field_name("body") - if body: - extract_python_elements( - body, elements, get_text, get_line, parent=name - ) - - elif child.type == "decorated_definition": - # Handle decorated functions/classes - for sub in child.children: - if sub.type in {"function_definition", "class_definition"}: - 
extract_python_elements(child, elements, get_text, get_line, parent) - break - - # Recurse for other compound statements - elif child.type in { - "if_statement", - "while_statement", - "for_statement", - "try_statement", - "with_statement", - }: - extract_python_elements(child, elements, get_text, get_line, parent) diff --git a/apps/backend/merge/semantic_analysis/regex_analyzer.py b/apps/backend/merge/semantic_analysis/regex_analyzer.py index 40556f765c..9ceff32bee 100644 --- a/apps/backend/merge/semantic_analysis/regex_analyzer.py +++ b/apps/backend/merge/semantic_analysis/regex_analyzer.py @@ -1,5 +1,5 @@ """ -Regex-based fallback analysis when tree-sitter is not available. +Regex-based semantic analysis for code changes. """ from __future__ import annotations @@ -17,7 +17,7 @@ def analyze_with_regex( ext: str, ) -> FileAnalysis: """ - Fallback analysis using regex when tree-sitter isn't available. + Analyze code changes using regex patterns. Args: file_path: Path to the file being analyzed @@ -30,11 +30,16 @@ def analyze_with_regex( """ changes: list[SemanticChange] = [] + # Normalize line endings to LF for consistent cross-platform behavior + # This handles Windows CRLF, old Mac CR, and Unix LF + before_normalized = before.replace("\r\n", "\n").replace("\r", "\n") + after_normalized = after.replace("\r\n", "\n").replace("\r", "\n") + # Get a unified diff diff = list( difflib.unified_diff( - before.splitlines(keepends=True), - after.splitlines(keepends=True), + before_normalized.splitlines(keepends=True), + after_normalized.splitlines(keepends=True), lineterm="", ) ) @@ -89,8 +94,22 @@ def analyze_with_regex( # Detect function changes (simplified) func_pattern = get_function_pattern(ext) if func_pattern: - funcs_before = set(func_pattern.findall(before)) - funcs_after = set(func_pattern.findall(after)) + # For JS/TS patterns with alternation, findall() returns tuples + # Extract the non-empty match from each tuple + def extract_func_names(matches): + names = set() + for match in matches: + if isinstance(match, tuple): + # Get the first non-empty group from the tuple + name = next((m for m in match if m), None) + if name: + names.add(name) + elif match: + names.add(match) + return names + + funcs_before = extract_func_names(func_pattern.findall(before_normalized)) + funcs_after = extract_func_names(func_pattern.findall(after_normalized)) for func in funcs_after - funcs_before: changes.append( diff --git a/apps/backend/merge/semantic_analyzer.py b/apps/backend/merge/semantic_analyzer.py index 07aea59056..30697c1a94 100644 --- a/apps/backend/merge/semantic_analyzer.py +++ b/apps/backend/merge/semantic_analyzer.py @@ -2,32 +2,27 @@ Semantic Analyzer ================= -Analyzes code changes at a semantic level using tree-sitter. +Analyzes code changes at a semantic level using regex-based heuristics. -This module provides AST-based analysis of code changes, extracting -meaningful semantic changes like "added import", "modified function", -"wrapped JSX element" rather than line-level diffs. - -When tree-sitter is not available, falls back to regex-based heuristics. +This module provides analysis of code changes, extracting meaningful +semantic changes like "added import", "modified function", "wrapped JSX element" +rather than line-level diffs. 
""" from __future__ import annotations import logging from pathlib import Path -from typing import Any -from .types import ChangeType, FileAnalysis +from .types import FileAnalysis # Import debug utilities try: from debug import ( debug, debug_detailed, - debug_error, debug_success, debug_verbose, - is_debug_enabled, ) except ImportError: # Fallback if debug module not available @@ -43,71 +38,18 @@ def debug_verbose(*args, **kwargs): def debug_success(*args, **kwargs): pass - def debug_error(*args, **kwargs): - pass - - def is_debug_enabled(): - return False - logger = logging.getLogger(__name__) MODULE = "merge.semantic_analyzer" -# Try to import tree-sitter - it's optional but recommended -TREE_SITTER_AVAILABLE = False -try: - import tree_sitter # noqa: F401 - from tree_sitter import Language, Node, Parser, Tree - - TREE_SITTER_AVAILABLE = True - logger.info("tree-sitter available, using AST-based analysis") -except ImportError: - logger.warning("tree-sitter not available, using regex-based fallback") - Tree = None - Node = None - -# Try to import language bindings -LANGUAGES_AVAILABLE: dict[str, Any] = {} -if TREE_SITTER_AVAILABLE: - try: - import tree_sitter_python as tspython - - LANGUAGES_AVAILABLE[".py"] = tspython.language() - except ImportError: - pass - - try: - import tree_sitter_javascript as tsjs - - LANGUAGES_AVAILABLE[".js"] = tsjs.language() - LANGUAGES_AVAILABLE[".jsx"] = tsjs.language() - except ImportError: - pass - - try: - import tree_sitter_typescript as tsts - - LANGUAGES_AVAILABLE[".ts"] = tsts.language_typescript() - LANGUAGES_AVAILABLE[".tsx"] = tsts.language_tsx() - except ImportError: - pass - -# Import our modular components -from .semantic_analysis.comparison import compare_elements +# Import regex-based analyzer from .semantic_analysis.models import ExtractedElement from .semantic_analysis.regex_analyzer import analyze_with_regex -if TREE_SITTER_AVAILABLE: - from .semantic_analysis.js_analyzer import extract_js_elements - from .semantic_analysis.python_analyzer import extract_python_elements - class SemanticAnalyzer: """ - Analyzes code changes at a semantic level. - - Uses tree-sitter for AST-based analysis when available, - falling back to regex-based heuristics when not. + Analyzes code changes at a semantic level using regex-based heuristics. 
Example: analyzer = SemanticAnalyzer() @@ -117,28 +59,8 @@ class SemanticAnalyzer: """ def __init__(self): - """Initialize the analyzer with available parsers.""" - self._parsers: dict[str, Parser] = {} - - debug( - MODULE, - "Initializing SemanticAnalyzer", - tree_sitter_available=TREE_SITTER_AVAILABLE, - ) - - if TREE_SITTER_AVAILABLE: - for ext, lang in LANGUAGES_AVAILABLE.items(): - parser = Parser() - parser.language = Language(lang) - self._parsers[ext] = parser - debug_detailed(MODULE, f"Initialized parser for {ext}") - debug_success( - MODULE, - "SemanticAnalyzer initialized", - parsers=list(self._parsers.keys()), - ) - else: - debug(MODULE, "Using regex-based fallback (tree-sitter not available)") + """Initialize the analyzer.""" + debug(MODULE, "Initializing SemanticAnalyzer (regex-based)") def analyze_diff( self, @@ -171,13 +93,8 @@ def analyze_diff( task_id=task_id, ) - # Use tree-sitter if available for this language - if ext in self._parsers: - debug_detailed(MODULE, f"Using tree-sitter parser for {ext}") - analysis = self._analyze_with_tree_sitter(file_path, before, after, ext) - else: - debug_detailed(MODULE, f"Using regex fallback for {ext}") - analysis = analyze_with_regex(file_path, before, after, ext) + # Use regex-based analysis + analysis = analyze_with_regex(file_path, before, after, ext) debug_success( MODULE, @@ -201,77 +118,6 @@ def analyze_diff( return analysis - def _analyze_with_tree_sitter( - self, - file_path: str, - before: str, - after: str, - ext: str, - ) -> FileAnalysis: - """Analyze using tree-sitter AST parsing.""" - parser = self._parsers[ext] - - tree_before = parser.parse(bytes(before, "utf-8")) - tree_after = parser.parse(bytes(after, "utf-8")) - - # Extract structural elements from both versions - elements_before = self._extract_elements(tree_before, before, ext) - elements_after = self._extract_elements(tree_after, after, ext) - - # Compare and generate semantic changes - changes = compare_elements(elements_before, elements_after, ext) - - # Build the analysis - analysis = FileAnalysis(file_path=file_path, changes=changes) - - # Populate summary fields - for change in changes: - if change.change_type in { - ChangeType.MODIFY_FUNCTION, - ChangeType.ADD_HOOK_CALL, - }: - analysis.functions_modified.add(change.target) - elif change.change_type == ChangeType.ADD_FUNCTION: - analysis.functions_added.add(change.target) - elif change.change_type == ChangeType.ADD_IMPORT: - analysis.imports_added.add(change.target) - elif change.change_type == ChangeType.REMOVE_IMPORT: - analysis.imports_removed.add(change.target) - elif change.change_type in { - ChangeType.MODIFY_CLASS, - ChangeType.ADD_METHOD, - }: - analysis.classes_modified.add(change.target.split(".")[0]) - - analysis.total_lines_changed += change.line_end - change.line_start + 1 - - return analysis - - def _extract_elements( - self, - tree: Tree, - source: str, - ext: str, - ) -> dict[str, ExtractedElement]: - """Extract structural elements from a syntax tree.""" - elements: dict[str, ExtractedElement] = {} - source_bytes = bytes(source, "utf-8") - - def get_text(node: Node) -> str: - return source_bytes[node.start_byte : node.end_byte].decode("utf-8") - - def get_line(byte_pos: int) -> int: - # Convert byte position to line number (1-indexed) - return source[:byte_pos].count("\n") + 1 - - # Language-specific extraction - if ext == ".py": - extract_python_elements(tree.root_node, elements, get_text, get_line) - elif ext in {".js", ".jsx", ".ts", ".tsx"}: - extract_js_elements(tree.root_node, 
elements, get_text, get_line, ext) - - return elements - def analyze_file(self, file_path: str, content: str) -> FileAnalysis: """ Analyze a single file's structure (not a diff). @@ -291,12 +137,7 @@ def analyze_file(self, file_path: str, content: str) -> FileAnalysis: @property def supported_extensions(self) -> set[str]: """Get the set of supported file extensions.""" - if TREE_SITTER_AVAILABLE: - # Tree-sitter extensions plus regex fallbacks - return set(self._parsers.keys()) | {".py", ".js", ".jsx", ".ts", ".tsx"} - else: - # Only regex-supported extensions - return {".py", ".js", ".jsx", ".ts", ".tsx"} + return {".py", ".js", ".jsx", ".ts", ".tsx"} def is_supported(self, file_path: str) -> bool: """Check if a file type is supported for semantic analysis.""" diff --git a/apps/backend/merge/timeline_git.py b/apps/backend/merge/timeline_git.py index ebf0952a22..cc9e6ca6cd 100644 --- a/apps/backend/merge/timeline_git.py +++ b/apps/backend/merge/timeline_git.py @@ -189,7 +189,14 @@ def get_worktree_file_content(self, task_id: str, file_path: str) -> str: task_id.replace("task-", "") if task_id.startswith("task-") else task_id ) - worktree_path = self.project_path / ".worktrees" / spec_name / file_path + worktree_path = ( + self.project_path + / ".auto-claude" + / "worktrees" + / "tasks" + / spec_name + / file_path + ) if worktree_path.exists(): try: return worktree_path.read_text(encoding="utf-8") diff --git a/apps/backend/phase_config.py b/apps/backend/phase_config.py index f7b85cdee5..3fc9ba74ef 100644 --- a/apps/backend/phase_config.py +++ b/apps/backend/phase_config.py @@ -7,6 +7,7 @@ """ import json +import os from pathlib import Path from typing import Literal, TypedDict @@ -46,10 +47,10 @@ "complexity_assessment": "medium", } -# Default phase configuration (matches UI defaults) +# Default phase configuration (fallback, matches 'Balanced' profile) DEFAULT_PHASE_MODELS: dict[str, str] = { "spec": "sonnet", - "planning": "opus", + "planning": "sonnet", # Changed from "opus" (fix #433) "coding": "sonnet", "qa": "sonnet", } @@ -94,17 +95,34 @@ def resolve_model_id(model: str) -> str: Resolve a model shorthand (haiku, sonnet, opus) to a full model ID. If the model is already a full ID, return it unchanged. + Priority: + 1. Environment variable override (from API Profile) + 2. Hardcoded MODEL_ID_MAP + 3. 
Pass through unchanged (assume full model ID) + Args: model: Model shorthand or full ID Returns: Full Claude model ID """ - # Check if it's a shorthand + # Check for environment variable override (from API Profile custom model mappings) if model in MODEL_ID_MAP: + env_var_map = { + "haiku": "ANTHROPIC_DEFAULT_HAIKU_MODEL", + "sonnet": "ANTHROPIC_DEFAULT_SONNET_MODEL", + "opus": "ANTHROPIC_DEFAULT_OPUS_MODEL", + } + env_var = env_var_map.get(model) + if env_var: + env_value = os.environ.get(env_var) + if env_value: + return env_value + + # Fall back to hardcoded mapping return MODEL_ID_MAP[model] - # Already a full model ID + # Already a full model ID or unknown shorthand return model diff --git a/apps/backend/project/command_registry/languages.py b/apps/backend/project/command_registry/languages.py index cd10b0d6b1..e91787eb4e 100644 --- a/apps/backend/project/command_registry/languages.py +++ b/apps/backend/project/command_registry/languages.py @@ -173,12 +173,16 @@ "zig", }, "dart": { + # Core Dart CLI (modern unified tool) "dart", + "pub", + # Flutter CLI (included in Dart language for SDK detection) + "flutter", + # Legacy commands (deprecated but may exist in older projects) "dart2js", "dartanalyzer", "dartdoc", "dartfmt", - "pub", }, } diff --git a/apps/backend/project/command_registry/package_managers.py b/apps/backend/project/command_registry/package_managers.py index 46b30b3712..bf6c1d978a 100644 --- a/apps/backend/project/command_registry/package_managers.py +++ b/apps/backend/project/command_registry/package_managers.py @@ -33,6 +33,9 @@ "brew": {"brew"}, "apt": {"apt", "apt-get", "dpkg"}, "nix": {"nix", "nix-shell", "nix-build", "nix-env"}, + # Dart/Flutter package managers + "pub": {"pub", "dart"}, + "melos": {"melos", "dart", "flutter"}, } diff --git a/apps/backend/project/command_registry/version_managers.py b/apps/backend/project/command_registry/version_managers.py index b4356d0449..04e8e3925b 100644 --- a/apps/backend/project/command_registry/version_managers.py +++ b/apps/backend/project/command_registry/version_managers.py @@ -23,6 +23,8 @@ "rustup": {"rustup"}, "sdkman": {"sdk"}, "jabba": {"jabba"}, + # Dart/Flutter version managers + "fvm": {"fvm", "flutter"}, } diff --git a/apps/backend/project/stack_detector.py b/apps/backend/project/stack_detector.py index 051c685c93..0fa67c29b3 100644 --- a/apps/backend/project/stack_detector.py +++ b/apps/backend/project/stack_detector.py @@ -164,6 +164,12 @@ def detect_package_managers(self) -> None: if self.parser.file_exists("build.gradle", "build.gradle.kts"): self.stack.package_managers.append("gradle") + # Dart/Flutter package managers + if self.parser.file_exists("pubspec.yaml", "pubspec.lock"): + self.stack.package_managers.append("pub") + if self.parser.file_exists("melos.yaml"): + self.stack.package_managers.append("melos") + def detect_databases(self) -> None: """Detect databases from config files and dependencies.""" # Check for database config files @@ -358,3 +364,6 @@ def detect_version_managers(self) -> None: self.stack.version_managers.append("rbenv") if self.parser.file_exists("rust-toolchain.toml", "rust-toolchain"): self.stack.version_managers.append("rustup") + # Flutter Version Manager + if self.parser.file_exists(".fvm", ".fvmrc", "fvm_config.json"): + self.stack.version_managers.append("fvm") diff --git a/apps/backend/prompts/coder.md b/apps/backend/prompts/coder.md index c9cde7f3c2..8b0acd9ef1 100644 --- a/apps/backend/prompts/coder.md +++ b/apps/backend/prompts/coder.md @@ -22,6 +22,68 @@ environment at 
the start of each prompt in the "YOUR ENVIRONMENT" section. Pay c
 
 ---
 
+## 🚨 CRITICAL: PATH CONFUSION PREVENTION 🚨
+
+**THE #1 BUG IN MONOREPOS: Doubled paths after `cd` commands**
+
+### The Problem
+
+After running `cd ./apps/frontend`, your current directory changes. If you then use paths like `apps/frontend/src/file.ts`, you're creating **doubled paths** like `apps/frontend/apps/frontend/src/file.ts`.
+
+### The Solution: ALWAYS CHECK YOUR CWD
+
+**BEFORE every git command or file operation:**
+
+```bash
+# Step 1: Check where you are
+pwd
+
+# Step 2: Use paths RELATIVE TO CURRENT DIRECTORY
+# If pwd shows: /path/to/project/apps/frontend
+# Then use: git add src/file.ts
+# NOT: git add apps/frontend/src/file.ts
+```
+
+### Examples
+
+**❌ WRONG - Path gets doubled:**
+```bash
+cd ./apps/frontend
+git add apps/frontend/src/file.ts  # Looks for apps/frontend/apps/frontend/src/file.ts
+```
+
+**✅ CORRECT - Use relative path from current directory:**
+```bash
+cd ./apps/frontend
+pwd  # Shows: /path/to/project/apps/frontend
+git add src/file.ts  # Correctly adds apps/frontend/src/file.ts from project root
+```
+
+**✅ ALSO CORRECT - Stay at root, use full relative path:**
+```bash
+# Don't change directory at all
+git add ./apps/frontend/src/file.ts  # Works from project root
+```
+
+### Mandatory Pre-Command Check
+
+**Before EVERY git add, git commit, or file operation in a monorepo:**
+
+```bash
+# 1. Where am I?
+pwd
+
+# 2. What files am I targeting?
+ls -la [target-path]  # Verify the path exists
+
+# 3. Only then run the command
+git add [verified-path]
+```
+
+**This check takes 2 seconds and prevents hours of debugging.**
+
+---
+
 ## STEP 1: GET YOUR BEARINGS (MANDATORY)
 
 First, check your environment. The prompt should tell you your working directory and spec location.
@@ -358,6 +420,20 @@
 
 ## STEP 6: IMPLEMENT THE SUBTASK
 
+### Verify Your Location FIRST
+
+**MANDATORY: Before implementing anything, confirm where you are:**
+
+```bash
+# This should match the "Working Directory" in YOUR ENVIRONMENT section above
+pwd
+```
+
+If you change directories during implementation (e.g., `cd apps/frontend`), remember:
+- Your file paths must be RELATIVE TO YOUR NEW LOCATION
+- Before any git operation, run `pwd` again to verify your location
+- See the "PATH CONFUSION PREVENTION" section above for examples
+
 ### Mark as In Progress
 
 Update `implementation_plan.json`:
@@ -618,6 +694,31 @@
 
 ## STEP 9: COMMIT YOUR PROGRESS
 
+### Path Verification (MANDATORY FIRST STEP)
+
+**🚨 BEFORE running ANY git commands, verify your current directory:**
+
+```bash
+# Step 1: Where am I?
+pwd
+
+# Step 2: What files do I want to commit?
+# If you changed to a subdirectory (e.g., cd apps/frontend),
+# you need to use paths RELATIVE TO THAT DIRECTORY, not from project root
+
+# Step 3: Verify paths exist
+ls -la [path-to-files]  # Make sure the path is correct from your current location
+
+# Example in a monorepo:
+# If pwd shows: /project/apps/frontend
+# Then use: git add src/file.ts
+# NOT: git add apps/frontend/src/file.ts (this would look for apps/frontend/apps/frontend/src/file.ts)
+```
+
+**CRITICAL RULE:** If you're in a subdirectory, either:
+- **Option A:** Return to project root: `cd [back to working directory]`
+- **Option B:** Use paths relative to your CURRENT directory (check with `pwd`)
+
 ### Secret Scanning (Automatic)
 
 The system **automatically scans for secrets** before every commit. If secrets are detected, the commit will be blocked and you'll receive detailed instructions on how to fix it.
@@ -634,7 +735,7 @@ The system **automatically scans for secrets** before every commit. If secrets a
    api_key = os.environ.get("API_KEY")
    ```
 3. **Update .env.example** - Add placeholder for the new variable
-4. **Re-stage and retry** - `git add . && git commit ...`
+4. **Re-stage and retry** - `git add . ':!.auto-claude' && git commit ...`
 
 **If it's a false positive:**
 - Add the file pattern to `.secretsignore` in the project root
@@ -643,7 +744,17 @@
 ### Create the Commit
 
 ```bash
-git add .
+# FIRST: Make sure you're in the working directory root (check YOUR ENVIRONMENT section at top)
+pwd  # Should match your working directory
+
+# Add all files EXCEPT .auto-claude directory (spec files should never be committed)
+git add . ':!.auto-claude'
+
+# If git add fails with "pathspec did not match", you have a path problem:
+# 1. Run pwd to see where you are
+# 2. Run git status to see what git sees
+# 3. Adjust your paths accordingly
+
 git commit -m "auto-claude: Complete [subtask-id] - [subtask description]
 
 - Files modified: [list]
@@ -651,6 +762,9 @@
 - Phase progress: [X]/[Y] subtasks complete"
 ```
 
+**CRITICAL**: The `:!.auto-claude` pathspec exclusion ensures spec files are NEVER committed.
+These are internal tracking files that must stay local.
+
 ### DO NOT Push to Remote
 
 **IMPORTANT**: Do NOT run `git push`. All work stays local until the user reviews and approves.
@@ -956,6 +1070,17 @@ Prepare → Test (small batch) → Execute (full) → Cleanup
 - Clean, working state
 - **Secret scan must pass before commit**
+### Git Configuration - NEVER MODIFY
+**CRITICAL**: You MUST NOT modify git user configuration. Never run:
+- `git config user.name`
+- `git config user.email`
+- `git config --local user.*`
+- `git config --global user.*`
+
+The repository inherits the user's configured git identity. Creating "Test User" or
+any other fake identity breaks attribution and causes serious issues. If you need
+to commit changes, use the existing git identity - do NOT set a new one.
+
 ### The Golden Rule
 
 **FIX BUGS NOW.** The next session has no memory.
diff --git a/apps/backend/prompts/github/pr_codebase_fit_agent.md b/apps/backend/prompts/github/pr_codebase_fit_agent.md
index f9e14e1e3f..9a14b56dbc 100644
--- a/apps/backend/prompts/github/pr_codebase_fit_agent.md
+++ b/apps/backend/prompts/github/pr_codebase_fit_agent.md
@@ -6,6 +6,23 @@ You are a focused codebase fit review agent. You have been spawned by the orches
 
 Ensure new code integrates well with the existing codebase. Check for consistency with project conventions, reuse of existing utilities, and architectural alignment. Focus ONLY on codebase fit - not security, logic correctness, or general quality.
 
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Codebase fit issues in changed code** - New code not following project patterns
+2. **Missed reuse opportunities** - "Existing `utils.ts` has a helper for this"
+3. **Inconsistent with PR's own changes** - "You used `camelCase` here but `snake_case` elsewhere in the PR"
+4. **Breaking conventions in touched areas** - "Your change deviates from the pattern in this file"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing inconsistencies** - Old code that doesn't follow patterns
+2. **Unrelated suggestions** - Don't suggest patterns for code the PR didn't touch
**Unrelated suggestions** - Don't suggest patterns for code the PR didn't touch
+
+**Key distinction:**
+- ✅ "Your new component doesn't follow the existing pattern in `components/`" - GOOD
+- ✅ "Consider using existing `formatDate()` helper instead of new implementation" - GOOD
+- ❌ "The old `legacy/` folder uses different naming conventions" - BAD (pre-existing)
+
 ## Codebase Fit Focus Areas
 
 ### 1. Naming Conventions
diff --git a/apps/backend/prompts/github/pr_finding_validator.md b/apps/backend/prompts/github/pr_finding_validator.md
index b054344ea9..6421e37132 100644
--- a/apps/backend/prompts/github/pr_finding_validator.md
+++ b/apps/backend/prompts/github/pr_finding_validator.md
@@ -1,16 +1,37 @@
 # Finding Validator Agent
 
-You are a finding re-investigator. For each unresolved finding from a previous PR review, you must actively investigate whether it is a REAL issue or a FALSE POSITIVE.
+You are a finding re-investigator using EVIDENCE-BASED VALIDATION. For each unresolved finding from a previous PR review, you must actively investigate whether it is a REAL issue or a FALSE POSITIVE.
+
+**Core Principle: Evidence, not confidence scores.** Either you can prove the issue exists with actual code, or you can't. There is no middle ground.
 
 Your job is to prevent false positives from persisting indefinitely by actually reading the code and verifying the issue exists.
 
+## CRITICAL: Check PR Scope First
+
+**Before investigating any finding, verify it's within THIS PR's scope:**
+
+1. **Check if the file is in the PR's changed files list** - If not, likely out-of-scope
+2. **Check if the line number exists** - If finding cites line 710 but file has 600 lines, it's hallucinated
+3. **Check for PR references in commit messages** - Commits like `fix: something (#584)` are from OTHER PRs
+
+**Dismiss findings as `dismissed_false_positive` if:**
+- The finding references a file NOT in the PR's changed files list AND is not about impact on that file
+- The line number doesn't exist in the file (hallucinated)
+- The finding is about code from a merged branch commit (not this PR's work)
+
+**Keep findings valid if they're about:**
+- Issues in code the PR actually changed
+- Impact of PR changes on other code (e.g., "this change breaks callers in X")
+- Missing updates to related code (e.g., "you updated A but forgot B")
+
 ## Your Mission
 
 For each finding you receive:
-1. **READ** the actual code at the file/line location using the Read tool
-2. **ANALYZE** whether the described issue actually exists in the code
-3. **PROVIDE** concrete code evidence for your conclusion
-4. **RETURN** validation status with evidence
+1. **VERIFY SCOPE** - Is this file/line actually part of this PR?
+2. **READ** the actual code at the file/line location using the Read tool
+3. **ANALYZE** whether the described issue actually exists in the code
+4. **PROVIDE** concrete code evidence - the actual code that proves or disproves the issue
+5.
**RETURN** validation status with evidence (binary decision based on what the code shows)
 
 ## Investigation Process
 
@@ -24,45 +45,61 @@ Read the file: {finding.file}
 Focus on lines around: {finding.line}
 ```
 
-### Step 2: Analyze with Fresh Eyes
+### Step 2: Analyze with Fresh Eyes - NEVER ASSUME
+
+**CRITICAL: Do NOT assume the original finding is correct.** The original reviewer may have:
+- Hallucinated line numbers that don't exist
+- Misread or misunderstood the code
+- Missed validation/sanitization in callers or surrounding code
+- Made assumptions without actually reading the implementation
+- Confused similar-looking code patterns
+
+**You MUST actively verify by asking:**
+- Does the code at this exact line ACTUALLY have this issue?
+- Did I READ the actual implementation, not just the function name?
+- Is there validation/sanitization BEFORE this code is reached?
+- Is there framework protection I'm not accounting for?
+- Does this line number even EXIST in the file?
 
-**Do NOT assume the original finding is correct.** Ask yourself:
-- Does the code ACTUALLY have this issue?
-- Is the described vulnerability/bug/problem present?
-- Could the original reviewer have misunderstood the code?
-- Is there context that makes this NOT an issue (e.g., sanitization elsewhere)?
+**NEVER:**
+- Trust the finding description without reading the code
+- Assume a function is vulnerable based on its name
+- Skip checking surrounding context (±20 lines minimum)
+- Confirm a finding just because "it sounds plausible"
 
-Be skeptical. The original review may have hallucinated this finding.
+Be HIGHLY skeptical. AI reviews frequently produce false positives. Your job is to catch them.
 
 ### Step 3: Document Evidence
 
 You MUST provide concrete evidence:
-- **Exact code snippet** you examined (copy-paste from the file)
+- **Exact code snippet** you examined (copy-paste from the file) - this is the PROOF
 - **Line numbers** where you found (or didn't find) the issue
-- **Your analysis** of whether the issue exists
-- **Confidence level** (0.0-1.0) in your conclusion
+- **Your analysis** connecting the code to your conclusion
+- **Verification flag** - did this code actually exist at the specified location?
 
 ## Validation Statuses
 
 ### `confirmed_valid`
-Use when you verify the issue IS real:
+Use when your code evidence PROVES the issue IS real:
 - The problematic code pattern exists exactly as described
-- The vulnerability/bug is present and exploitable
+- You can point to the specific lines showing the vulnerability/bug
 - The code quality issue genuinely impacts the codebase
+- **Key question**: Does your code_evidence field contain the actual problematic code?
 
 ### `dismissed_false_positive`
-Use when you verify the issue does NOT exist:
-- The described code pattern is not actually present
-- The original finding misunderstood the code
-- There is mitigating code that prevents the issue (e.g., input validation elsewhere)
-- The finding was based on incorrect assumptions
+Use when your code evidence PROVES the issue does NOT exist:
+- The described code pattern is not actually present (code_evidence shows different code)
+- There is mitigating code that prevents the issue (code_evidence shows the mitigation)
+- The finding was based on incorrect assumptions (code_evidence shows reality)
+- The line number doesn't exist or contains different code than claimed
+- **Key question**: Does your code_evidence field show code that disproves the original finding?
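+
+As a concrete illustration of the "line number doesn't exist" check, a quick shell probe can settle it. This is a sketch only, using the hypothetical file from the examples below; it is not a required tool:
+
+```bash
+# Was line 710 hallucinated? Compare against the real file length.
+wc -l < src/api/users.ts        # prints the total line count (e.g. 600)
+sed -n '710p' src/api/users.ts  # prints line 710, or nothing if it does not exist
+```
+
+If the probe prints nothing, set `evidence_verified_in_file` to false and dismiss the finding as hallucinated.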
### `needs_human_review` -Use when you cannot determine with confidence: -- The issue requires runtime analysis to verify +Use when you CANNOT find definitive evidence either way: +- The issue requires runtime analysis to verify (static code doesn't prove/disprove) - The code is too complex to analyze statically -- You have conflicting evidence -- Your confidence is below 0.70 +- You found the code but can't determine if it's actually a problem +- **Key question**: Is your code_evidence inconclusive? ## Output Format @@ -75,7 +112,7 @@ Return one result per finding: "code_evidence": "const query = `SELECT * FROM users WHERE id = ${userId}`;", "line_range": [45, 45], "explanation": "SQL injection vulnerability confirmed. User input 'userId' is directly interpolated into the SQL query at line 45 without any sanitization. The query is executed via db.execute() on line 46.", - "confidence": 0.95 + "evidence_verified_in_file": true } ``` @@ -85,8 +122,8 @@ Return one result per finding: "validation_status": "dismissed_false_positive", "code_evidence": "function processInput(data: string): string {\n const sanitized = DOMPurify.sanitize(data);\n return sanitized;\n}", "line_range": [23, 26], - "explanation": "The original finding claimed XSS vulnerability, but the code uses DOMPurify.sanitize() before output. The input is properly sanitized at line 24 before being returned.", - "confidence": 0.88 + "explanation": "The original finding claimed XSS vulnerability, but the code uses DOMPurify.sanitize() before output. The input is properly sanitized at line 24 before being returned. The code evidence proves the issue does NOT exist.", + "evidence_verified_in_file": true } ``` @@ -96,38 +133,56 @@ Return one result per finding: "validation_status": "needs_human_review", "code_evidence": "async function handleRequest(req) {\n // Complex async logic...\n}", "line_range": [100, 150], - "explanation": "The original finding claims a race condition, but verifying this requires understanding the runtime behavior and concurrency model. Cannot determine statically.", - "confidence": 0.45 + "explanation": "The original finding claims a race condition, but verifying this requires understanding the runtime behavior and concurrency model. The static code doesn't provide definitive evidence either way.", + "evidence_verified_in_file": true } ``` -## Confidence Guidelines +```json +{ + "finding_id": "HALLUC-004", + "validation_status": "dismissed_false_positive", + "code_evidence": "// Line 710 does not exist - file only has 600 lines", + "line_range": [600, 600], + "explanation": "The original finding claimed an issue at line 710, but the file only has 600 lines. 
This is a hallucinated finding - the code doesn't exist.",
+  "evidence_verified_in_file": false
+}
+```
+
+## Evidence Guidelines
 
-Rate your confidence based on how certain you are:
+Validation is binary based on what the code evidence shows:
 
-| Confidence | Meaning |
-|------------|---------|
-| 0.90-1.00 | Definitive evidence - code clearly shows the issue exists/doesn't exist |
-| 0.80-0.89 | Strong evidence - high confidence with minor uncertainty |
-| 0.70-0.79 | Moderate evidence - likely correct but some ambiguity |
-| 0.50-0.69 | Uncertain - use `needs_human_review` |
-| Below 0.50 | Insufficient evidence - must use `needs_human_review` |
+| Scenario | Status | Evidence Required |
+|----------|--------|-------------------|
+| Code shows the exact problem claimed | `confirmed_valid` | Problematic code snippet |
+| Code shows issue doesn't exist or is mitigated | `dismissed_false_positive` | Code proving issue is absent |
+| Code couldn't be found (hallucinated line/file) | `dismissed_false_positive` | Note that code doesn't exist |
+| Code found but can't prove/disprove statically | `needs_human_review` | The inconclusive code |
 
-**Minimum thresholds:**
-- To confirm as `confirmed_valid`: confidence >= 0.70
-- To dismiss as `dismissed_false_positive`: confidence >= 0.80 (higher bar for dismissal)
-- If below thresholds: must use `needs_human_review`
+**Decision rules:**
+- If `code_evidence` contains problematic code → `confirmed_valid`
+- If `code_evidence` proves issue doesn't exist → `dismissed_false_positive`
+- If `evidence_verified_in_file` is false → `dismissed_false_positive` (hallucinated finding)
+- If you can't determine from the code → `needs_human_review`
 
 ## Common False Positive Patterns
 
 Watch for these patterns that often indicate false positives:
 
-1. **Sanitization elsewhere**: Input is validated/sanitized before reaching the flagged code
-2. **Internal-only code**: Code only handles trusted internal data, not user input
-3. **Framework protection**: Framework provides automatic protection (e.g., ORM parameterization)
-4. **Dead code**: The flagged code is never executed in the current codebase
-5. **Test code**: The issue is in test files where it's acceptable
-6. **Misread syntax**: Original reviewer misunderstood the language syntax
+1. **Non-existent line number**: The line number cited doesn't exist or is beyond EOF - hallucinated finding
+2. **Merged branch code**: Finding is about code from a commit like `fix: something (#584)` - another PR
+3. **Pre-existing issue, not impact**: Finding flags old bug in untouched code without showing how PR changes relate
+4. **Sanitization elsewhere**: Input is validated/sanitized before reaching the flagged code
+5. **Internal-only code**: Code only handles trusted internal data, not user input
+6. **Framework protection**: Framework provides automatic protection (e.g., ORM parameterization)
+7. **Dead code**: The flagged code is never executed in the current codebase
+8. **Test code**: The issue is in test files where it's acceptable
+9. **Misread syntax**: Original reviewer misunderstood the language syntax
+
+**Note**: Findings about files outside the PR's changed list are NOT automatically false positives if they're about:
+- Impact of PR changes on that file (e.g., "your change breaks X")
+- Missing related updates (e.g., "you forgot to update Y")
 
 ## Common Valid Issue Patterns
 
@@ -144,15 +199,16 @@ These patterns often confirm the issue is real:
 
 1.
**ALWAYS read the actual code** - Never rely on memory or the original finding description
 2. **ALWAYS provide code_evidence** - No empty strings. Quote the actual code.
 3. **Be skeptical of original findings** - Many AI reviews produce false positives
-4. **Higher bar for dismissal** - Need 0.80 confidence to dismiss (vs 0.70 to confirm)
-5. **When uncertain, escalate** - Use `needs_human_review` rather than guessing
+4. **Evidence is binary** - The code either shows the problem or it doesn't
+5. **When evidence is inconclusive, escalate** - Use `needs_human_review` rather than guessing
 6. **Look for mitigations** - Check surrounding code for sanitization/validation
 7. **Check the full context** - Read ±20 lines, not just the flagged line
+8. **Verify code exists** - Set `evidence_verified_in_file` to false if the code/line doesn't exist
 
 ## Anti-Patterns to Avoid
 
-- **Trusting the original finding blindly** - Always verify
-- **Dismissing without reading code** - Must provide code_evidence
-- **Low confidence dismissals** - Needs 0.80+ confidence to dismiss
-- **Vague explanations** - Be specific about what you found
+- **Trusting the original finding blindly** - Always verify with actual code
+- **Dismissing without reading code** - Must provide code_evidence that proves your point
+- **Vague explanations** - Be specific about what the code shows and why it proves/disproves the issue
 - **Missing line numbers** - Always include line_range
+- **Speculative conclusions** - Only conclude what the code evidence actually proves
diff --git a/apps/backend/prompts/github/pr_followup.md b/apps/backend/prompts/github/pr_followup.md
index 1e2fe04efb..423463f05b 100644
--- a/apps/backend/prompts/github/pr_followup.md
+++ b/apps/backend/prompts/github/pr_followup.md
@@ -71,10 +71,12 @@ Review the diff since the last review for NEW issues:
 - Regressions that break previously working code
 - Missing error handling in new code paths
 
-**Apply the 80% confidence threshold:**
-- Only report issues you're confident about
+**NEVER ASSUME - ALWAYS VERIFY:**
+- Actually READ the code before reporting any finding
+- Verify the issue exists at the exact line you cite
+- Check for validation/mitigation in surrounding code
 - Don't re-report issues from the previous review
-- Focus on genuinely new problems
+- Focus on genuinely new problems with code EVIDENCE
 
 ### Phase 3: Comment Review
 
@@ -137,11 +139,11 @@ Return a JSON object with this structure:
       "id": "new-finding-1",
       "severity": "medium",
       "category": "security",
-      "confidence": 0.85,
       "title": "New hardcoded API key in config",
       "description": "A new API key was added in config.ts line 45 without using environment variables.",
       "file": "src/config.ts",
       "line": 45,
+      "evidence": "const API_KEY = 'sk-prod-abc123xyz789';",
       "suggested_fix": "Move to environment variable: process.env.EXTERNAL_API_KEY"
     }
   ],
@@ -175,11 +177,11 @@ Same format as initial review findings:
 
 - **id**: Unique identifier for new finding
 - **severity**: `critical` | `high` | `medium` | `low`
 - **category**: `security` | `quality` | `logic` | `test` | `docs` | `pattern` | `performance`
-- **confidence**: Float 0.80-1.0
 - **title**: Short summary (max 80 chars)
 - **description**: Detailed explanation
 - **file**: Relative file path
 - **line**: Line number
+- **evidence**: **REQUIRED** - Actual code snippet proving the issue exists
 - **suggested_fix**: How to resolve
 
 ### verdict
 
diff --git a/apps/backend/prompts/github/pr_followup_newcode_agent.md
b/apps/backend/prompts/github/pr_followup_newcode_agent.md
index c35e84f876..5021113b97 100644
--- a/apps/backend/prompts/github/pr_followup_newcode_agent.md
+++ b/apps/backend/prompts/github/pr_followup_newcode_agent.md
@@ -11,6 +11,23 @@ Review the incremental diff for:
 4. Potential regressions
 5. Incomplete implementations
 
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Issues in changed code** - Problems in files/lines actually modified by this PR
+2. **Impact on unchanged code** - "This change breaks callers in `other_file.ts`"
+3. **Missing related changes** - "Similar pattern in `utils.ts` wasn't updated"
+4. **Incomplete implementations** - "New field added but not handled in serializer"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing bugs** - Old bugs in code this PR didn't touch
+2. **Code from merged branches** - Commits with PR references like `(#584)` are from other PRs
+3. **Unrelated improvements** - Don't suggest refactoring untouched code
+
+**Key distinction:**
+- ✅ "Your change breaks the caller in `auth.ts`" - GOOD (impact analysis)
+- ❌ "The old code in `legacy.ts` has a bug" - BAD (pre-existing, not this PR)
+
 ## Focus Areas
 
 Since this is a follow-up review, focus on:
@@ -74,15 +91,47 @@ Since this is a follow-up review, focus on:
 - Minor optimizations
 - Documentation gaps
 
-## Confidence Scoring
+## NEVER ASSUME - ALWAYS VERIFY
+
+**Before reporting ANY new finding:**
+
+1. **NEVER assume code is vulnerable** - Read the actual implementation
+2. **NEVER assume validation is missing** - Check callers and surrounding code
+3. **NEVER assume based on function names** - `unsafeQuery()` might actually be safe
+4. **NEVER report without reading the code** - Verify the issue exists at the exact line
+
+**You MUST:**
+- Actually READ the code at the file/line you cite
+- Verify there's no sanitization/validation before this code
+- Check for framework protections you might miss
+- Provide the actual code snippet as evidence
+
+### Verify Before Reporting "Missing" Safeguards
+
+For findings claiming something is **missing** (no fallback, no validation, no error handling):
+
+**Ask yourself**: "Have I verified this is actually missing, or did I just not see it?"
+
+- Read the **complete function/method** containing the issue, not just the flagged line
+- Check for guards, fallbacks, or defensive code that may appear later in the function
+- Look for comments indicating intentional design choices
+- If uncertain, use the Read/Grep tools to confirm
+
+**Your evidence must prove absence — not just that you didn't see it.**
+
+❌ **Weak**: "The code defaults to 'main' without checking if it exists"
+✅ **Strong**: "I read the complete `_detect_target_branch()` function. There is no existence check before the default return."
+
+**Only report if you can confidently say**: "I verified the complete scope and the safeguard does not exist."
+
+## Evidence Requirements
 
-Rate confidence (0.0-1.0) based on:
-- **>0.9**: Obvious, verifiable issue
-- **0.8-0.9**: High confidence with clear evidence
-- **0.7-0.8**: Likely issue but some uncertainty
-- **<0.7**: Possible issue, needs verification
+Every finding MUST include an `evidence` field with:
+- The actual problematic code copy-pasted from the diff
+- The specific line numbers where the issue exists
+- Proof that the issue is real, not speculative
 
-Only report findings with confidence >0.7.
+**No evidence = No finding**
 
 ## Output Format
 
@@ -99,7 +148,7 @@ Return findings in this structure:
       "description": "The new login validation query concatenates user input directly into the SQL string without sanitization.",
       "category": "security",
       "severity": "critical",
-      "confidence": 0.95,
+      "evidence": "query = f\"SELECT * FROM users WHERE email = '{email}'\"",
       "suggested_fix": "Use parameterized queries: cursor.execute('SELECT * FROM users WHERE email = ?', (email,))",
       "fixable": true,
       "source_agent": "new-code-reviewer",
@@ -113,7 +162,7 @@ Return findings in this structure:
       "description": "The fix for LOGIC-003 removed a null check that was protecting against undefined input. Now input.data can be null.",
       "category": "regression",
       "severity": "high",
-      "confidence": 0.88,
+      "evidence": "result = input.data.process(); // input.data can be null; the removed guard was: if (input && input.data)",
       "suggested_fix": "Restore null check: if (input && input.data) { ... }",
       "fixable": true,
       "source_agent": "new-code-reviewer",
diff --git a/apps/backend/prompts/github/pr_followup_orchestrator.md b/apps/backend/prompts/github/pr_followup_orchestrator.md
index da2ee6b97a..4e714df4c3 100644
--- a/apps/backend/prompts/github/pr_followup_orchestrator.md
+++ b/apps/backend/prompts/github/pr_followup_orchestrator.md
@@ -9,6 +9,40 @@ Perform a focused, efficient follow-up review by:
 2. Delegating to specialized agents based on what needs verification
 3. Synthesizing findings into a final merge verdict
 
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Issues in changed code** - Problems in files/lines actually modified by this PR
+2. **Impact on unchanged code** - "You changed X but forgot to update Y that depends on it"
+3. **Missing related changes** - "This pattern also exists in Z, did you mean to update it too?"
+4. **Breaking changes** - "This change breaks callers in other files"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing issues in unchanged code** - If old code has a bug but this PR didn't touch it, don't flag it
+2. **Code from merged branches** - Commits with PR references like `(#584)` are from OTHER already-reviewed PRs
+3. **Unrelated improvements** - Don't suggest refactoring code the PR didn't touch
+
+**Key distinction:**
+- ✅ "Your change to `validateUser()` breaks the caller in `auth.ts:45`" - GOOD (impact of PR changes)
+- ✅ "You updated this validation but similar logic in `utils.ts` wasn't updated" - GOOD (incomplete change)
+- ❌ "The existing code in `legacy.ts` has a SQL injection" - BAD (pre-existing issue, not this PR)
+- ❌ "This code from commit `fix: something (#584)` has an issue" - BAD (different PR)
+
+**Why this matters:**
+When authors merge the base branch into their feature branch, the commit range includes commits from other PRs. The context gathering system filters these out, but if any slip through, recognize them as out-of-scope.
+
+## Merge Conflicts
+
+**Check for merge conflicts in the follow-up context.** If `has_merge_conflicts` is `true`:
+
+1. **Report this prominently** - Merge conflicts block the PR from being merged
+2. **Add a CRITICAL finding** with category "merge_conflict" and severity "critical"
+3. **Include in verdict reasoning** - The PR cannot be merged until conflicts are resolved
+4. **This may be NEW since last review** - Base branch may have changed
+
+Note: GitHub's API tells us IF there are conflicts but not WHICH files.
The finding should state:
+> "This PR has merge conflicts with the base branch that must be resolved before merging."
+
 ## Available Specialist Agents
 
 You have access to these specialist agents via the Task tool:
@@ -97,7 +131,21 @@ After all agents complete:
 
 ## Verdict Guidelines
 
+### CRITICAL: CI Status ALWAYS Factors Into Verdict
+
+**CI status is provided in the context and MUST be considered:**
+
+- ❌ **Failing CI = BLOCKED** - If ANY CI checks are failing, verdict MUST be BLOCKED regardless of code quality
+- ⏳ **Pending CI = NEEDS_REVISION** - If CI is still running, verdict cannot be READY_TO_MERGE
+- ⏸️ **Awaiting approval = BLOCKED** - Fork PR workflows awaiting maintainer approval block merge
+- ✅ **All passing = Continue with code analysis** - Only then do code findings determine verdict
+
+**Always mention CI status in your verdict_reasoning.** For example:
+- "BLOCKED: 2 CI checks failing (CodeQL, test-frontend). Fix CI before merge."
+- "READY_TO_MERGE: All CI checks passing and all findings resolved."
+
 ### READY_TO_MERGE
+- **All CI checks passing** (no failing, no pending)
 - All previous findings verified as resolved OR dismissed as false positives
 - No CONFIRMED_VALID critical/high issues remaining
 - No new critical/high issues
@@ -105,11 +153,13 @@ After all agents complete:
 - Contributor questions addressed
 
 ### MERGE_WITH_CHANGES
+- **All CI checks passing**
 - Previous findings resolved
 - Only LOW severity new issues (suggestions)
 - Optional polish items can be addressed post-merge
 
 ### NEEDS_REVISION (Strict Quality Gates)
+- **CI checks pending** OR
 - HIGH or MEDIUM severity findings CONFIRMED_VALID (not dismissed as false positive)
 - New HIGH or MEDIUM severity issues introduced
 - Important contributor concerns unaddressed
@@ -117,6 +167,8 @@ After all agents complete:
 - **Note: Only count findings that passed validation** (dismissed_false_positive findings don't block)
 
 ### BLOCKED
+- **Any CI checks failing** OR
+- **Workflows awaiting maintainer approval** (fork PRs) OR
 - CRITICAL findings remain CONFIRMED_VALID (not dismissed as false positive)
 - New CRITICAL issues introduced
 - Fundamental problems with the fix approach
@@ -171,16 +223,36 @@ Provide your synthesis as a structured response matching the ParallelFollowupRes
 }
 ```
 
+## CRITICAL: NEVER ASSUME - ALWAYS VERIFY
+
+**This applies to ALL agents you invoke:**
+
+1. **NEVER assume a finding is valid** - The finding-validator MUST read the actual code
+2. **NEVER assume a fix is correct** - The resolution-verifier MUST verify the change
+3. **NEVER assume line numbers are accurate** - Files may be shorter than cited lines
+4. **NEVER assume validation is missing** - Check callers and surrounding code
+5. **NEVER trust the original finding's description** - It may have been hallucinated
+
+**Before ANY finding blocks merge:**
+- The actual code at that location MUST be read
+- The problematic pattern MUST exist as described
+- There MUST NOT be mitigation/validation elsewhere
+- The evidence MUST be copy-pasted from the actual file
+
+**Why this matters:** AI reviewers sometimes hallucinate findings. Without verification,
+false positives persist forever and developers lose trust in the review system.
+
 ## Important Notes
 
 1. **Be efficient**: Follow-up reviews should be faster than initial reviews
 2. **Focus on changes**: Only review what changed since last review
-3. **Trust but verify**: Don't assume fixes are correct just because files changed
+3.
**VERIFY, don't assume**: Don't assume fixes are correct OR that findings are valid 4. **Acknowledge progress**: Recognize genuine effort to address feedback 5. **Be specific**: Clearly state what blocks merge if verdict is not READY_TO_MERGE ## Context You Will Receive +- **CI Status (CRITICAL)** - Passing/failing/pending checks and specific failed check names - Previous review summary and findings - New commits since last review (SHAs, messages) - Diff of changes since last review diff --git a/apps/backend/prompts/github/pr_followup_resolution_agent.md b/apps/backend/prompts/github/pr_followup_resolution_agent.md index c0e4c38f15..9e35b827db 100644 --- a/apps/backend/prompts/github/pr_followup_resolution_agent.md +++ b/apps/backend/prompts/github/pr_followup_resolution_agent.md @@ -10,6 +10,23 @@ For each previous finding, determine whether it has been: - **unresolved**: The issue remains or wasn't addressed - **cant_verify**: Not enough information to determine status +## CRITICAL: Verify Finding is In-Scope + +**Before verifying any finding, check if it's within THIS PR's scope:** + +1. **Is the file in the PR's changed files list?** - If not AND the finding isn't about impact, mark as `cant_verify` +2. **Does the line number exist?** - If finding cites line 710 but file has 600 lines, it was hallucinated +3. **Was this from a merged branch?** - Commits with PR references like `(#584)` are from other PRs + +**Mark as `cant_verify` if:** +- Finding references a file not in PR AND is not about impact of PR changes on that file +- Line number doesn't exist (hallucinated finding) +- Finding is about code from another PR's commits + +**Findings can reference files outside the PR if they're about:** +- Impact of PR changes (e.g., "change to X breaks caller in Y") +- Missing related updates (e.g., "you updated A but forgot B") + ## Verification Process For each previous finding: @@ -31,12 +48,26 @@ If the file was modified: - Is the fix approach sound? - Are there edge cases the fix misses? -### 4. Assign Confidence -Rate your confidence (0.0-1.0): -- **>0.9**: Clear evidence of resolution/non-resolution -- **0.7-0.9**: Strong indicators but some uncertainty -- **0.5-0.7**: Mixed signals, moderate confidence -- **<0.5**: Unclear, consider marking as cant_verify +### 4. Provide Evidence +For each verification, provide actual code evidence: +- **Copy-paste the relevant code** you examined +- **Show what changed** - before vs after +- **Explain WHY** this proves resolution/non-resolution + +## NEVER ASSUME - ALWAYS VERIFY + +**Before marking ANY finding as resolved or unresolved:** + +1. **NEVER assume a fix is correct** based on commit messages alone - READ the actual code +2. **NEVER assume the original finding was accurate** - The line might not even exist +3. **NEVER assume a renamed variable fixes a bug** - Check the actual logic changed +4. **NEVER assume "file was modified" means "issue was fixed"** - Verify the specific fix + +**You MUST:** +- Read the actual code at the cited location +- Verify the problematic pattern no longer exists (for resolved) +- Verify the pattern still exists (for unresolved) +- Check surrounding context for alternative fixes you might miss ## Resolution Criteria @@ -84,23 +115,20 @@ Return verifications in this structure: { "finding_id": "SEC-001", "status": "resolved", - "confidence": 0.92, - "evidence": "The SQL query at line 45 now uses parameterized queries instead of string concatenation. 
The fix properly escapes all user inputs.",
-    "resolution_notes": "Changed from f-string to cursor.execute() with parameters"
+    "evidence": "cursor.execute('SELECT * FROM users WHERE id = ?', (user_id,))",
+    "resolution_notes": "Changed from f-string to cursor.execute() with parameters. The code at line 45 now uses parameterized queries."
   },
   {
     "finding_id": "QUAL-002",
     "status": "partially_resolved",
-    "confidence": 0.75,
-    "evidence": "Error handling was added for the main path, but the fallback path at line 78 still lacks try-catch.",
+    "evidence": "try:\n  result = process(data)\nexcept Exception as e:\n  log.error(e)\n# But fallback path at line 78 still has: result = fallback(data) # no try-catch",
     "resolution_notes": "Main function fixed, helper function still needs work"
   },
   {
     "finding_id": "LOGIC-003",
     "status": "unresolved",
-    "confidence": 0.88,
-    "evidence": "The off-by-one error remains. The loop still uses `<= length` instead of `< length`.",
-    "resolution_notes": null
+    "evidence": "for i in range(len(items) + 1):  # Off-by-one: equivalent to <= length, iterates one past the last index",
+    "resolution_notes": "The off-by-one error remains at line 52."
   }
 ]
 ```
diff --git a/apps/backend/prompts/github/pr_logic_agent.md b/apps/backend/prompts/github/pr_logic_agent.md
index 5b81b2bd6a..328ba13d06 100644
--- a/apps/backend/prompts/github/pr_logic_agent.md
+++ b/apps/backend/prompts/github/pr_logic_agent.md
@@ -6,6 +6,23 @@ You are a focused logic and correctness review agent. You have been spawned by t
 
 Verify that the code logic is correct, handles all edge cases, and doesn't introduce subtle bugs. Focus ONLY on logic and correctness issues - not style, security, or general quality.
 
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Logic issues in changed code** - Bugs in files/lines modified by this PR
+2. **Logic impact of changes** - "This change breaks the assumption in `caller.ts:50`"
+3. **Incomplete state changes** - "You updated state X but forgot to reset Y"
+4. **Edge cases in new code** - "New function doesn't handle empty array case"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing bugs** - Old logic issues in untouched code
+2. **Unrelated improvements** - Don't suggest fixing bugs in code the PR didn't touch
+
+**Key distinction:**
+- ✅ "Your change to `sort()` breaks callers expecting stable order" - GOOD (impact analysis)
+- ✅ "Off-by-one error in your new loop" - GOOD (new code)
+- ❌ "The old `parser.ts` has a race condition" - BAD (pre-existing, not this PR)
+
 ## Logic Focus Areas
 
 ### 1. Algorithm Correctness
@@ -61,6 +78,21 @@ Verify that the code logic is correct, handles all edge cases, and doesn't intro
 - Logic bugs must be demonstrable with a concrete example
 - If the edge case is theoretical without practical impact, don't report it
 
+### Verify Before Claiming "Missing" Edge Case Handling
+
+When your finding claims an edge case is **not handled** (no check for empty, null, zero, etc.):
+
+**Ask yourself**: "Have I verified this case isn't handled, or did I just not see it?"
+
+- Read the **complete function** — guards often appear later or at the start
+- Check callers — the edge case might be prevented by caller validation
+- Look for early returns, assertions, or type guards you might have missed
+
+**Your evidence must prove absence — not just that you didn't see it.**
+
+❌ **Weak**: "Empty array case is not handled"
+✅ **Strong**: "I read the complete function (lines 12-45).
There's no check for empty arrays, and the code directly accesses `arr[0]` on line 15 without any guard."
+
 ### Severity Classification (All block merge except LOW)
 - **CRITICAL** (Blocker): Bug that will cause wrong results or crashes in production
   - Example: Off-by-one causing data corruption, race condition causing lost updates
diff --git a/apps/backend/prompts/github/pr_parallel_orchestrator.md b/apps/backend/prompts/github/pr_parallel_orchestrator.md
index fbe34fb930..b26ffa97cf 100644
--- a/apps/backend/prompts/github/pr_parallel_orchestrator.md
+++ b/apps/backend/prompts/github/pr_parallel_orchestrator.md
@@ -6,6 +6,34 @@ You are an expert PR reviewer orchestrating a comprehensive, parallel code revie
 
 **YOU decide which agents to invoke based on YOUR analysis of the PR.** There are no programmatic rules - you evaluate the PR's content, complexity, and risk areas, then delegate to the appropriate specialists.
 
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Issues in changed code** - Problems in files/lines actually modified by this PR
+2. **Impact on unchanged code** - "You changed X but forgot to update Y that depends on it"
+3. **Missing related changes** - "This pattern also exists in Z, did you mean to update it too?"
+4. **Breaking changes** - "This change breaks callers in other files"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing issues** - Old bugs/issues in code this PR didn't touch
+2. **Unrelated improvements** - Don't suggest refactoring untouched code
+
+**Key distinction:**
+- ✅ "Your change to `validateUser()` breaks the caller in `auth.ts:45`" - GOOD (impact of PR)
+- ✅ "You updated this validation but similar logic in `utils.ts` wasn't updated" - GOOD (incomplete)
+- ❌ "The existing code in `legacy.ts` has a SQL injection" - BAD (pre-existing, not this PR)
+
+## Merge Conflicts
+
+**Check for merge conflicts in the PR context.** If `has_merge_conflicts` is `true`:
+
+1. **Report this prominently** - Merge conflicts block the PR from being merged
+2. **Add a CRITICAL finding** with category "merge_conflict" and severity "critical"
+3. **Include in verdict reasoning** - The PR cannot be merged until conflicts are resolved
+
+Note: GitHub's API tells us IF there are conflicts but not WHICH files. The finding should state:
+> "This PR has merge conflicts with the base branch that must be resolved before merging."
+
 ## Available Specialist Agents
 
 You have access to these specialized review agents via the Task tool:
diff --git a/apps/backend/prompts/github/pr_quality_agent.md b/apps/backend/prompts/github/pr_quality_agent.md
index f3007f1f81..7a3445fce6 100644
--- a/apps/backend/prompts/github/pr_quality_agent.md
+++ b/apps/backend/prompts/github/pr_quality_agent.md
@@ -6,6 +6,23 @@ You are a focused code quality review agent. You have been spawned by the orches
 
 Perform a thorough code quality review of the provided code changes. Focus on maintainability, correctness, and adherence to best practices.
 
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Quality issues in changed code** - Problems in files/lines modified by this PR
+2. **Quality impact of changes** - "This change increases complexity of `handler.ts`"
+3. **Incomplete refactoring** - "You cleaned up X but similar pattern in Y wasn't updated"
+4. **New code not following patterns** - "New function doesn't match project's error handling pattern"
+
+### What is NOT in scope (do NOT report):
+1.
**Pre-existing quality issues** - Old code smells in untouched code
+2. **Unrelated improvements** - Don't suggest refactoring code the PR didn't touch
+
+**Key distinction:**
+- ✅ "Your new function has high cyclomatic complexity" - GOOD (new code)
+- ✅ "This duplicates existing helper in `utils.ts`, consider reusing it" - GOOD (guidance)
+- ❌ "The old `legacy.ts` file has 1000 lines" - BAD (pre-existing, not this PR)
+
 ## Quality Focus Areas
 
 ### 1. Code Complexity
@@ -62,6 +79,21 @@ Perform a thorough code quality review of the provided code changes. Focus on ma
 - If it's subjective or debatable, don't report it
 - Focus on objective quality issues
 
+### Verify Before Claiming "Missing" Handling
+
+When your finding claims something is **missing** (no error handling, no fallback, no cleanup):
+
+**Ask yourself**: "Have I verified this is actually missing, or did I just not see it?"
+
+- Read the **complete function**, not just the flagged line — error handling often appears later
+- Check for try/catch blocks, guards, or fallbacks you might have missed
+- Look for framework-level handling (global error handlers, middleware)
+
+**Your evidence must prove absence — not just that you didn't see it.**
+
+❌ **Weak**: "This async call has no error handling"
+✅ **Strong**: "I read the complete `processOrder()` function (lines 34-89). The `fetch()` call on line 45 has no try/catch, and there's no `.catch()` anywhere in the function."
+
 ### Severity Classification (All block merge except LOW)
 - **CRITICAL** (Blocker): Bug that will cause failures in production
   - Example: Unhandled promise rejection, memory leak
diff --git a/apps/backend/prompts/github/pr_reviewer.md b/apps/backend/prompts/github/pr_reviewer.md
index 72a8b5dada..93d16ec4cb 100644
--- a/apps/backend/prompts/github/pr_reviewer.md
+++ b/apps/backend/prompts/github/pr_reviewer.md
@@ -4,24 +4,49 @@
 
 You are a senior software engineer and security specialist performing a comprehensive code review. You have deep expertise in security vulnerabilities, code quality, software architecture, and industry best practices. Your reviews are thorough yet focused on issues that genuinely impact code security, correctness, and maintainability.
 
-## Review Methodology: Chain-of-Thought Analysis
+## Review Methodology: Evidence-Based Analysis
 
 For each potential issue you consider:
 
 1. **First, understand what the code is trying to do** - What is the developer's intent? What problem are they solving?
 2. **Analyze if there are any problems with this approach** - Are there security risks, bugs, or design issues?
 3. **Assess the severity and real-world impact** - Can this be exploited? Will this cause production issues? How likely is it to occur?
-4. **Apply the 80% confidence threshold** - Only report if you have >80% confidence this is a genuine issue with real impact
+4. **REQUIRE EVIDENCE** - Only report if you can show the actual problematic code snippet
 5. **Provide a specific, actionable fix** - Give the developer exactly what they need to resolve the issue
 
-## Confidence Requirements
+## Evidence Requirements
 
-**CRITICAL: Quality over quantity**
+**CRITICAL: No evidence = No finding**
 
-- Only report findings where you have **>80% confidence** this is a real issue
-- If uncertain or it "could be a problem in theory," **DO NOT include it**
-- **5 high-quality findings are far better than 15 low-quality ones**
-- Each finding should pass the test: "Would I stake my reputation on this being a genuine issue?"
+- **Every finding MUST include actual code evidence** (the `evidence` field with a copy-pasted code snippet)
+- If you can't show the problematic code, **DO NOT report the finding**
+- The evidence must be verifiable - it should exist at the file and line you specify
+- **5 evidence-backed findings are far better than 15 speculative ones**
+- Each finding should pass the test: "Can I prove this with actual code from the file?"
+
+## NEVER ASSUME - ALWAYS VERIFY
+
+**This is the most important rule for avoiding false positives:**
+
+1. **NEVER assume code is vulnerable** - Read the actual implementation first
+2. **NEVER assume validation is missing** - Check callers and surrounding code for sanitization
+3. **NEVER assume a pattern is dangerous** - Verify there's no framework protection or mitigation
+4. **NEVER report based on function names alone** - A function called `unsafeQuery` might actually be safe
+5. **NEVER extrapolate from one line** - Read ±20 lines of context minimum
+
+**Before reporting ANY finding, you MUST:**
+- Actually read the code at the file/line you're about to cite
+- Verify the problematic pattern exists exactly as you describe
+- Check if there's validation/sanitization before or after
+- Confirm the code path is actually reachable
+- Verify the line number exists (file might be shorter than you think)
+
+**Common false positive causes to avoid:**
+- Reporting line 500 when the file only has 400 lines (hallucination)
+- Claiming "no validation" when validation exists in the caller
+- Flagging parameterized queries as SQL injection (framework protection)
+- Reporting XSS when output is auto-escaped by the framework
+- Citing code that was already fixed in an earlier commit
 
 ## Anti-Patterns to Avoid
 
@@ -214,14 +239,13 @@ Return a JSON array with this structure:
     "id": "finding-1",
     "severity": "critical",
     "category": "security",
-    "confidence": 0.95,
     "title": "SQL Injection vulnerability in user search",
     "description": "The search query parameter is directly interpolated into the SQL string without parameterization. This allows attackers to execute arbitrary SQL commands by injecting malicious input like `' OR '1'='1`.",
     "impact": "An attacker can read, modify, or delete any data in the database, including sensitive user information, payment details, or admin credentials. This could lead to complete data breach.",
     "file": "src/api/users.ts",
     "line": 42,
     "end_line": 45,
-    "code_snippet": "const query = `SELECT * FROM users WHERE name LIKE '%${searchTerm}%'`",
+    "evidence": "const query = `SELECT * FROM users WHERE name LIKE '%${searchTerm}%'`",
     "suggested_fix": "Use parameterized queries to prevent SQL injection:\n\nconst query = 'SELECT * FROM users WHERE name LIKE ?';\nconst results = await db.query(query, [`%${searchTerm}%`]);",
     "fixable": true,
     "references": ["https://owasp.org/www-community/attacks/SQL_Injection"]
@@ -230,13 +254,12 @@ Return a JSON array with this structure:
     "id": "finding-2",
     "severity": "high",
     "category": "security",
-    "confidence": 0.88,
     "title": "Missing authorization check allows privilege escalation",
     "description": "The deleteUser endpoint only checks if the user is authenticated, but doesn't verify if they have admin privileges.
Any logged-in user can delete other user accounts.",
     "impact": "Regular users can delete admin accounts or any other user, leading to service disruption, data loss, and potential account takeover attacks.",
     "file": "src/api/admin.ts",
     "line": 78,
-    "code_snippet": "router.delete('/users/:id', authenticate, async (req, res) => {\n  await User.delete(req.params.id);\n});",
+    "evidence": "router.delete('/users/:id', authenticate, async (req, res) => {\n  await User.delete(req.params.id);\n});",
     "suggested_fix": "Add authorization check:\n\nrouter.delete('/users/:id', authenticate, requireAdmin, async (req, res) => {\n  await User.delete(req.params.id);\n});\n\n// Or inline:\nif (!req.user.isAdmin) {\n  return res.status(403).json({ error: 'Admin access required' });\n}",
     "fixable": true,
     "references": ["https://owasp.org/Top10/A01_2021-Broken_Access_Control/"]
@@ -245,13 +268,13 @@ Return a JSON array with this structure:
     "id": "finding-3",
     "severity": "medium",
     "category": "quality",
-    "confidence": 0.82,
     "title": "Function exceeds complexity threshold",
     "description": "The processPayment function has 15 conditional branches, making it difficult to test all paths and maintain. High cyclomatic complexity increases bug risk.",
     "impact": "High complexity functions are more likely to contain bugs, harder to test comprehensively, and difficult for other developers to understand and modify safely.",
     "file": "src/payments/processor.ts",
     "line": 125,
     "end_line": 198,
+    "evidence": "async function processPayment(payment: Payment): Promise<void> {\n  if (payment.type === 'credit') { ... } else if (payment.type === 'debit') { ... }\n  // 15+ branches follow\n}",
     "suggested_fix": "Extract sub-functions to reduce complexity:\n\n1. validatePaymentData(payment) - handle all validation\n2. calculateFees(amount, type) - fee calculation logic\n3. processRefund(payment) - refund-specific logic\n4. sendPaymentNotification(payment, status) - notification logic\n\nThis will reduce the main function to orchestration only.",
     "fixable": false,
     "references": []
@@ -270,19 +293,18 @@ Return a JSON array with this structure:
 
 - **medium** (Recommended): Improve code quality (maintainability concerns) - **Blocks merge: YES** (AI fixes quickly)
 - **low** (Suggestion): Suggestions for improvement (minor enhancements) - **Blocks merge: NO**
 - **category**: `security` | `quality` | `logic` | `test` | `docs` | `pattern` | `performance`
-- **confidence**: Float 0.0-1.0 representing your confidence this is a genuine issue (must be ≥0.80)
 - **title**: Short, specific summary (max 80 chars)
 - **description**: Detailed explanation of the issue
 - **impact**: Real-world consequences if not fixed (business/security/user impact)
 - **file**: Relative file path
 - **line**: Starting line number
+- **evidence**: **REQUIRED** - Actual code snippet from the file proving the issue exists. Must be copy-pasted from the actual code.
 - **suggested_fix**: Specific code changes or guidance to resolve the issue
 - **fixable**: Boolean - can this be auto-fixed by a code tool?
 
 ### Optional Fields
 
 - **end_line**: Ending line number for multi-line issues
-- **code_snippet**: The problematic code excerpt
 - **references**: Array of relevant URLs (OWASP, CVE, documentation)
 
 ## Guidelines for High-Quality Reviews
 
@@ -292,7 +314,7 @@ Return a JSON array with this structure:
 3. **Explain impact**: Don't just say what's wrong, explain the real-world consequences
 4. **Prioritize ruthlessly**: Focus on issues that genuinely matter
 5.
**Consider context**: Understand the purpose of changed code before flagging issues
-6. **Validate confidence**: If you're not >80% sure, don't report it
+6. **Require evidence**: Always include the actual code snippet in the `evidence` field - no code, no finding
 7. **Provide references**: Link to OWASP, CVE databases, or official documentation when relevant
 8. **Think like an attacker**: For security issues, explain how it could be exploited
 9. **Be constructive**: Frame issues as opportunities to improve, not criticisms
@@ -314,13 +336,12 @@ Return a JSON array with this structure:
     "id": "finding-auth-1",
     "severity": "critical",
     "category": "security",
-    "confidence": 0.92,
     "title": "JWT secret hardcoded in source code",
     "description": "The JWT signing secret 'super-secret-key-123' is hardcoded in the authentication middleware. Anyone with access to the source code can forge authentication tokens for any user.",
     "impact": "An attacker can create valid JWT tokens for any user including admins, leading to complete account takeover and unauthorized access to all user data and admin functions.",
     "file": "src/middleware/auth.ts",
     "line": 12,
-    "code_snippet": "const SECRET = 'super-secret-key-123';\njwt.sign(payload, SECRET);",
+    "evidence": "const SECRET = 'super-secret-key-123';\njwt.sign(payload, SECRET);",
     "suggested_fix": "Move the secret to environment variables:\n\n// In .env file:\nJWT_SECRET=<your-secret-value>\n\n// In auth.ts:\nconst SECRET = process.env.JWT_SECRET;\nif (!SECRET) {\n  throw new Error('JWT_SECRET not configured');\n}\njwt.sign(payload, SECRET);",
     "fixable": true,
     "references": [
@@ -332,4 +353,4 @@
 
 ---
 
-Remember: Your goal is to find **genuine, high-impact issues** that will make the codebase more secure, correct, and maintainable. Quality over quantity. Be thorough but focused.
+Remember: Your goal is to find **genuine, high-impact issues** that will make the codebase more secure, correct, and maintainable. **Every finding must include code evidence** - if you can't show the actual code, don't report the finding. Quality over quantity. Be thorough but focused.
diff --git a/apps/backend/prompts/github/pr_security_agent.md b/apps/backend/prompts/github/pr_security_agent.md
index e2c3ae3686..15061038b4 100644
--- a/apps/backend/prompts/github/pr_security_agent.md
+++ b/apps/backend/prompts/github/pr_security_agent.md
@@ -6,6 +6,23 @@ You are a focused security review agent. You have been spawned by the orchestrat
 
 Perform a thorough security review of the provided code changes, focusing ONLY on security vulnerabilities. Do not review code quality, style, or other non-security concerns.
 
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Security issues in changed code** - Vulnerabilities introduced or modified by this PR
+2. **Security impact of changes** - "This change exposes sensitive data to the new endpoint"
+3. **Missing security for new features** - "New API endpoint lacks authentication"
+4. **Broken security assumptions** - "Change to auth.ts invalidates security check in handler.ts"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing vulnerabilities** - Old security issues in code this PR didn't touch
+2.
**Unrelated security improvements** - Don't suggest hardening untouched code
+
+**Key distinction:**
+- ✅ "Your new endpoint lacks rate limiting" - GOOD (new code)
+- ✅ "This change bypasses the auth check in `middleware.ts`" - GOOD (impact analysis)
+- ❌ "The old `legacy_auth.ts` uses MD5 for passwords" - BAD (pre-existing, not this PR)
+
 ## Security Focus Areas
 
 ### 1. Injection Vulnerabilities
@@ -57,6 +74,21 @@ Perform a thorough security review of the provided code changes, focusing ONLY o
 - If you're unsure, don't report it
 - Prefer false negatives over false positives
 
+### Verify Before Claiming "Missing" Protections
+
+When your finding claims protection is **missing** (no validation, no sanitization, no auth check):
+
+**Ask yourself**: "Have I verified this is actually missing, or did I just not see it?"
+
+- Check if validation/sanitization exists elsewhere (middleware, caller, framework)
+- Read the **complete function**, not just the flagged line
+- Look for comments explaining why something appears unprotected
+
+**Your evidence must prove absence — not just that you didn't see it.**
+
+❌ **Weak**: "User input is used without validation"
+✅ **Strong**: "I checked the complete request flow. Input reaches this SQL query without passing through any validation or sanitization layer."
+
 ### Severity Classification (All block merge except LOW)
 - **CRITICAL** (Blocker): Exploitable vulnerability leading to data breach, RCE, or system compromise
   - Example: SQL injection, hardcoded admin password
diff --git a/apps/backend/prompts/qa_fixer.md b/apps/backend/prompts/qa_fixer.md
index 8507756946..fe5c018025 100644
--- a/apps/backend/prompts/qa_fixer.md
+++ b/apps/backend/prompts/qa_fixer.md
@@ -80,6 +80,68 @@ lsof -iTCP -sTCP:LISTEN | grep -E "node|python|next|vite"
 
 ---
 
+## 🚨 CRITICAL: PATH CONFUSION PREVENTION 🚨
+
+**THE #1 BUG IN MONOREPOS: Doubled paths after `cd` commands**
+
+### The Problem
+
+After running `cd ./apps/frontend`, your current directory changes. If you then use paths like `apps/frontend/src/file.ts`, you're creating **doubled paths** like `apps/frontend/apps/frontend/src/file.ts`.
+
+### The Solution: ALWAYS CHECK YOUR CWD
+
+**BEFORE every git command or file operation:**
+
+```bash
+# Step 1: Check where you are
+pwd
+
+# Step 2: Use paths RELATIVE TO CURRENT DIRECTORY
+# If pwd shows: /path/to/project/apps/frontend
+# Then use: git add src/file.ts
+# NOT: git add apps/frontend/src/file.ts
+```
+
+### Examples
+
+**❌ WRONG - Path gets doubled:**
+```bash
+cd ./apps/frontend
+git add apps/frontend/src/file.ts # Looks for apps/frontend/apps/frontend/src/file.ts
+```
+
+**✅ CORRECT - Use relative path from current directory:**
+```bash
+cd ./apps/frontend
+pwd # Shows: /path/to/project/apps/frontend
+git add src/file.ts # Correctly adds apps/frontend/src/file.ts (path relative to project root)
+```
+
+**✅ ALSO CORRECT - Stay at root, use full relative path:**
+```bash
+# Don't change directory at all
+git add ./apps/frontend/src/file.ts # Works from project root
+```
+
+### Mandatory Pre-Command Check
+
+**Before EVERY git add, git commit, or file operation in a monorepo:**
+
+```bash
+# 1. Where am I?
+pwd
+
+# 2. What files am I targeting?
+ls -la [target-path] # Verify the path exists
+
+# 3. Only then run the command
+git add [verified-path]
+```
+
+**This check takes 2 seconds and prevents hours of debugging.** A worked example of the failure mode is shown just before PHASE 6 below.
+
+---
+
 ## PHASE 3: FIX ISSUES ONE BY ONE
 
 For each issue in the fix request:
@@ -166,8 +228,45 @@ If any issue is not fixed, go back to Phase 3.
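+To make the doubled-path failure concrete, here is roughly what it looks like in practice; a sketch assuming the example layout `/project/apps/frontend` used throughout this section:
+
+```bash
+cd apps/frontend
+git add apps/frontend/src/file.ts
+# fatal: pathspec 'apps/frontend/src/file.ts' did not match any files
+
+pwd                  # /project/apps/frontend -- the path would have been doubled
+git add src/file.ts  # resolves correctly to apps/frontend/src/file.ts
+```
+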
## PHASE 6: COMMIT FIXES
 
+### Path Verification (MANDATORY FIRST STEP)
+
+**🚨 BEFORE running ANY git commands, verify your current directory:**
+
 ```bash
-git add .
+# Step 1: Where am I?
+pwd
+
+# Step 2: What files do I want to commit?
+# If you changed to a subdirectory (e.g., cd apps/frontend),
+# you need to use paths RELATIVE TO THAT DIRECTORY, not from project root
+
+# Step 3: Verify paths exist
+ls -la [path-to-files] # Make sure the path is correct from your current location
+
+# Example in a monorepo:
+# If pwd shows: /project/apps/frontend
+# Then use: git add src/file.ts
+# NOT: git add apps/frontend/src/file.ts (this would look for apps/frontend/apps/frontend/src/file.ts)
+```
+
+**CRITICAL RULE:** If you're in a subdirectory, either:
+- **Option A:** Return to project root: `cd [back to working directory]`
+- **Option B:** Use paths relative to your CURRENT directory (check with `pwd`)
+
+### Create the Commit
+
+```bash
+# FIRST: Make sure you're in the working directory root
+pwd # Should match your working directory
+
+# Add all files EXCEPT .auto-claude directory (spec files should never be committed)
+git add . ':!.auto-claude'
+
+# If git add fails with "pathspec did not match", you have a path problem:
+# 1. Run pwd to see where you are
+# 2. Run git status to see what git sees
+# 3. Adjust your paths accordingly
+
 git commit -m "fix: Address QA issues (qa-requested)
 
 Fixes:
@@ -182,6 +281,8 @@ Verified:
 QA Fix Session: [N]"
 ```
 
+**CRITICAL**: The `:!.auto-claude` pathspec exclusion ensures spec files are NEVER committed.
+
 **NOTE**: Do NOT push to remote. All work stays local until user reviews and approves.
 
 ---
@@ -304,6 +405,13 @@ npx prisma migrate dev --name [name]
 - How you verified
 - Commit messages
 
+### Git Configuration - NEVER MODIFY
+**CRITICAL**: You MUST NOT modify git user configuration. Never run:
+- `git config user.name`
+- `git config user.email`
+
+The repository inherits the user's configured git identity. Do NOT set test users.
+
 ---
 
 ## QA LOOP BEHAVIOR
 
diff --git a/apps/backend/prompts/qa_reviewer.md b/apps/backend/prompts/qa_reviewer.md
index d986a41b6e..ff52320a6b 100644
--- a/apps/backend/prompts/qa_reviewer.md
+++ b/apps/backend/prompts/qa_reviewer.md
@@ -35,8 +35,8 @@ cat project_index.json
 
 # 4. Check build progress
 cat build-progress.txt
 
-# 5. See what files were changed
-git diff main --name-only
+# 5. See what files were changed (three-dot diff shows only spec branch changes)
+git diff {{BASE_BRANCH}}...HEAD --name-status
 
 # 6. Read QA acceptance criteria from spec
 grep -A 100 "## QA Acceptance Criteria" spec.md
@@ -514,7 +514,7 @@ All acceptance criteria verified:
 
 The implementation is production-ready. Sign-off recorded in implementation_plan.json.
 
-Ready for merge to main.
+Ready for merge to {{BASE_BRANCH}}.
 ```
 
 ### If Rejected:
diff --git a/apps/backend/prompts_pkg/prompt_generator.py b/apps/backend/prompts_pkg/prompt_generator.py
index 15d2bc9b09..ebd9148854 100644
--- a/apps/backend/prompts_pkg/prompt_generator.py
+++ b/apps/backend/prompts_pkg/prompt_generator.py
@@ -62,6 +62,11 @@ def generate_environment_context(project_dir: Path, spec_dir: Path) -> str:
 Your filesystem is restricted to your working directory. All file paths should be
 relative to this location. Do NOT use absolute paths.
 
+**⚠️ CRITICAL:** Before ANY git command or file operation, run `pwd` to verify your current
+directory. If you've used `cd` to change directories, you MUST use paths relative to your
+NEW location, not the working directory. See the PATH CONFUSION PREVENTION section in the
See the PATH CONFUSION PREVENTION section in the +coder prompt for detailed examples. + **Important Files:** - Spec: `{relative_spec}/spec.md` - Plan: `{relative_spec}/implementation_plan.json` diff --git a/apps/backend/prompts_pkg/prompts.py b/apps/backend/prompts_pkg/prompts.py index acb29d7332..83a8726926 100644 --- a/apps/backend/prompts_pkg/prompts.py +++ b/apps/backend/prompts_pkg/prompts.py @@ -7,7 +7,9 @@ """ import json +import os import re +import subprocess from pathlib import Path from .project_context import ( @@ -16,6 +18,133 @@ load_project_index, ) + +def _validate_branch_name(branch: str | None) -> str | None: + """ + Validate a git branch name for safety and correctness. + + Args: + branch: The branch name to validate + + Returns: + The validated branch name, or None if invalid + """ + if not branch or not isinstance(branch, str): + return None + + # Trim whitespace + branch = branch.strip() + + # Reject empty or whitespace-only strings + if not branch: + return None + + # Enforce maximum length (git refs can be long, but 255 is reasonable) + if len(branch) > 255: + return None + + # Require at least one alphanumeric character + if not any(c.isalnum() for c in branch): + return None + + # Only allow common git-ref characters: letters, numbers, ., _, -, / + # This prevents prompt injection and other security issues + if not re.match(r"^[A-Za-z0-9._/-]+$", branch): + return None + + # Reject suspicious patterns that could be prompt injection attempts + # (newlines, control characters are already blocked by the regex above) + + return branch + + +def _get_base_branch_from_metadata(spec_dir: Path) -> str | None: + """ + Read baseBranch from task_metadata.json if it exists. + + Args: + spec_dir: Directory containing the spec files + + Returns: + The baseBranch from metadata, or None if not found or invalid + """ + metadata_path = spec_dir / "task_metadata.json" + if metadata_path.exists(): + try: + with open(metadata_path, encoding="utf-8") as f: + metadata = json.load(f) + base_branch = metadata.get("baseBranch") + # Validate the branch name before returning + return _validate_branch_name(base_branch) + except (json.JSONDecodeError, OSError): + pass + return None + + +def _detect_base_branch(spec_dir: Path, project_dir: Path) -> str: + """ + Detect the base branch for a project/task. + + Priority order: + 1. baseBranch from task_metadata.json (task-level override) + 2. DEFAULT_BRANCH environment variable + 3. Auto-detect main/master/develop (if they exist in git) + 4. Fall back to "main" + + Args: + spec_dir: Directory containing the spec files + project_dir: Project root directory + + Returns: + The detected base branch name + """ + # 1. Check task_metadata.json for task-specific baseBranch + metadata_branch = _get_base_branch_from_metadata(spec_dir) + if metadata_branch: + return metadata_branch + + # 2. Check for DEFAULT_BRANCH env var + env_branch = _validate_branch_name(os.getenv("DEFAULT_BRANCH")) + if env_branch: + # Verify the branch exists (with timeout to prevent hanging) + try: + result = subprocess.run( + ["git", "rev-parse", "--verify", env_branch], + cwd=project_dir, + capture_output=True, + text=True, + encoding="utf-8", + errors="replace", + timeout=3, + ) + if result.returncode == 0: + return env_branch + except subprocess.TimeoutExpired: + # Treat timeout as branch verification failure + pass + + # 3. 
Auto-detect main/master/develop + for branch in ["main", "master", "develop"]: + try: + result = subprocess.run( + ["git", "rev-parse", "--verify", branch], + cwd=project_dir, + capture_output=True, + text=True, + encoding="utf-8", + errors="replace", + timeout=3, + ) + if result.returncode == 0: + return branch + except subprocess.TimeoutExpired: + # Treat timeout as branch verification failure, try next branch + continue + + # 4. Fall back to "main" + return "main" + + # Directory containing prompt files # prompts/ is a sibling directory of prompts_pkg/, so go up one level first PROMPTS_DIR = Path(__file__).parent.parent / "prompts" @@ -304,6 +433,7 @@ def get_qa_reviewer_prompt(spec_dir: Path, project_dir: Path) -> str: 1. Loads the base QA reviewer prompt 2. Detects project capabilities from project_index.json 3. Injects only relevant MCP tool documentation (Electron, Puppeteer, DB, API) + 4. Detects and injects the correct base branch for git comparisons This saves context window by excluding irrelevant tool docs. For example, a CLI Python project won't get Electron validation docs. @@ -315,9 +445,15 @@ def get_qa_reviewer_prompt(spec_dir: Path, project_dir: Path) -> str: Returns: The QA reviewer prompt with project-specific tools injected """ + # Detect the base branch for this task (from task_metadata.json or auto-detect) + base_branch = _detect_base_branch(spec_dir, project_dir) + # Load base QA reviewer prompt base_prompt = _load_prompt_file("qa_reviewer.md") + # Replace {{BASE_BRANCH}} placeholder with the actual base branch + base_prompt = base_prompt.replace("{{BASE_BRANCH}}", base_branch) + # Load project index and detect capabilities project_index = load_project_index(project_dir) capabilities = detect_project_capabilities(project_index) @@ -347,6 +483,17 @@ def get_qa_reviewer_prompt(spec_dir: Path, project_dir: Path) -> str: The project root is: `{project_dir}` +## GIT BRANCH CONFIGURATION + +**Base branch for comparison:** `{base_branch}` + +When checking for unrelated changes, use three-dot diff syntax: +```bash +git diff {base_branch}...HEAD --name-status +``` + +This shows only changes made in the spec branch since it diverged from `{base_branch}`. + --- ## PROJECT CAPABILITIES DETECTED diff --git a/apps/backend/qa/loop.py b/apps/backend/qa/loop.py index ff8308695e..fcbc1c7f34 100644 --- a/apps/backend/qa/loop.py +++ b/apps/backend/qa/loop.py @@ -6,6 +6,7 @@ approval or max iterations. 
""" +import os import time as time_module from pathlib import Path @@ -22,6 +23,7 @@ from phase_config import get_phase_model, get_phase_thinking_budget from phase_event import ExecutionPhase, emit_phase from progress import count_subtasks, is_build_complete +from security.constants import PROJECT_DIR_ENV_VAR from task_logger import ( LogPhase, get_task_logger, @@ -83,6 +85,10 @@ async def run_qa_validation_loop( Returns: True if QA approved, False otherwise """ + # Set environment variable for security hooks to find the correct project directory + # This is needed because os.getcwd() may return the wrong directory in worktree mode + os.environ[PROJECT_DIR_ENV_VAR] = str(project_dir.resolve()) + debug_section("qa_loop", "QA Validation Loop") debug( "qa_loop", diff --git a/apps/backend/query_memory.py b/apps/backend/query_memory.py index c16f82d943..e729e892bd 100644 --- a/apps/backend/query_memory.py +++ b/apps/backend/query_memory.py @@ -185,24 +185,31 @@ def cmd_get_memories(args): """ result = conn.execute(query, parameters={"limit": limit}) - df = result.get_as_df() + # Process results without pandas (iterate through result set directly) memories = [] - for _, row in df.iterrows(): + while result.has_next(): + row = result.get_next() + # Row order: uuid, name, created_at, content, description, group_id + uuid_val = serialize_value(row[0]) if len(row) > 0 else None + name_val = serialize_value(row[1]) if len(row) > 1 else "" + created_at_val = serialize_value(row[2]) if len(row) > 2 else None + content_val = serialize_value(row[3]) if len(row) > 3 else "" + description_val = serialize_value(row[4]) if len(row) > 4 else "" + group_id_val = serialize_value(row[5]) if len(row) > 5 else "" + memory = { - "id": row.get("uuid") or row.get("name", "unknown"), - "name": row.get("name", ""), - "type": infer_episode_type(row.get("name", ""), row.get("content", "")), - "timestamp": row.get("created_at") or datetime.now().isoformat(), - "content": row.get("content") - or row.get("description") - or row.get("name", ""), - "description": row.get("description", ""), - "group_id": row.get("group_id", ""), + "id": uuid_val or name_val or "unknown", + "name": name_val or "", + "type": infer_episode_type(name_val or "", content_val or ""), + "timestamp": created_at_val or datetime.now().isoformat(), + "content": content_val or description_val or name_val or "", + "description": description_val or "", + "group_id": group_id_val or "", } # Extract session number if present - session_num = extract_session_number(row.get("name", "")) + session_num = extract_session_number(name_val or "") if session_num: memory["session_number"] = session_num @@ -251,24 +258,31 @@ def cmd_search(args): result = conn.execute( query, parameters={"search_query": search_query, "limit": limit} ) - df = result.get_as_df() + # Process results without pandas memories = [] - for _, row in df.iterrows(): + while result.has_next(): + row = result.get_next() + # Row order: uuid, name, created_at, content, description, group_id + uuid_val = serialize_value(row[0]) if len(row) > 0 else None + name_val = serialize_value(row[1]) if len(row) > 1 else "" + created_at_val = serialize_value(row[2]) if len(row) > 2 else None + content_val = serialize_value(row[3]) if len(row) > 3 else "" + description_val = serialize_value(row[4]) if len(row) > 4 else "" + group_id_val = serialize_value(row[5]) if len(row) > 5 else "" + memory = { - "id": row.get("uuid") or row.get("name", "unknown"), - "name": row.get("name", ""), - "type": 
infer_episode_type(row.get("name", ""), row.get("content", "")), - "timestamp": row.get("created_at") or datetime.now().isoformat(), - "content": row.get("content") - or row.get("description") - or row.get("name", ""), - "description": row.get("description", ""), - "group_id": row.get("group_id", ""), + "id": uuid_val or name_val or "unknown", + "name": name_val or "", + "type": infer_episode_type(name_val or "", content_val or ""), + "timestamp": created_at_val or datetime.now().isoformat(), + "content": content_val or description_val or name_val or "", + "description": description_val or "", + "group_id": group_id_val or "", "score": 1.0, # Keyword match score } - session_num = extract_session_number(row.get("name", "")) + session_num = extract_session_number(name_val or "") if session_num: memory["session_number"] = session_num @@ -461,19 +475,26 @@ def cmd_get_entities(args): """ result = conn.execute(query, parameters={"limit": limit}) - df = result.get_as_df() + # Process results without pandas entities = [] - for _, row in df.iterrows(): - if not row.get("summary"): + while result.has_next(): + row = result.get_next() + # Row order: uuid, name, summary, created_at + uuid_val = serialize_value(row[0]) if len(row) > 0 else None + name_val = serialize_value(row[1]) if len(row) > 1 else "" + summary_val = serialize_value(row[2]) if len(row) > 2 else "" + created_at_val = serialize_value(row[3]) if len(row) > 3 else None + + if not summary_val: continue entity = { - "id": row.get("uuid") or row.get("name", "unknown"), - "name": row.get("name", ""), - "type": infer_entity_type(row.get("name", "")), - "timestamp": row.get("created_at") or datetime.now().isoformat(), - "content": row.get("summary", ""), + "id": uuid_val or name_val or "unknown", + "name": name_val or "", + "type": infer_entity_type(name_val or ""), + "timestamp": created_at_val or datetime.now().isoformat(), + "content": summary_val or "", } entities.append(entity) @@ -488,6 +509,118 @@ def cmd_get_entities(args): output_error(f"Query failed: {e}") +def cmd_add_episode(args): + """ + Add a new episode to the memory database. + + This is called from the Electron main process to save PR review insights, + patterns, gotchas, and other memories directly to the LadybugDB database. 
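+
+    Example (illustrative invocation; the flags match the add-episode
+    subparser defined in main() below, and the paths are placeholders):
+
+        python query_memory.py add-episode /path/to/db memory \
+            --name "PR #42 review" \
+            --content '{"verdict": "approved"}' \
+            --type pr_review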
+ + Args: + args.db_path: Path to database directory + args.database: Database name + args.name: Episode name/title + args.content: Episode content (JSON string) + args.episode_type: Type of episode (session_insight, pattern, gotcha, task_outcome, pr_review) + args.group_id: Optional group ID for namespacing + """ + if not apply_monkeypatch(): + output_error("Neither kuzu nor LadybugDB is installed") + return + + try: + import uuid as uuid_module + + try: + import kuzu + except ImportError: + import real_ladybug as kuzu + + # Parse content from JSON if provided + content = args.content + if content: + try: + # Try to parse as JSON to validate + parsed = json.loads(content) + # Re-serialize to ensure consistent formatting + content = json.dumps(parsed) + except json.JSONDecodeError: + # If not valid JSON, use as-is + pass + + # Generate unique ID + episode_uuid = str(uuid_module.uuid4()) + created_at = datetime.now().isoformat() + + # Get database path - create directory if needed + full_path = Path(args.db_path) / args.database + if not full_path.exists(): + # For new databases, create the parent directory + Path(args.db_path).mkdir(parents=True, exist_ok=True) + + # Open database (creates it if it doesn't exist) + db = kuzu.Database(str(full_path)) + conn = kuzu.Connection(db) + + # Always try to create the Episodic table if it doesn't exist + # This handles both new databases and existing databases without the table + try: + conn.execute(""" + CREATE NODE TABLE IF NOT EXISTS Episodic ( + uuid STRING PRIMARY KEY, + name STRING, + content STRING, + source_description STRING, + group_id STRING, + created_at STRING + ) + """) + except Exception as schema_err: + # Table might already exist with different schema - that's ok + # The insert will fail if schema is incompatible + sys.stderr.write(f"Schema creation note: {schema_err}\n") + + # Insert the episode + try: + insert_query = """ + CREATE (e:Episodic { + uuid: $uuid, + name: $name, + content: $content, + source_description: $description, + group_id: $group_id, + created_at: $created_at + }) + """ + conn.execute( + insert_query, + parameters={ + "uuid": episode_uuid, + "name": args.name, + "content": content, + "description": f"[{args.episode_type}] {args.name}", + "group_id": args.group_id or "", + "created_at": created_at, + }, + ) + + output_json( + True, + data={ + "id": episode_uuid, + "name": args.name, + "type": args.episode_type, + "timestamp": created_at, + }, + ) + + except Exception as e: + output_error(f"Failed to insert episode: {e}") + + except Exception as e: + output_error(f"Failed to add episode: {e}") + + def infer_episode_type(name: str, content: str = "") -> str: """Infer the episode type from its name and content.""" name_lower = (name or "").lower() @@ -580,6 +713,27 @@ def main(): "--limit", type=int, default=20, help="Maximum results" ) + # add-episode command (for saving memories from Electron app) + add_parser = subparsers.add_parser( + "add-episode", + help="Add an episode to the memory database (called from Electron)", + ) + add_parser.add_argument("db_path", help="Path to database directory") + add_parser.add_argument("database", help="Database name") + add_parser.add_argument("--name", required=True, help="Episode name/title") + add_parser.add_argument( + "--content", required=True, help="Episode content (JSON string)" + ) + add_parser.add_argument( + "--type", + dest="episode_type", + default="session_insight", + help="Episode type (session_insight, pattern, gotcha, task_outcome, pr_review)", + ) + 
add_parser.add_argument( + "--group-id", dest="group_id", help="Optional group ID for namespacing" + ) + args = parser.parse_args() if not args.command: @@ -594,6 +748,7 @@ def main(): "search": cmd_search, "semantic-search": cmd_semantic_search, "get-entities": cmd_get_entities, + "add-episode": cmd_add_episode, } handler = commands.get(args.command) diff --git a/apps/backend/requirements.txt b/apps/backend/requirements.txt index 59aec7b0ee..6ea5fe0b7b 100644 --- a/apps/backend/requirements.txt +++ b/apps/backend/requirements.txt @@ -15,3 +15,7 @@ google-generativeai>=0.8.0 # Pydantic for structured output schemas pydantic>=2.0.0 + +# Azure DevOps Integration (optional - for ADO provider) +azure-devops>=7.1.0b4,<8.0.0 +msrest>=0.7.1,<1.0.0 diff --git a/apps/backend/runners/ai_analyzer/claude_client.py b/apps/backend/runners/ai_analyzer/claude_client.py index e1f5a669dc..5d3f07121a 100644 --- a/apps/backend/runners/ai_analyzer/claude_client.py +++ b/apps/backend/runners/ai_analyzer/claude_client.py @@ -8,6 +8,7 @@ try: from claude_agent_sdk import ClaudeAgentOptions, ClaudeSDKClient + from phase_config import resolve_model_id CLAUDE_SDK_AVAILABLE = True except ImportError: @@ -17,7 +18,7 @@ class ClaudeAnalysisClient: """Wrapper for Claude SDK client with analysis-specific configuration.""" - DEFAULT_MODEL = "claude-sonnet-4-5-20250929" + DEFAULT_MODEL = "sonnet" # Shorthand - resolved via API Profile if configured ALLOWED_TOOLS = ["Read", "Glob", "Grep"] MAX_TURNS = 50 @@ -110,7 +111,7 @@ def _create_client(self, settings_file: Path) -> Any: return ClaudeSDKClient( options=ClaudeAgentOptions( - model=self.DEFAULT_MODEL, + model=resolve_model_id(self.DEFAULT_MODEL), # Resolve via API Profile system_prompt=system_prompt, allowed_tools=self.ALLOWED_TOOLS, max_turns=self.MAX_TURNS, diff --git a/apps/backend/runners/github/cleanup_pr_worktrees.py b/apps/backend/runners/github/cleanup_pr_worktrees.py new file mode 100755 index 0000000000..1a40688f9f --- /dev/null +++ b/apps/backend/runners/github/cleanup_pr_worktrees.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python3 +""" +PR Worktree Cleanup Utility +============================ + +Command-line tool for managing PR review worktrees. 
+ +Usage: + python cleanup_pr_worktrees.py --list # List all worktrees + python cleanup_pr_worktrees.py --cleanup # Run cleanup policies + python cleanup_pr_worktrees.py --cleanup-all # Remove ALL worktrees + python cleanup_pr_worktrees.py --stats # Show cleanup statistics +""" + +import argparse + +# Load module directly to avoid import issues +import importlib.util +import sys +from pathlib import Path + +services_dir = Path(__file__).parent / "services" +module_path = services_dir / "pr_worktree_manager.py" + +spec = importlib.util.spec_from_file_location("pr_worktree_manager", module_path) +pr_worktree_module = importlib.util.module_from_spec(spec) +spec.loader.exec_module(pr_worktree_module) + +PRWorktreeManager = pr_worktree_module.PRWorktreeManager +DEFAULT_PR_WORKTREE_MAX_AGE_DAYS = pr_worktree_module.DEFAULT_PR_WORKTREE_MAX_AGE_DAYS +DEFAULT_MAX_PR_WORKTREES = pr_worktree_module.DEFAULT_MAX_PR_WORKTREES +_get_max_age_days = pr_worktree_module._get_max_age_days +_get_max_pr_worktrees = pr_worktree_module._get_max_pr_worktrees + + +def find_project_root() -> Path: + """Find the git project root directory.""" + current = Path.cwd() + while current != current.parent: + if (current / ".git").exists(): + return current + current = current.parent + raise RuntimeError("Not in a git repository") + + +def list_worktrees(manager: PRWorktreeManager) -> None: + """List all PR review worktrees.""" + worktrees = manager.get_worktree_info() + + if not worktrees: + print("No PR review worktrees found.") + return + + print(f"\nFound {len(worktrees)} PR review worktrees:\n") + print(f"{'Directory':<40} {'Age (days)':<12} {'PR':<6}") + print("-" * 60) + + for wt in worktrees: + pr_str = f"#{wt.pr_number}" if wt.pr_number else "N/A" + print(f"{wt.path.name:<40} {wt.age_days:>10.1f} {pr_str:>6}") + + print() + + +def show_stats(manager: PRWorktreeManager) -> None: + """Show worktree cleanup statistics.""" + worktrees = manager.get_worktree_info() + registered = manager.get_registered_worktrees() + # Use resolved paths for consistent comparison (handles macOS symlinks) + registered_resolved = {p.resolve() for p in registered} + + # Get current policy values (may be overridden by env vars) + max_age_days = _get_max_age_days() + max_worktrees = _get_max_pr_worktrees() + + total = len(worktrees) + orphaned = sum( + 1 for wt in worktrees if wt.path.resolve() not in registered_resolved + ) + expired = sum(1 for wt in worktrees if wt.age_days > max_age_days) + excess = max(0, total - max_worktrees) + + print("\nPR Worktree Statistics:") + print(f" Total worktrees: {total}") + print(f" Registered with git: {len(registered)}") + print(f" Orphaned (not in git): {orphaned}") + print(f" Expired (>{max_age_days} days): {expired}") + print(f" Excess (>{max_worktrees} limit): {excess}") + print() + print("Cleanup Policies:") + print(f" Max age: {max_age_days} days") + print(f" Max count: {max_worktrees} worktrees") + print() + + +def cleanup_worktrees(manager: PRWorktreeManager, force: bool = False) -> None: + """Run cleanup policies on worktrees.""" + print("\nRunning PR worktree cleanup...") + if force: + print("WARNING: Force cleanup - removing ALL worktrees!") + count = manager.cleanup_all_worktrees() + print(f"Removed {count} worktrees.") + else: + stats = manager.cleanup_worktrees() + if stats["total"] == 0: + print("No worktrees needed cleanup.") + else: + print("\nCleanup complete:") + print(f" Orphaned removed: {stats['orphaned']}") + print(f" Expired removed: {stats['expired']}") + print(f" Excess removed: 
{stats['excess']}") + print(f" Total removed: {stats['total']}") + print() + + +def main(): + parser = argparse.ArgumentParser( + description="Manage PR review worktrees", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python cleanup_pr_worktrees.py --list + python cleanup_pr_worktrees.py --cleanup + python cleanup_pr_worktrees.py --stats + python cleanup_pr_worktrees.py --cleanup-all + +Environment variables: + MAX_PR_WORKTREES=10 # Max number of worktrees to keep + PR_WORKTREE_MAX_AGE_DAYS=7 # Max age in days before cleanup + """, + ) + + parser.add_argument( + "--list", action="store_true", help="List all PR review worktrees" + ) + + parser.add_argument( + "--cleanup", + action="store_true", + help="Run cleanup policies (remove orphaned, expired, and excess worktrees)", + ) + + parser.add_argument( + "--cleanup-all", + action="store_true", + help="Remove ALL PR review worktrees (dangerous!)", + ) + + parser.add_argument("--stats", action="store_true", help="Show cleanup statistics") + + parser.add_argument( + "--project-dir", + type=Path, + help="Project directory (default: auto-detect git root)", + ) + + args = parser.parse_args() + + # Require at least one action + if not any([args.list, args.cleanup, args.cleanup_all, args.stats]): + parser.print_help() + return 1 + + try: + # Find project directory + if args.project_dir: + project_dir = args.project_dir + else: + project_dir = find_project_root() + + print(f"Project directory: {project_dir}") + + # Create manager + manager = PRWorktreeManager( + project_dir=project_dir, worktree_dir=".auto-claude/github/pr/worktrees" + ) + + # Execute actions + if args.stats: + show_stats(manager) + + if args.list: + list_worktrees(manager) + + if args.cleanup: + cleanup_worktrees(manager, force=False) + + if args.cleanup_all: + response = input( + "This will remove ALL PR worktrees. Are you sure? (yes/no): " + ) + if response.lower() == "yes": + cleanup_worktrees(manager, force=True) + else: + print("Aborted.") + + return 0 + + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/apps/backend/runners/github/confidence.py b/apps/backend/runners/github/confidence.py index 0e21b211eb..70557b922c 100644 --- a/apps/backend/runners/github/confidence.py +++ b/apps/backend/runners/github/confidence.py @@ -1,16 +1,18 @@ """ -Review Confidence Scoring -========================= +DEPRECATED: Review Confidence Scoring +===================================== -Adds confidence scores to review findings to help users prioritize. +This module is DEPRECATED and will be removed in a future version. -Features: -- Confidence scoring based on pattern matching, historical accuracy -- Risk assessment (false positive likelihood) -- Evidence tracking for transparency -- Calibration based on outcome tracking +The confidence scoring approach has been replaced with EVIDENCE-BASED VALIDATION: +- Instead of assigning confidence scores (0-100), findings now require concrete + code evidence proving the issue exists. +- Simple rule: If you can't show the actual problematic code, don't report it. +- Validation is binary: either the evidence exists in the file or it doesn't. -Usage: +For new code, use evidence-based validation in pydantic_models.py and models.py instead. 
+ +Legacy Usage (deprecated): scorer = ConfidenceScorer(learning_tracker=tracker) # Score a finding @@ -20,10 +22,24 @@ # Get explanation print(scorer.explain_confidence(scored)) + +Migration: + - Instead of `confidence: float`, use `evidence: str` with actual code snippets + - Instead of filtering by confidence threshold, verify evidence exists in file + - See pr_finding_validator.md for the new evidence-based approach """ from __future__ import annotations +import warnings + +warnings.warn( + "The confidence module is deprecated. Use evidence-based validation instead. " + "See models.py 'evidence' field and pr_finding_validator.md for the new approach.", + DeprecationWarning, + stacklevel=2, +) + from dataclasses import dataclass, field from enum import Enum from typing import Any diff --git a/apps/backend/runners/github/context_gatherer.py b/apps/backend/runners/github/context_gatherer.py index 0ce48bf5ea..9a3c551261 100644 --- a/apps/backend/runners/github/context_gatherer.py +++ b/apps/backend/runners/github/context_gatherer.py @@ -204,6 +204,11 @@ class PRContext: # Commit SHAs for worktree creation (PR review isolation) head_sha: str = "" # Commit SHA of PR head (headRefOid) base_sha: str = "" # Commit SHA of PR base (baseRefOid) + # Merge conflict status + has_merge_conflicts: bool = False # True if PR has conflicts with base branch + merge_state_status: str = ( + "" # BEHIND, BLOCKED, CLEAN, DIRTY, HAS_HOOKS, UNKNOWN, UNSTABLE + ) class PRContextGatherer: @@ -276,6 +281,17 @@ async def gather(self) -> PRContext: # Check if diff was truncated (empty diff but files were changed) diff_truncated = len(diff) == 0 and len(changed_files) > 0 + # Check merge conflict status + mergeable = pr_data.get("mergeable", "UNKNOWN") + merge_state_status = pr_data.get("mergeStateStatus", "UNKNOWN") + has_merge_conflicts = mergeable == "CONFLICTING" + + if has_merge_conflicts: + print( + f"[Context] ⚠️ PR has merge conflicts (mergeStateStatus: {merge_state_status})", + flush=True, + ) + return PRContext( pr_number=self.pr_number, title=pr_data["title"], @@ -296,6 +312,8 @@ async def gather(self) -> PRContext: diff_truncated=diff_truncated, head_sha=pr_data.get("headRefOid", ""), base_sha=pr_data.get("baseRefOid", ""), + has_merge_conflicts=has_merge_conflicts, + merge_state_status=merge_state_status, ) async def _fetch_pr_metadata(self) -> dict: @@ -317,6 +335,8 @@ async def _fetch_pr_metadata(self) -> dict: "deletions", "changedFiles", "labels", + "mergeable", # MERGEABLE, CONFLICTING, or UNKNOWN + "mergeStateStatus", # BEHIND, BLOCKED, CLEAN, DIRTY, HAS_HOOKS, UNKNOWN, UNSTABLE ], ) @@ -1036,28 +1056,56 @@ async def gather(self) -> FollowupReviewContext: f"[Followup] Comparing {previous_sha[:8]}...{current_sha[:8]}", flush=True ) - # Get commit comparison + # Get PR-scoped files and commits (excludes merge-introduced changes) + # This solves the problem where merging develop into a feature branch + # would include commits from other PRs in the follow-up review. 
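+        # Illustrative example: if develop was merged into this feature branch,
+        # compare_commits(previous_sha, current_sha) would also report every file
+        # touched on develop, while the PR files/commits endpoints stay scoped to
+        # this PR's own changes.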
+ # Pass reviewed_file_blobs for rebase-resistant comparison + reviewed_file_blobs = getattr(self.previous_review, "reviewed_file_blobs", {}) try: - comparison = await self.gh_client.compare_commits(previous_sha, current_sha) - except Exception as e: - print(f"[Followup] Error comparing commits: {e}", flush=True) - return FollowupReviewContext( - pr_number=self.pr_number, - previous_review=self.previous_review, - previous_commit_sha=previous_sha, - current_commit_sha=current_sha, - error=f"Failed to compare commits: {e}", + pr_files, new_commits = await self.gh_client.get_pr_files_changed_since( + self.pr_number, previous_sha, reviewed_file_blobs=reviewed_file_blobs ) + print( + f"[Followup] PR has {len(pr_files)} files, " + f"{len(new_commits)} commits since last review" + + (" (blob comparison used)" if reviewed_file_blobs else ""), + flush=True, + ) + except Exception as e: + print(f"[Followup] Error getting PR files/commits: {e}", flush=True) + # Fallback to compare_commits if PR endpoints fail + print("[Followup] Falling back to commit comparison...", flush=True) + try: + comparison = await self.gh_client.compare_commits( + previous_sha, current_sha + ) + new_commits = comparison.get("commits", []) + pr_files = comparison.get("files", []) + print( + f"[Followup] Fallback: Found {len(new_commits)} commits, " + f"{len(pr_files)} files (may include merge-introduced changes)", + flush=True, + ) + except Exception as e2: + print(f"[Followup] Fallback also failed: {e2}", flush=True) + return FollowupReviewContext( + pr_number=self.pr_number, + previous_review=self.previous_review, + previous_commit_sha=previous_sha, + current_commit_sha=current_sha, + error=f"Failed to get PR context: {e}, fallback: {e2}", + ) - # Extract data from comparison - commits = comparison.get("commits", []) - files = comparison.get("files", []) + # Use PR files as the canonical list (excludes files from merged branches) + commits = new_commits + files = pr_files print( f"[Followup] Found {len(commits)} new commits, {len(files)} changed files", flush=True, ) # Build diff from file patches + # Note: PR files endpoint returns 'filename' key, compare returns 'filename' too diff_parts = [] files_changed = [] for file_info in files: @@ -1139,6 +1187,26 @@ async def gather(self) -> FollowupReviewContext: flush=True, ) + # Fetch current merge conflict status + has_merge_conflicts = False + merge_state_status = "UNKNOWN" + try: + pr_status = await self.gh_client.pr_get( + self.pr_number, + json_fields=["mergeable", "mergeStateStatus"], + ) + mergeable = pr_status.get("mergeable", "UNKNOWN") + merge_state_status = pr_status.get("mergeStateStatus", "UNKNOWN") + has_merge_conflicts = mergeable == "CONFLICTING" + + if has_merge_conflicts: + print( + f"[Followup] ⚠️ PR has merge conflicts (mergeStateStatus: {merge_state_status})", + flush=True, + ) + except Exception as e: + print(f"[Followup] Could not fetch merge status: {e}", flush=True) + return FollowupReviewContext( pr_number=self.pr_number, previous_review=self.previous_review, @@ -1151,4 +1219,6 @@ async def gather(self) -> FollowupReviewContext: + contributor_reviews, ai_bot_comments_since_review=ai_comments, pr_reviews_since_review=pr_reviews, + has_merge_conflicts=has_merge_conflicts, + merge_state_status=merge_state_status, ) diff --git a/apps/backend/runners/github/gh_client.py b/apps/backend/runners/github/gh_client.py index 942aefa2b4..4ade5f913b 100644 --- a/apps/backend/runners/github/gh_client.py +++ b/apps/backend/runners/github/gh_client.py @@ -822,14 
+822,17 @@ async def get_pr_checks(self, pr_number: int) -> dict[str, Any]: Returns: Dict with: - - checks: List of check runs with name, status, conclusion + - checks: List of check runs with name, state - passing: Number of passing checks - failing: Number of failing checks - pending: Number of pending checks - failed_checks: List of failed check names """ try: - args = ["pr", "checks", str(pr_number), "--json", "name,state,conclusion"] + # Note: gh pr checks --json only supports: bucket, completedAt, description, + # event, link, name, startedAt, state, workflow + # The 'state' field directly contains the result (SUCCESS, FAILURE, PENDING, etc.) + args = ["pr", "checks", str(pr_number), "--json", "name,state"] args = self._add_repo_flag(args) result = await self.run(args, timeout=30.0) @@ -842,15 +845,14 @@ async def get_pr_checks(self, pr_number: int) -> dict[str, Any]: for check in checks: state = check.get("state", "").upper() - conclusion = check.get("conclusion", "").upper() name = check.get("name", "Unknown") - if state == "COMPLETED": - if conclusion in ("SUCCESS", "NEUTRAL", "SKIPPED"): - passing += 1 - elif conclusion in ("FAILURE", "TIMED_OUT", "CANCELLED"): - failing += 1 - failed_checks.append(name) + # gh pr checks 'state' directly contains: SUCCESS, FAILURE, PENDING, NEUTRAL, etc. + if state in ("SUCCESS", "NEUTRAL", "SKIPPED"): + passing += 1 + elif state in ("FAILURE", "TIMED_OUT", "CANCELLED", "STARTUP_FAILURE"): + failing += 1 + failed_checks.append(name) else: # PENDING, QUEUED, IN_PROGRESS, etc. pending += 1 @@ -872,3 +874,336 @@ async def get_pr_checks(self, pr_number: int) -> dict[str, Any]: "failed_checks": [], "error": str(e), } + + async def get_workflows_awaiting_approval(self, pr_number: int) -> dict[str, Any]: + """ + Get workflow runs awaiting approval for a PR from a fork. + + Workflows from forked repositories require manual approval before running. + These are NOT included in `gh pr checks` and must be queried separately. 
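+
+        Example return value (illustrative shape; keys as constructed below):
+
+            {
+                "awaiting_approval": 1,
+                "workflow_runs": [
+                    {"id": 42, "name": "CI", "html_url": "https://...", "workflow_name": "CI"}
+                ],
+                "can_approve": True,
+            }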
+ + Args: + pr_number: PR number + + Returns: + Dict with: + - awaiting_approval: Number of workflows waiting for approval + - workflow_runs: List of workflow runs with id, name, html_url + - can_approve: Whether this token can approve workflows + """ + try: + # First, get the PR's head SHA to filter workflow runs + pr_args = ["pr", "view", str(pr_number), "--json", "headRefOid"] + pr_args = self._add_repo_flag(pr_args) + pr_result = await self.run(pr_args, timeout=30.0) + pr_data = json.loads(pr_result.stdout) if pr_result.stdout.strip() else {} + head_sha = pr_data.get("headRefOid", "") + + if not head_sha: + return { + "awaiting_approval": 0, + "workflow_runs": [], + "can_approve": False, + } + + # Query workflow runs with action_required status + # Note: We need to use the API endpoint as gh CLI doesn't have direct support + endpoint = ( + "repos/{owner}/{repo}/actions/runs?status=action_required&per_page=100" + ) + args = ["api", "--method", "GET", endpoint] + + result = await self.run(args, timeout=30.0) + data = json.loads(result.stdout) if result.stdout.strip() else {} + all_runs = data.get("workflow_runs", []) + + # Filter to only runs for this PR's head SHA + pr_runs = [ + { + "id": run.get("id"), + "name": run.get("name"), + "html_url": run.get("html_url"), + "workflow_name": run.get("workflow", {}).get("name", "Unknown"), + } + for run in all_runs + if run.get("head_sha") == head_sha + ] + + return { + "awaiting_approval": len(pr_runs), + "workflow_runs": pr_runs, + "can_approve": True, # Assume token has permission, will fail if not + } + except (GHCommandError, GHTimeoutError, json.JSONDecodeError) as e: + logger.warning( + f"Failed to get workflows awaiting approval for #{pr_number}: {e}" + ) + return { + "awaiting_approval": 0, + "workflow_runs": [], + "can_approve": False, + "error": str(e), + } + + async def approve_workflow_run(self, run_id: int) -> bool: + """ + Approve a workflow run that's waiting for approval (from a fork). + + Args: + run_id: The workflow run ID to approve + + Returns: + True if approval succeeded, False otherwise + """ + try: + endpoint = f"repos/{{owner}}/{{repo}}/actions/runs/{run_id}/approve" + args = ["api", "--method", "POST", endpoint] + + await self.run(args, timeout=30.0) + logger.info(f"Approved workflow run {run_id}") + return True + except (GHCommandError, GHTimeoutError) as e: + logger.warning(f"Failed to approve workflow run {run_id}: {e}") + return False + + async def get_pr_checks_comprehensive(self, pr_number: int) -> dict[str, Any]: + """ + Get comprehensive CI status including workflows awaiting approval. + + This combines: + - Standard check runs from `gh pr checks` + - Workflows awaiting approval (for fork PRs) + + Args: + pr_number: PR number + + Returns: + Dict with all check information including awaiting_approval count + """ + # Get standard checks + checks = await self.get_pr_checks(pr_number) + + # Get workflows awaiting approval + awaiting = await self.get_workflows_awaiting_approval(pr_number) + + # Merge the results + checks["awaiting_approval"] = awaiting.get("awaiting_approval", 0) + checks["awaiting_workflow_runs"] = awaiting.get("workflow_runs", []) + + # Update pending count to include awaiting approval + checks["pending"] = checks.get("pending", 0) + awaiting.get( + "awaiting_approval", 0 + ) + + return checks + + async def get_pr_files(self, pr_number: int) -> list[dict[str, Any]]: + """ + Get files changed by a PR using the PR files endpoint. 
+ + IMPORTANT: This returns only files that are part of the PR's actual changes, + NOT files that came in from merging another branch (e.g., develop). + This is crucial for follow-up reviews to avoid reviewing code from other PRs. + + Uses: GET /repos/{owner}/{repo}/pulls/{pr_number}/files + + Args: + pr_number: PR number + + Returns: + List of file objects with: + - filename: Path to the file + - status: added, removed, modified, renamed, copied, changed + - additions: Number of lines added + - deletions: Number of lines deleted + - changes: Total number of line changes + - patch: The unified diff patch for this file (may be absent for large files) + """ + files = [] + page = 1 + per_page = 100 + + while True: + endpoint = f"repos/{{owner}}/{{repo}}/pulls/{pr_number}/files?page={page}&per_page={per_page}" + args = ["api", "--method", "GET", endpoint] + + result = await self.run(args, timeout=60.0) + page_files = json.loads(result.stdout) if result.stdout.strip() else [] + + if not page_files: + break + + files.extend(page_files) + + # Check if we got a full page (more pages might exist) + if len(page_files) < per_page: + break + + page += 1 + + # Safety limit to prevent infinite loops + if page > 50: + logger.warning( + f"PR #{pr_number} has more than 5000 files, stopping pagination" + ) + break + + return files + + async def get_pr_commits(self, pr_number: int) -> list[dict[str, Any]]: + """ + Get commits that are part of a PR using the PR commits endpoint. + + IMPORTANT: This returns only commits that are part of the PR's branch, + NOT commits that came in from merging another branch (e.g., develop). + This is crucial for follow-up reviews to avoid reviewing commits from other PRs. + + Uses: GET /repos/{owner}/{repo}/pulls/{pr_number}/commits + + Args: + pr_number: PR number + + Returns: + List of commit objects with: + - sha: Commit SHA + - commit: Object with message, author, committer info + - author: GitHub user who authored the commit + - committer: GitHub user who committed + - parents: List of parent commit SHAs + """ + commits = [] + page = 1 + per_page = 100 + + while True: + endpoint = f"repos/{{owner}}/{{repo}}/pulls/{pr_number}/commits?page={page}&per_page={per_page}" + args = ["api", "--method", "GET", endpoint] + + result = await self.run(args, timeout=60.0) + page_commits = json.loads(result.stdout) if result.stdout.strip() else [] + + if not page_commits: + break + + commits.extend(page_commits) + + # Check if we got a full page (more pages might exist) + if len(page_commits) < per_page: + break + + page += 1 + + # Safety limit + if page > 10: + logger.warning( + f"PR #{pr_number} has more than 1000 commits, stopping pagination" + ) + break + + return commits + + async def get_pr_files_changed_since( + self, + pr_number: int, + base_sha: str, + reviewed_file_blobs: dict[str, str] | None = None, + ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]: + """ + Get files and commits that are part of the PR and changed since a specific commit. + + This method solves the "merge introduced commits" problem by: + 1. Getting the canonical list of PR files (excludes files from merged branches) + 2. Getting the canonical list of PR commits (excludes commits from merged branches) + 3. Filtering to only include commits after base_sha + + When a rebase/force-push is detected (base_sha not found in commits), and + reviewed_file_blobs is provided, uses blob SHA comparison to identify which + files actually changed content. This prevents re-reviewing unchanged files. 
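+
+        Why blob SHAs survive rebases (conceptual aside): git hashes a blob from
+        its content alone, so identical content keeps an identical SHA no matter
+        which commit contains it. Roughly:
+
+            import hashlib
+            content = b"print('hi')\n"
+            # the same formula `git hash-object` uses (SHA-1 repositories)
+            blob_sha = hashlib.sha1(b"blob %d\x00" % len(content) + content).hexdigest()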
+ + Args: + pr_number: PR number + base_sha: The commit SHA to compare from (e.g., last reviewed commit) + reviewed_file_blobs: Optional dict mapping filename -> blob SHA from the + previous review. Used as fallback when base_sha is not found (rebase). + + Returns: + Tuple of: + - List of file objects that are part of the PR (filtered if blob comparison used) + - List of commit objects that are part of the PR and after base_sha. + NOTE: Returns empty list if rebase/force-push detected, since commit SHAs + are rewritten and we cannot determine which commits are truly "new". + """ + # Get PR's canonical files (these are the actual PR changes) + pr_files = await self.get_pr_files(pr_number) + + # Get PR's canonical commits + pr_commits = await self.get_pr_commits(pr_number) + + # Find the position of base_sha in PR commits + # Use minimum 7-char prefix comparison (git's default short SHA length) + base_index = -1 + min_prefix_len = 7 + base_prefix = ( + base_sha[:min_prefix_len] if len(base_sha) >= min_prefix_len else base_sha + ) + for i, commit in enumerate(pr_commits): + commit_prefix = commit["sha"][:min_prefix_len] + if commit_prefix == base_prefix: + base_index = i + break + + # Commits after base_sha (these are the new commits to review) + if base_index >= 0: + new_commits = pr_commits[base_index + 1 :] + return pr_files, new_commits + + # base_sha not found in PR commits - this happens when: + # 1. The base_sha was from a merge commit (not a direct PR commit) + # 2. The PR was rebased/force-pushed + logger.warning( + f"base_sha {base_sha[:8]} not found in PR #{pr_number} commits. " + "PR was likely rebased or force-pushed." + ) + + # If we have blob SHAs from the previous review, use them to filter files + # Blob SHAs persist across rebases - same content = same blob SHA + if reviewed_file_blobs: # Only use blob comparison if we have actual blob data + changed_files = [] + unchanged_count = 0 + for file in pr_files: + filename = file.get("filename", "") + current_blob_sha = file.get("sha", "") + file_status = file.get("status", "") + previous_blob_sha = reviewed_file_blobs.get(filename, "") + + # Always include files that were added, removed, or renamed + # These are significant changes regardless of blob SHA + if file_status in ("added", "removed", "renamed"): + changed_files.append(file) + elif not previous_blob_sha: + # File wasn't in previous review - include it + changed_files.append(file) + elif current_blob_sha != previous_blob_sha: + # File content changed - include it + changed_files.append(file) + else: + # Same blob SHA = same content - skip it + unchanged_count += 1 + + if unchanged_count > 0: + logger.info( + f"Blob comparison: {len(changed_files)} files changed, " + f"{unchanged_count} unchanged (skipped)" + ) + + # Return filtered files but empty commits list (can't determine "new" commits after rebase) + # After a rebase, all commit SHAs are rewritten so we can't identify which are truly new. + # The file changes via blob comparison are the reliable source of what changed. + return changed_files, [] + + # No blob data available - return all files but empty commits (can't determine new commits) + logger.warning( + "No reviewed_file_blobs available for blob comparison after rebase. " + "Returning all PR files with empty commits list." 
+ ) + return pr_files, [] diff --git a/apps/backend/runners/github/models.py b/apps/backend/runners/github/models.py index cb7dbe22e9..0d95eb2a63 100644 --- a/apps/backend/runners/github/models.py +++ b/apps/backend/runners/github/models.py @@ -214,19 +214,18 @@ class PRReviewFinding: end_line: int | None = None suggested_fix: str | None = None fixable: bool = False - # NEW: Support for verification and redundancy detection - confidence: float = 0.85 # AI's confidence in this finding (0.0-1.0) + # Evidence-based validation: actual code proving the issue exists + evidence: str | None = None # Actual code snippet showing the issue verification_note: str | None = ( None # What evidence is missing or couldn't be verified ) redundant_with: str | None = None # Reference to duplicate code (file:line) - # NEW: Finding validation fields (from finding-validator re-investigation) + # Finding validation fields (from finding-validator re-investigation) validation_status: str | None = ( None # confirmed_valid, dismissed_false_positive, needs_human_review ) validation_evidence: str | None = None # Code snippet examined during validation - validation_confidence: float | None = None # Confidence of validation (0.0-1.0) validation_explanation: str | None = None # Why finding was validated/dismissed def to_dict(self) -> dict: @@ -241,14 +240,13 @@ def to_dict(self) -> dict: "end_line": self.end_line, "suggested_fix": self.suggested_fix, "fixable": self.fixable, - # NEW fields - "confidence": self.confidence, + # Evidence-based validation fields + "evidence": self.evidence, "verification_note": self.verification_note, "redundant_with": self.redundant_with, # Validation fields "validation_status": self.validation_status, "validation_evidence": self.validation_evidence, - "validation_confidence": self.validation_confidence, "validation_explanation": self.validation_explanation, } @@ -265,14 +263,13 @@ def from_dict(cls, data: dict) -> PRReviewFinding: end_line=data.get("end_line"), suggested_fix=data.get("suggested_fix"), fixable=data.get("fixable", False), - # NEW fields - confidence=data.get("confidence", 0.85), + # Evidence-based validation fields + evidence=data.get("evidence"), verification_note=data.get("verification_note"), redundant_with=data.get("redundant_with"), # Validation fields validation_status=data.get("validation_status"), validation_evidence=data.get("validation_evidence"), - validation_confidence=data.get("validation_confidence"), validation_explanation=data.get("validation_explanation"), ) @@ -383,6 +380,9 @@ class PRReviewResult: # Follow-up review tracking reviewed_commit_sha: str | None = None # HEAD SHA at time of review + reviewed_file_blobs: dict[str, str] = field( + default_factory=dict + ) # filename β†’ blob SHA at time of review (survives rebases) is_followup_review: bool = False # True if this is a follow-up review previous_review_id: int | None = None # Reference to the review this follows up on resolved_findings: list[str] = field(default_factory=list) # Finding IDs now fixed @@ -421,6 +421,7 @@ def to_dict(self) -> dict: "quick_scan_summary": self.quick_scan_summary, # Follow-up review fields "reviewed_commit_sha": self.reviewed_commit_sha, + "reviewed_file_blobs": self.reviewed_file_blobs, "is_followup_review": self.is_followup_review, "previous_review_id": self.previous_review_id, "resolved_findings": self.resolved_findings, @@ -465,6 +466,7 @@ def from_dict(cls, data: dict) -> PRReviewResult: quick_scan_summary=data.get("quick_scan_summary", {}), # Follow-up review fields 
reviewed_commit_sha=data.get("reviewed_commit_sha"), + reviewed_file_blobs=data.get("reviewed_file_blobs", {}), is_followup_review=data.get("is_followup_review", False), previous_review_id=data.get("previous_review_id"), resolved_findings=data.get("resolved_findings", []), @@ -562,6 +564,16 @@ class FollowupReviewContext: # These are different from comments - they're full review submissions with body text pr_reviews_since_review: list[dict] = field(default_factory=list) + # Merge conflict status + has_merge_conflicts: bool = False # True if PR has conflicts with base branch + merge_state_status: str = ( + "" # BEHIND, BLOCKED, CLEAN, DIRTY, HAS_HOOKS, UNKNOWN, UNSTABLE + ) + + # CI status - passed to AI orchestrator so it can factor into verdict + # Dict with: passing, failing, pending, failed_checks, awaiting_approval + ci_status: dict = field(default_factory=dict) + # Error flag - if set, context gathering failed and data may be incomplete error: str | None = None diff --git a/apps/backend/runners/github/orchestrator.py b/apps/backend/runners/github/orchestrator.py index 0cfb078efe..e3b797f742 100644 --- a/apps/backend/runners/github/orchestrator.py +++ b/apps/backend/runners/github/orchestrator.py @@ -389,17 +389,37 @@ async def review_pr( pr_number=pr_number, ) - # Check CI status - ci_status = await self.gh_client.get_pr_checks(pr_number) + # Check CI status (comprehensive - includes workflows awaiting approval) + ci_status = await self.gh_client.get_pr_checks_comprehensive(pr_number) + + # Log CI status with awaiting approval info + awaiting = ci_status.get("awaiting_approval", 0) + pending_without_awaiting = ci_status.get("pending", 0) - awaiting + ci_log_parts = [ + f"{ci_status.get('passing', 0)} passing", + f"{ci_status.get('failing', 0)} failing", + ] + if pending_without_awaiting > 0: + ci_log_parts.append(f"{pending_without_awaiting} pending") + if awaiting > 0: + ci_log_parts.append(f"{awaiting} awaiting approval") print( - f"[DEBUG orchestrator] CI status: {ci_status.get('passing', 0)} passing, " - f"{ci_status.get('failing', 0)} failing, {ci_status.get('pending', 0)} pending", + f"[orchestrator] CI status: {', '.join(ci_log_parts)}", flush=True, ) + if awaiting > 0: + print( + f"[orchestrator] ⚠️ {awaiting} workflow(s) from fork need maintainer approval to run", + flush=True, + ) - # Generate verdict (now includes CI status) + # Generate verdict (includes CI status and merge conflict check) verdict, verdict_reasoning, blockers = self._generate_verdict( - findings, structural_issues, ai_triages, ci_status + findings, + structural_issues, + ai_triages, + ci_status, + has_merge_conflicts=pr_context.has_merge_conflicts, ) print( f"[DEBUG orchestrator] Verdict: {verdict.value} - {verdict_reasoning}", @@ -435,6 +455,25 @@ async def review_pr( # Get HEAD SHA for follow-up review tracking head_sha = self.bot_detector.get_last_commit_sha(pr_context.commits) + # Get file blob SHAs for rebase-resistant follow-up reviews + # Blob SHAs persist across rebases - same content = same blob SHA + file_blobs: dict[str, str] = {} + try: + pr_files = await self.gh_client.get_pr_files(pr_number) + for file in pr_files: + filename = file.get("filename", "") + blob_sha = file.get("sha", "") + if filename and blob_sha: + file_blobs[filename] = blob_sha + print( + f"[Review] Captured {len(file_blobs)} file blob SHAs for follow-up tracking", + flush=True, + ) + except Exception as e: + print( + f"[Review] Warning: Could not capture file blobs: {e}", flush=True + ) + # Create result result = 
PRReviewResult( pr_number=pr_number, @@ -452,6 +491,8 @@ async def review_pr( quick_scan_summary=quick_scan, # Track the commit SHA for follow-up reviews reviewed_commit_sha=head_sha, + # Track file blobs for rebase-resistant follow-up reviews + reviewed_file_blobs=file_blobs, ) # Post review if configured @@ -479,6 +520,9 @@ async def review_pr( # Save result await result.save(self.github_dir) + # Note: PR review memory is now saved by the Electron app after the review completes + # This ensures memory is saved to the embedded LadybugDB managed by the app + # Mark as reviewed (head_sha already fetched above) if head_sha: self.bot_detector.mark_reviewed(pr_number, head_sha) @@ -594,19 +638,29 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult: await result.save(self.github_dir) return result - # Check if there are new commits - if not followup_context.commits_since_review: + # Check if there are changes to review (commits OR files via blob comparison) + # After a rebase/force-push, commits_since_review will be empty (commit + # SHAs are rewritten), but files_changed_since_review will contain files + # that actually changed content based on blob SHA comparison. + has_commits = bool(followup_context.commits_since_review) + has_file_changes = bool(followup_context.files_changed_since_review) + + if not has_commits and not has_file_changes: + base_sha = previous_review.reviewed_commit_sha[:8] print( - f"[Followup] No new commits since last review at {previous_review.reviewed_commit_sha[:8]}", + f"[Followup] No changes since last review at {base_sha}", flush=True, ) # Return a result indicating no changes + no_change_summary = ( + "No new commits since last review. Previous findings still apply." + ) result = PRReviewResult( pr_number=pr_number, repo=self.config.repo, success=True, findings=previous_review.findings, - summary="No new commits since last review. 
Previous findings still apply.", + summary=no_change_summary, overall_status=previous_review.overall_status, verdict=previous_review.verdict, verdict_reasoning="No changes since last review.", @@ -618,13 +672,26 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult: await result.save(self.github_dir) return result + # Build progress message based on what changed + if has_commits: + num_commits = len(followup_context.commits_since_review) + change_desc = f"{num_commits} new commits" + else: + # Rebase detected - files changed but no trackable commits + num_files = len(followup_context.files_changed_since_review) + change_desc = f"{num_files} files (rebase detected)" + self._report_progress( "analyzing", 30, - f"Analyzing {len(followup_context.commits_since_review)} new commits...", + f"Analyzing {change_desc}...", pr_number=pr_number, ) + # Fetch CI status BEFORE calling reviewer so AI can factor it into verdict + ci_status = await self.gh_client.get_pr_checks_comprehensive(pr_number) + followup_context.ci_status = ci_status + # Use parallel orchestrator for follow-up if enabled if self.config.use_parallel_orchestrator: print( @@ -669,9 +736,9 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult: ) result = await reviewer.review_followup(followup_context) - # Check CI status and override verdict if failing - ci_status = await self.gh_client.get_pr_checks(pr_number) - failed_checks = ci_status.get("failed_checks", []) + # Fallback: ensure CI failures block merge even if AI didn't factor it in + # (CI status was already passed to AI via followup_context.ci_status) + failed_checks = followup_context.ci_status.get("failed_checks", []) if failed_checks: print( f"[Followup] CI checks failing: {failed_checks}", @@ -703,6 +770,9 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult: # Save result await result.save(self.github_dir) + # Note: PR review memory is now saved by the Electron app after the review completes + # This ensures memory is saved to the embedded LadybugDB managed by the app + # Mark as reviewed with new commit SHA if result.reviewed_commit_sha: self.bot_detector.mark_reviewed(pr_number, result.reviewed_commit_sha) @@ -730,16 +800,26 @@ def _generate_verdict( structural_issues: list[StructuralIssue], ai_triages: list[AICommentTriage], ci_status: dict | None = None, + has_merge_conflicts: bool = False, ) -> tuple[MergeVerdict, str, list[str]]: """ - Generate merge verdict based on all findings and CI status. + Generate merge verdict based on all findings, CI status, and merge conflicts. - NEW: Strengthened to block on verification failures, redundancy issues, - and failing CI checks. 
+ Blocks on: + - Merge conflicts (must be resolved before merging) + - Verification failures + - Redundancy issues + - Failing CI checks """ blockers = [] ci_status = ci_status or {} + # CRITICAL: Merge conflicts block merging - check first + if has_merge_conflicts: + blockers.append( + "Merge Conflicts: PR has conflicts with base branch that must be resolved" + ) + # Count by severity critical = [f for f in findings if f.severity == ReviewSeverity.CRITICAL] high = [f for f in findings if f.severity == ReviewSeverity.HIGH] @@ -780,6 +860,13 @@ def _generate_verdict( for check_name in failed_checks: blockers.append(f"CI Failed: {check_name}") + # Workflows awaiting approval block merging (fork PRs) + awaiting_approval = ci_status.get("awaiting_approval", 0) + if awaiting_approval > 0: + blockers.append( + f"Workflows Pending: {awaiting_approval} workflow(s) awaiting maintainer approval" + ) + # NEW: Verification failures block merging for f in verification_failures: note = f" - {f.verification_note}" if f.verification_note else "" @@ -812,15 +899,29 @@ def _generate_verdict( ) blockers.append(f"{t.tool_name}: {summary}") - # Determine verdict with CI, verification and redundancy checks + # Determine verdict with merge conflicts, CI, verification and redundancy checks if blockers: + # Merge conflicts are the highest priority blocker + if has_merge_conflicts: + verdict = MergeVerdict.BLOCKED + reasoning = ( + "Blocked: PR has merge conflicts with base branch. " + "Resolve conflicts before merge." + ) # CI failures are always blockers - if failed_checks: + elif failed_checks: verdict = MergeVerdict.BLOCKED reasoning = ( f"Blocked: {len(failed_checks)} CI check(s) failing. " "Fix CI before merge." ) + # Workflows awaiting approval block merging + elif awaiting_approval > 0: + verdict = MergeVerdict.BLOCKED + reasoning = ( + f"Blocked: {awaiting_approval} workflow(s) awaiting approval. " + "Approve workflows on GitHub to run CI checks." + ) # NEW: Prioritize verification failures elif verification_failures: verdict = MergeVerdict.BLOCKED diff --git a/apps/backend/runners/github/providers/__init__.py b/apps/backend/runners/github/providers/__init__.py index 52db9fc3e9..38dc154871 100644 --- a/apps/backend/runners/github/providers/__init__.py +++ b/apps/backend/runners/github/providers/__init__.py @@ -17,8 +17,9 @@ await provider.post_review(123, review) """ -from .factory import get_provider, register_provider +from .factory import get_provider, register_provider, list_available_providers, is_provider_available from .github_provider import GitHubProvider +from .azure_devops_provider import AzureDevOpsProvider from .protocol import ( GitProvider, IssueData, @@ -42,7 +43,10 @@ "ProviderType", # Implementations "GitHubProvider", + "AzureDevOpsProvider", # Factory "get_provider", "register_provider", + "list_available_providers", + "is_provider_available", ] diff --git a/apps/backend/runners/github/providers/azure_devops_provider.py b/apps/backend/runners/github/providers/azure_devops_provider.py new file mode 100644 index 0000000000..eb29943670 --- /dev/null +++ b/apps/backend/runners/github/providers/azure_devops_provider.py @@ -0,0 +1,958 @@ +""" +Azure DevOps Provider Implementation +===================================== + +Implements the GitProvider protocol for Azure DevOps. +Uses the Azure DevOps Python API for all operations. 
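+
+Quick start (illustrative; configuration is read from the ADO_ORGANIZATION,
+ADO_PROJECT, and ADO_PAT environment variables in __post_init__ below):
+
+    provider = AzureDevOpsProvider()
+    pr = await provider.fetch_pr(123)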
+ +Security Notes: +- PAT is retrieved from environment on each connection, not stored long-term +- All user inputs are sanitized before use in WIQL queries +- Blocking SDK calls are wrapped with asyncio.to_thread for proper async behavior +""" + +from __future__ import annotations + +import asyncio +import os +import re +from dataclasses import dataclass, field +from datetime import datetime, timezone +from typing import Any, List, Optional + +from .protocol import ( + IssueData, + IssueFilters, + LabelData, + PRData, + PRFilters, + ProviderType, + ReviewData, +) + + +def _sanitize_wiql_string(value: str) -> str: + """ + Sanitize a string value for use in WIQL queries. + + Prevents WIQL injection by escaping/removing dangerous characters. + """ + if not value: + return "" + # Remove or escape characters that could be used for injection + # WIQL uses single quotes for strings, so escape them + sanitized = value.replace("'", "''") + # Remove any control characters + sanitized = re.sub(r'[\x00-\x1f\x7f-\x9f]', '', sanitized) + # Limit length to prevent abuse + return sanitized[:500] + + +def _validate_state(state: str) -> str: + """Validate state parameter against allowed values.""" + allowed_states = {"open", "closed", "all", "New", "Active", "Resolved", "Closed", "Done"} + if state not in allowed_states: + return "open" + return state + + +@dataclass +class AzureDevOpsProvider: + """ + Azure DevOps implementation of the GitProvider protocol. + + Uses the Azure DevOps REST API via the azure-devops Python package. + All blocking SDK calls are wrapped with asyncio.to_thread() for proper + async behavior. + + Usage: + provider = AzureDevOpsProvider( + organization="myorg", + project="myproject", + repo_name="myrepo" + ) + pr = await provider.fetch_pr(123) + await provider.post_review(123, review) + + Security: + - PAT should be provided via ADO_PAT environment variable + - All WIQL queries use parameterized/sanitized inputs + - Connection credentials are not persisted beyond necessary scope + """ + + _organization: Optional[str] = None + _project: Optional[str] = None + _repo_name: Optional[str] = None + _pat: Optional[str] = None + _instance_url: Optional[str] = None + + # Work item query limit (configurable) + _max_work_items: int = 200 + + # Cached clients (lazy-initialized, excluded from repr/eq/hash) + _connection: Any = field(default=None, init=False, repr=False, compare=False) + _git_client: Any = field(default=None, init=False, repr=False, compare=False) + _wit_client: Any = field(default=None, init=False, repr=False, compare=False) + + def __post_init__(self): + # Load from environment if not provided + self._organization = self._organization or os.getenv("ADO_ORGANIZATION") + self._project = self._project or os.getenv("ADO_PROJECT") + self._repo_name = self._repo_name or os.getenv("ADO_REPO_NAME") or self._project + self._instance_url = self._instance_url or os.getenv( + "ADO_INSTANCE_URL", "https://dev.azure.com" + ) + + # Note: PAT is retrieved fresh each time _ensure_connection is called + # to support credential rotation + + if not all([self._organization, self._project]): + raise ValueError( + "Azure DevOps provider requires ADO_ORGANIZATION and ADO_PROJECT. " + "Set these environment variables or pass them to the constructor." + ) + + def _get_pat(self) -> str: + """Get PAT from provided value or environment. Raises if not available.""" + pat = self._pat or os.getenv("ADO_PAT") + if not pat: + raise ValueError( + "Azure DevOps PAT not configured. 
" + "Set ADO_PAT environment variable or pass pat to constructor." + ) + return pat + + def _ensure_connection(self): + """Lazily initialize the Azure DevOps connection.""" + if self._connection is None: + try: + from azure.devops.connection import Connection + from msrest.authentication import BasicAuthentication + except ImportError: + raise ImportError( + "Azure DevOps SDK not installed. " + "Install with: pip install azure-devops" + ) + + pat = self._get_pat() + credentials = BasicAuthentication("", pat) + org_url = f"{self._instance_url}/{self._organization}" + self._connection = Connection(base_url=org_url, creds=credentials) + + @property + def git_client(self): + """Get the Git API client.""" + self._ensure_connection() + if self._git_client is None: + self._git_client = self._connection.clients.get_git_client() + return self._git_client + + @property + def wit_client(self): + """Get the Work Item Tracking API client.""" + self._ensure_connection() + if self._wit_client is None: + self._wit_client = self._connection.clients.get_work_item_tracking_client() + return self._wit_client + + @property + def provider_type(self) -> ProviderType: + return ProviderType.AZURE_DEVOPS + + @property + def repo(self) -> str: + """Get repository in org/project/repo format.""" + return f"{self._organization}/{self._project}/{self._repo_name}" + + # ------------------------------------------------------------------------- + # Pull Request Operations + # ------------------------------------------------------------------------- + + async def fetch_pr(self, number: int) -> PRData: + """Fetch a pull request by ID.""" + pr = await asyncio.to_thread( + self.git_client.get_pull_request, + repository_id=self._repo_name, + pull_request_id=number, + project=self._project, + ) + + # Get the diff + diff = await self.fetch_pr_diff(number) + + return self._parse_pr_data(pr, diff) + + async def fetch_prs(self, filters: Optional[PRFilters] = None) -> List[PRData]: + """Fetch pull requests with optional filters.""" + filters = filters or PRFilters() + + # Map state to ADO status + status_map = {"open": "active", "closed": "completed", "all": "all"} + status = status_map.get(_validate_state(filters.state), "active") + + from azure.devops.v7_1.git.models import GitPullRequestSearchCriteria + + search_criteria = GitPullRequestSearchCriteria(status=status) + + if filters.base_branch: + search_criteria.target_ref_name = f"refs/heads/{_sanitize_wiql_string(filters.base_branch)}" + + if filters.head_branch: + search_criteria.source_ref_name = f"refs/heads/{_sanitize_wiql_string(filters.head_branch)}" + + prs = await asyncio.to_thread( + self.git_client.get_pull_requests, + repository_id=self._repo_name, + search_criteria=search_criteria, + project=self._project, + top=min(filters.limit, 1000), # Cap at 1000 + ) + + result = [] + for pr in prs: + # Apply additional filters + if filters.author: + author_name = getattr(pr.created_by, "unique_name", "") + if filters.author.lower() not in author_name.lower(): + continue + + if filters.labels: + pr_labels = [label.name for label in (pr.labels or [])] + if not all(label in pr_labels for label in filters.labels): + continue + + result.append(self._parse_pr_data(pr, "")) + + return result + + async def fetch_pr_diff(self, number: int) -> str: + """Fetch the diff for a pull request.""" + try: + # Get the PR to find the commits + pr = await asyncio.to_thread( + self.git_client.get_pull_request, + repository_id=self._repo_name, + pull_request_id=number, + project=self._project, + ) + + 
# Get commits in the PR + commits = await asyncio.to_thread( + self.git_client.get_pull_request_commits, + repository_id=self._repo_name, + pull_request_id=number, + project=self._project, + ) + + if not commits: + return "" + + # Use the changes endpoint + changes = await asyncio.to_thread( + self.git_client.get_pull_request_iteration_changes, + repository_id=self._repo_name, + pull_request_id=number, + iteration_id=1, # First iteration + project=self._project, + ) + + # Build a simple diff summary + diff_lines = [] + for change in changes.change_entries or []: + change_type = getattr(change, "change_type", "edit") + path = getattr(change.item, "path", "") if change.item else "" + diff_lines.append(f"--- {change_type}: {path}") + + return "\n".join(diff_lines) + + except Exception as e: + return f"Error fetching diff: {e}" + + async def post_review(self, pr_number: int, review: ReviewData) -> int: + """Post a review comment to a pull request.""" + from azure.devops.v7_1.git.models import Comment, CommentThread + + # Create a comment thread + comment = Comment(content=review.body) + thread = CommentThread( + comments=[comment], + status="active" if review.event == "request_changes" else "closed", + ) + + result = await asyncio.to_thread( + self.git_client.create_thread, + comment_thread=thread, + repository_id=self._repo_name, + pull_request_id=pr_number, + project=self._project, + ) + + # If approving/rejecting, also set the vote + if review.event in ("approve", "request_changes"): + vote = 10 if review.event == "approve" else -10 + + try: + from azure.devops.v7_1.git.models import IdentityRefWithVote + + reviewer = IdentityRefWithVote(vote=vote) + await asyncio.to_thread( + self.git_client.create_pull_request_reviewer, + reviewer=reviewer, + repository_id=self._repo_name, + pull_request_id=pr_number, + reviewer_id="me", + project=self._project, + ) + except Exception: + pass # Vote may fail if user is not a reviewer + + return result.id if result else 0 + + async def merge_pr( + self, + pr_number: int, + merge_method: str = "squash", + commit_title: Optional[str] = None, + ) -> bool: + """Merge a pull request.""" + try: + pr = await asyncio.to_thread( + self.git_client.get_pull_request, + repository_id=self._repo_name, + pull_request_id=pr_number, + project=self._project, + ) + + from azure.devops.v7_1.git.models import ( + GitPullRequest, + GitPullRequestCompletionOptions, + ) + + merge_strategy_map = { + "squash": "squash", + "rebase": "rebase", + "merge": "noFastForward", + } + + # Use proper SDK models instead of dict + completion_options = GitPullRequestCompletionOptions( + delete_source_branch=True, + merge_strategy=merge_strategy_map.get(merge_method, "squash"), + merge_commit_message=commit_title, + ) + + update_pr = GitPullRequest( + status="completed", + last_merge_source_commit=pr.last_merge_source_commit, + completion_options=completion_options, + ) + + await asyncio.to_thread( + self.git_client.update_pull_request, + git_pull_request_to_update=update_pr, + repository_id=self._repo_name, + pull_request_id=pr_number, + project=self._project, + ) + + return True + except Exception: + return False + + async def close_pr( + self, + pr_number: int, + comment: Optional[str] = None, + ) -> bool: + """Close a pull request without merging (abandon).""" + try: + if comment: + await self.add_comment(pr_number, comment) + + from azure.devops.v7_1.git.models import GitPullRequest + + update_pr = GitPullRequest(status="abandoned") + + await asyncio.to_thread( + 
self.git_client.update_pull_request,
+                git_pull_request_to_update=update_pr,
+                repository_id=self._repo_name,
+                pull_request_id=pr_number,
+                project=self._project,
+            )
+
+            return True
+        except Exception:
+            return False
+
+    # -------------------------------------------------------------------------
+    # Issue (Work Item) Operations
+    # -------------------------------------------------------------------------
+
+    async def fetch_issue(self, number: int) -> IssueData:
+        """Fetch a work item by ID."""
+        wi = await asyncio.to_thread(
+            self.wit_client.get_work_item,
+            id=number,
+            project=self._project,
+            expand="All",
+        )
+        return self._parse_work_item(wi)
+
+    async def fetch_issues(
+        self, filters: Optional[IssueFilters] = None
+    ) -> List[IssueData]:
+        """
+        Fetch work items with optional filters.
+
+        Note: Results are limited to max_work_items (default 200) to prevent
+        excessive API calls. For larger result sets, use pagination via the
+        raw API methods.
+        """
+        filters = filters or IssueFilters()
+
+        from azure.devops.v7_1.work_item_tracking.models import Wiql
+
+        # Build WIQL query with sanitized inputs
+        # Project name is from config, not user input, but sanitize anyway
+        project_safe = _sanitize_wiql_string(self._project)
+        conditions = [f"[System.TeamProject] = '{project_safe}'"]
+
+        state = _validate_state(filters.state)
+        if state == "open":
+            conditions.append(
+                "([System.State] = 'New' OR [System.State] = 'Active')"
+            )
+        elif state == "closed":
+            conditions.append(
+                "([System.State] = 'Closed' OR [System.State] = 'Resolved')"
+            )
+
+        if filters.author:
+            author_safe = _sanitize_wiql_string(filters.author)
+            conditions.append(f"[System.CreatedBy] = '{author_safe}'")
+
+        if filters.assignee:
+            assignee_safe = _sanitize_wiql_string(filters.assignee)
+            conditions.append(f"[System.AssignedTo] = '{assignee_safe}'")
+
+        if filters.labels:
+            for label in filters.labels:
+                label_safe = _sanitize_wiql_string(label)
+                conditions.append(f"[System.Tags] CONTAINS '{label_safe}'")
+
+        query = f"""
+        SELECT [System.Id]
+        FROM WorkItems
+        WHERE {' AND '.join(conditions)}
+        ORDER BY [System.ChangedDate] DESC
+        """
+
+        wiql = Wiql(query=query)
+
+        # Use configured limit, capped at max_work_items
+        effective_limit = min(filters.limit, self._max_work_items)
+
+        # query_by_wiql takes no `project` kwarg; project scoping is already
+        # enforced by the [System.TeamProject] condition in the WIQL above
+        result = await asyncio.to_thread(
+            self.wit_client.query_by_wiql,
+            wiql=wiql,
+            top=effective_limit,
+        )
+
+        if not result.work_items:
+            return []
+
+        # Fetch full work item details
+        ids = [wi.id for wi in result.work_items]
+        work_items = await asyncio.to_thread(
+            self.wit_client.get_work_items,
+            ids=ids,
+            project=self._project,
+            expand="All",
+        )
+
+        return [self._parse_work_item(wi) for wi in work_items]
+
+    async def create_issue(
+        self,
+        title: str,
+        body: str,
+        labels: Optional[List[str]] = None,
+        assignees: Optional[List[str]] = None,
+    ) -> IssueData:
+        """Create a new work item (Task by default)."""
+        from azure.devops.v7_1.work_item_tracking.models import JsonPatchOperation
+
+        operations = [
+            JsonPatchOperation(
+                op="add",
+                path="/fields/System.Title",
+                value=title,
+            ),
+            JsonPatchOperation(
+                op="add",
+                path="/fields/System.Description",
+                value=body,
+            ),
+        ]
+
+        if labels:
+            operations.append(
+                JsonPatchOperation(
+                    op="add",
+                    path="/fields/System.Tags",
+                    value="; ".join(labels),
+                )
+            )
+
+        if assignees:
+            operations.append(
+                JsonPatchOperation(
+                    op="add",
+                    path="/fields/System.AssignedTo",
+                    value=assignees[0],  # ADO only supports one assignee
+                )
+            )
+
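+        # Editor's annotation: `operations` is a JSON Patch document;
+        # create_work_item applies each "add" operation to the new work
+        # item's fields in order.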
wi = await asyncio.to_thread( + self.wit_client.create_work_item, + document=operations, + project=self._project, + type="Task", + ) + + return self._parse_work_item(wi) + + async def close_issue( + self, + number: int, + comment: Optional[str] = None, + ) -> bool: + """Close a work item.""" + try: + if comment: + await self.add_comment(number, comment) + + from azure.devops.v7_1.work_item_tracking.models import JsonPatchOperation + + operations = [ + JsonPatchOperation( + op="replace", + path="/fields/System.State", + value="Closed", + ) + ] + + await asyncio.to_thread( + self.wit_client.update_work_item, + document=operations, + id=number, + project=self._project, + ) + + return True + except Exception: + return False + + async def add_comment( + self, + issue_or_pr_number: int, + body: str, + ) -> int: + """Add a comment to a work item or PR.""" + # For work items, add a comment via the Discussion field + try: + from azure.devops.v7_1.work_item_tracking.models import JsonPatchOperation + + # Add to history (comment) + operations = [ + JsonPatchOperation( + op="add", + path="/fields/System.History", + value=body, + ) + ] + + await asyncio.to_thread( + self.wit_client.update_work_item, + document=operations, + id=issue_or_pr_number, + project=self._project, + ) + + return 0 # ADO doesn't return comment ID for work items + except Exception: + # Try as PR comment + try: + from azure.devops.v7_1.git.models import Comment, CommentThread + + comment = Comment(content=body) + thread = CommentThread(comments=[comment], status="active") + + result = await asyncio.to_thread( + self.git_client.create_thread, + comment_thread=thread, + repository_id=self._repo_name, + pull_request_id=issue_or_pr_number, + project=self._project, + ) + return result.id if result else 0 + except Exception: + return 0 + + # ------------------------------------------------------------------------- + # Label Operations (via Tags in ADO) + # ------------------------------------------------------------------------- + + async def apply_labels( + self, + issue_or_pr_number: int, + labels: List[str], + ) -> None: + """Apply tags to a work item.""" + try: + # Get current tags + wi = await asyncio.to_thread( + self.wit_client.get_work_item, + id=issue_or_pr_number, + project=self._project, + ) + + current_tags = wi.fields.get("System.Tags", "") or "" + current_tag_list = [t.strip() for t in current_tags.split(";") if t.strip()] + + # Add new tags + for label in labels: + if label not in current_tag_list: + current_tag_list.append(label) + + from azure.devops.v7_1.work_item_tracking.models import JsonPatchOperation + + operations = [ + JsonPatchOperation( + op="replace", + path="/fields/System.Tags", + value="; ".join(current_tag_list), + ) + ] + + await asyncio.to_thread( + self.wit_client.update_work_item, + document=operations, + id=issue_or_pr_number, + project=self._project, + ) + except Exception: + pass + + async def remove_labels( + self, + issue_or_pr_number: int, + labels: List[str], + ) -> None: + """Remove tags from a work item.""" + try: + wi = await asyncio.to_thread( + self.wit_client.get_work_item, + id=issue_or_pr_number, + project=self._project, + ) + + current_tags = wi.fields.get("System.Tags", "") or "" + current_tag_list = [t.strip() for t in current_tags.split(";") if t.strip()] + + # Remove specified tags + for label in labels: + if label in current_tag_list: + current_tag_list.remove(label) + + from azure.devops.v7_1.work_item_tracking.models import JsonPatchOperation + + operations = [ + 
JsonPatchOperation(
+                    op="replace",
+                    path="/fields/System.Tags",
+                    value="; ".join(current_tag_list),
+                )
+            ]
+
+            await asyncio.to_thread(
+                self.wit_client.update_work_item,
+                document=operations,
+                id=issue_or_pr_number,
+                project=self._project,
+            )
+        except Exception:
+            pass
+
+    async def create_label(self, label: LabelData) -> None:
+        """
+        Create a label (tag) in Azure DevOps.
+
+        Note: ADO doesn't have a separate label/tag creation API.
+        Tags are created automatically when first applied to a work item.
+        """
+        pass  # No-op in ADO
+
+    async def list_labels(self) -> List[LabelData]:
+        """
+        List all tags used in the project.
+
+        Note: ADO doesn't have a direct API for listing all tags.
+        This queries work items to find used tags.
+        Results are limited to tags from the first 200 work items.
+        """
+        from azure.devops.v7_1.work_item_tracking.models import Wiql
+
+        project_safe = _sanitize_wiql_string(self._project)
+        wiql = Wiql(
+            query=f"""
+            SELECT [System.Id], [System.Tags]
+            FROM WorkItems
+            WHERE [System.TeamProject] = '{project_safe}'
+            AND [System.Tags] <> ''
+            """
+        )
+
+        # query_by_wiql takes no `project` kwarg; the WIQL above already
+        # scopes results to the project
+        result = await asyncio.to_thread(
+            self.wit_client.query_by_wiql,
+            wiql=wiql,
+            top=self._max_work_items,
+        )
+
+        if not result.work_items:
+            return []
+
+        ids = [wi.id for wi in result.work_items]
+        work_items = await asyncio.to_thread(
+            self.wit_client.get_work_items,
+            ids=ids,
+            fields=["System.Tags"],
+            project=self._project,
+        )
+
+        # Collect unique tags
+        tags = set()
+        for wi in work_items:
+            tag_string = wi.fields.get("System.Tags", "") or ""
+            for tag in tag_string.split(";"):
+                tag = tag.strip()
+                if tag:
+                    tags.add(tag)
+
+        return [LabelData(name=tag, color="", description="") for tag in sorted(tags)]
+
+    # -------------------------------------------------------------------------
+    # Repository Operations
+    # -------------------------------------------------------------------------
+
+    async def get_repository_info(self) -> dict[str, Any]:
+        """Get repository information."""
+        repo = await asyncio.to_thread(
+            self.git_client.get_repository,
+            repository_id=self._repo_name,
+            project=self._project,
+        )
+
+        return {
+            "id": repo.id,
+            "name": repo.name,
+            "default_branch": repo.default_branch.replace("refs/heads/", "")
+            if repo.default_branch
+            else "main",
+            "web_url": repo.web_url,
+            "size": repo.size,
+            "project": {
+                "id": repo.project.id if repo.project else None,
+                "name": repo.project.name if repo.project else self._project,
+            },
+        }
+
+    async def get_default_branch(self) -> str:
+        """Get the default branch name."""
+        repo_info = await self.get_repository_info()
+        return repo_info.get("default_branch", "main")
+
+    async def check_permissions(self, username: str) -> str:
+        """
+        Check a user's permission level on the repository.
+
+        Note: ADO permissions are more complex and project-based.
+        This returns a simplified permission level.
+ """ + # ADO doesn't have a simple permission check like GitHub + # Return "write" as default for authenticated users + return "write" + + # ------------------------------------------------------------------------- + # API Operations (Low-level) + # ------------------------------------------------------------------------- + + async def api_get( + self, + endpoint: str, + params: Optional[dict[str, Any]] = None, + ) -> Any: + """Make a GET request to the Azure DevOps API.""" + import urllib.request + import urllib.parse + import json + import base64 + + pat = self._get_pat() + url = f"{self._instance_url}/{self._organization}/{self._project}/_apis{endpoint}" + + if params: + url += "?" + urllib.parse.urlencode(params) + + # Add API version if not present + if "api-version" not in url: + separator = "&" if "?" in url else "?" + url += f"{separator}api-version=7.1" + + auth = base64.b64encode(f":{pat}".encode()).decode() + + req = urllib.request.Request(url) + req.add_header("Authorization", f"Basic {auth}") + req.add_header("Content-Type", "application/json") + + def _do_request(): + with urllib.request.urlopen(req, timeout=30) as response: + return json.loads(response.read().decode()) + + return await asyncio.to_thread(_do_request) + + async def api_post( + self, + endpoint: str, + data: Optional[dict[str, Any]] = None, + ) -> Any: + """Make a POST request to the Azure DevOps API.""" + import urllib.request + import json + import base64 + + pat = self._get_pat() + url = f"{self._instance_url}/{self._organization}/{self._project}/_apis{endpoint}" + + if "api-version" not in url: + separator = "&" if "?" in url else "?" + url += f"{separator}api-version=7.1" + + auth = base64.b64encode(f":{pat}".encode()).decode() + + req = urllib.request.Request(url, method="POST") + req.add_header("Authorization", f"Basic {auth}") + req.add_header("Content-Type", "application/json") + + body = json.dumps(data).encode() if data else None + + def _do_request(): + with urllib.request.urlopen(req, data=body, timeout=30) as response: + return json.loads(response.read().decode()) + + return await asyncio.to_thread(_do_request) + + # ------------------------------------------------------------------------- + # Helper Methods + # ------------------------------------------------------------------------- + + def _parse_pr_data(self, pr: Any, diff: str) -> PRData: + """Parse Azure DevOps PR data into PRData.""" + author = getattr(pr.created_by, "display_name", "unknown") + + labels = [] + for label in pr.labels or []: + labels.append(label.name) + + # Get file changes + files = [] + # Note: ADO returns changes differently, would need additional API call + + return PRData( + number=pr.pull_request_id, + title=pr.title or "", + body=pr.description or "", + author=author, + state="open" if pr.status == "active" else "closed", + source_branch=pr.source_ref_name.replace("refs/heads/", "") + if pr.source_ref_name + else "", + target_branch=pr.target_ref_name.replace("refs/heads/", "") + if pr.target_ref_name + else "", + additions=0, # ADO doesn't provide this directly + deletions=0, + changed_files=0, + files=files, + diff=diff, + url=f"{self._instance_url}/{self._organization}/{self._project}/_git/{self._repo_name}/pullrequest/{pr.pull_request_id}", + created_at=self._parse_datetime(pr.creation_date), + updated_at=self._parse_datetime(pr.creation_date), # Use creation as fallback + labels=labels, + reviewers=[ + r.display_name for r in (pr.reviewers or []) if hasattr(r, "display_name") + ], + is_draft=pr.is_draft if 
hasattr(pr, "is_draft") else False, + mergeable=pr.merge_status != "conflicts" if hasattr(pr, "merge_status") else True, + provider=ProviderType.AZURE_DEVOPS, + raw_data={"pull_request_id": pr.pull_request_id}, + ) + + def _parse_work_item(self, wi: Any) -> IssueData: + """Parse Azure DevOps work item into IssueData.""" + fields = wi.fields or {} + + author = fields.get("System.CreatedBy", {}) + if isinstance(author, dict): + author = author.get("displayName", "unknown") + else: + author = str(author) if author else "unknown" + + state = fields.get("System.State", "New") + state_normalized = "closed" if state in ["Closed", "Resolved", "Done"] else "open" + + tags = fields.get("System.Tags", "") or "" + labels = [t.strip() for t in tags.split(";") if t.strip()] + + assigned = fields.get("System.AssignedTo", {}) + assignees: List[str] = [] + if assigned: + if isinstance(assigned, dict): + assignees = [assigned.get("displayName", "")] + else: + assignees = [str(assigned)] + + return IssueData( + number=wi.id, + title=fields.get("System.Title", ""), + body=fields.get("System.Description", "") or "", + author=author, + state=state_normalized, + labels=labels, + created_at=self._parse_datetime(fields.get("System.CreatedDate")), + updated_at=self._parse_datetime(fields.get("System.ChangedDate")), + url=f"{self._instance_url}/{self._organization}/{self._project}/_workitems/edit/{wi.id}", + assignees=assignees, + milestone=fields.get("System.IterationPath"), + provider=ProviderType.AZURE_DEVOPS, + raw_data={"id": wi.id, "fields": fields}, + ) + + def _parse_datetime(self, dt: Any) -> datetime: + """Parse datetime from various formats.""" + if dt is None: + return datetime.now(timezone.utc) + + if isinstance(dt, datetime): + return dt + + if isinstance(dt, str): + try: + return datetime.fromisoformat(dt.replace("Z", "+00:00")) + except (ValueError, AttributeError): + pass + + return datetime.now(timezone.utc) diff --git a/apps/backend/runners/github/providers/factory.py b/apps/backend/runners/github/providers/factory.py index 221244a8d4..57f3274cdb 100644 --- a/apps/backend/runners/github/providers/factory.py +++ b/apps/backend/runners/github/providers/factory.py @@ -12,6 +12,7 @@ from typing import Any from .github_provider import GitHubProvider +from .azure_devops_provider import AzureDevOpsProvider from .protocol import GitProvider, ProviderType # Provider registry for dynamic registration @@ -99,9 +100,19 @@ def get_provider( ) if provider_type == ProviderType.AZURE_DEVOPS: - raise NotImplementedError( - "Azure DevOps provider not yet implemented. " - "See providers/azure_devops_provider.py.stub for interface." 
+            # Extract ADO-specific kwargs
+            organization = kwargs.pop("organization", None)
+            project = kwargs.pop("project", None)
+            repo_name = kwargs.pop("repo_name", None)
+            pat = kwargs.pop("pat", None)
+            instance_url = kwargs.pop("instance_url", None)
+
+            return AzureDevOpsProvider(
+                _organization=organization,
+                _project=project,
+                # Parenthesized deliberately: `a or b if c else d` parses as
+                # `(a or b) if c else d`, which would silently discard
+                # repo_name whenever repo has no "/"
+                _repo_name=repo_name or (repo.split("/")[-1] if "/" in repo else repo),
+                _pat=pat,
+                _instance_url=instance_url,
             )
 
     raise ValueError(f"Unsupported provider type: {provider_type}")
@@ -114,7 +125,7 @@ def list_available_providers() -> list[ProviderType]:
     Returns:
         List of available ProviderType values
     """
-    available = [ProviderType.GITHUB]  # Built-in
+    available = [ProviderType.GITHUB, ProviderType.AZURE_DEVOPS]  # Built-in
 
     # Add registered providers
     for provider_type in _PROVIDER_REGISTRY:
@@ -140,8 +151,8 @@ def is_provider_available(provider_type: ProviderType | str) -> bool:
         except ValueError:
             return False
 
-    # GitHub is always available
-    if provider_type == ProviderType.GITHUB:
+    # GitHub and Azure DevOps are always available
+    if provider_type in (ProviderType.GITHUB, ProviderType.AZURE_DEVOPS):
         return True
 
     # Check registry
diff --git a/apps/backend/runners/github/runner.py b/apps/backend/runners/github/runner.py
index 669030e46f..b3934cdc93 100644
--- a/apps/backend/runners/github/runner.py
+++ b/apps/backend/runners/github/runner.py
@@ -56,8 +56,10 @@
 # Add backend to path
 sys.path.insert(0, str(Path(__file__).parent.parent.parent))
 
-# Load .env file
-from dotenv import load_dotenv
+# Load .env file with centralized error handling
+from cli.utils import import_dotenv
+
+load_dotenv = import_dotenv()
 
 env_file = Path(__file__).parent.parent.parent / ".env"
 if env_file.exists():
diff --git a/apps/backend/runners/github/services/followup_reviewer.py b/apps/backend/runners/github/services/followup_reviewer.py
index 8b8a24181d..5c1c8bbca0 100644
--- a/apps/backend/runners/github/services/followup_reviewer.py
+++ b/apps/backend/runners/github/services/followup_reviewer.py
@@ -26,6 +26,7 @@
 from ..models import FollowupReviewContext, GitHubRunnerConfig
 
 try:
+    from ..gh_client import GHClient
     from ..models import (
         MergeVerdict,
         PRReviewFinding,
@@ -37,6 +38,7 @@
     from .prompt_manager import PromptManager
     from .pydantic_models import FollowupReviewResponse
 except (ImportError, ValueError, SystemError):
+    from gh_client import GHClient
     from models import (
         MergeVerdict,
         PRReviewFinding,
@@ -230,6 +232,27 @@ async def review_followup(
             "complete", 100, "Follow-up review complete!", context.pr_number
         )
 
+        # Get file blob SHAs for rebase-resistant follow-up reviews
+        # Blob SHAs persist across rebases - same content = same blob SHA
+        file_blobs: dict[str, str] = {}
+        try:
+            gh_client = GHClient(
+                project_dir=self.project_dir,
+                default_timeout=30.0,
+                repo=self.config.repo,
+            )
+            pr_files = await gh_client.get_pr_files(context.pr_number)
+            for file in pr_files:
+                filename = file.get("filename", "")
+                blob_sha = file.get("sha", "")
+                if filename and blob_sha:
+                    file_blobs[filename] = blob_sha
+            logger.info(
+                f"Captured {len(file_blobs)} file blob SHAs for follow-up tracking"
+            )
+        except Exception as e:
+            logger.warning(f"Could not capture file blobs: {e}")
+
         return PRReviewResult(
             pr_number=context.pr_number,
             repo=self.config.repo,
@@ -243,6 +266,7 @@ async def review_followup(
             reviewed_at=datetime.now().isoformat(),
             # Follow-up specific fields
             reviewed_commit_sha=context.current_commit_sha,
+            reviewed_file_blobs=file_blobs,
             is_followup_review=True,
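+            # Editor's note: the blob SHAs in reviewed_file_blobs above are
+            # content-addressed, so an unchanged file keeps its SHA across a
+            # rebase even though every commit SHA changes; that is what makes
+            # the follow-up tracking rebase-resistant.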
previous_review_id=context.previous_review.review_id, resolved_findings=[f.id for f in resolved], diff --git a/apps/backend/runners/github/services/parallel_followup_reviewer.py b/apps/backend/runners/github/services/parallel_followup_reviewer.py index fb7a04365b..9b05a48181 100644 --- a/apps/backend/runners/github/services/parallel_followup_reviewer.py +++ b/apps/backend/runners/github/services/parallel_followup_reviewer.py @@ -32,6 +32,8 @@ try: from ...core.client import create_client from ...phase_config import get_thinking_budget + from ..context_gatherer import _validate_git_ref + from ..gh_client import GHClient from ..models import ( GitHubRunnerConfig, MergeVerdict, @@ -40,10 +42,13 @@ ReviewSeverity, ) from .category_utils import map_category + from .pr_worktree_manager import PRWorktreeManager from .pydantic_models import ParallelFollowupResponse from .sdk_utils import process_sdk_stream except (ImportError, ValueError, SystemError): + from context_gatherer import _validate_git_ref from core.client import create_client + from gh_client import GHClient from models import ( GitHubRunnerConfig, MergeVerdict, @@ -53,6 +58,7 @@ ) from phase_config import get_thinking_budget from services.category_utils import map_category + from services.pr_worktree_manager import PRWorktreeManager from services.pydantic_models import ParallelFollowupResponse from services.sdk_utils import process_sdk_stream @@ -62,6 +68,9 @@ # Check if debug mode is enabled DEBUG_MODE = os.environ.get("DEBUG", "").lower() in ("true", "1", "yes") +# Directory for PR review worktrees (shared with initial reviewer) +PR_WORKTREE_DIR = ".auto-claude/github/pr/worktrees" + # Severity mapping for AI responses _SEVERITY_MAPPING = { "critical": ReviewSeverity.CRITICAL, @@ -106,6 +115,7 @@ def __init__( self.github_dir = Path(github_dir) self.config = config self.progress_callback = progress_callback + self.worktree_manager = PRWorktreeManager(project_dir, PR_WORKTREE_DIR) def _report_progress(self, phase: str, progress: int, message: str, **kwargs): """Report progress if callback is set.""" @@ -136,6 +146,37 @@ def _load_prompt(self, filename: str) -> str: logger.warning(f"Prompt file not found: {prompt_file}") return "" + def _create_pr_worktree(self, head_sha: str, pr_number: int) -> Path: + """Create a temporary worktree at the PR head commit. + + Args: + head_sha: The commit SHA of the PR head (validated before use) + pr_number: The PR number for naming + + Returns: + Path to the created worktree + + Raises: + RuntimeError: If worktree creation fails + ValueError: If head_sha fails validation (command injection prevention) + """ + # SECURITY: Validate git ref before use in subprocess calls + if not _validate_git_ref(head_sha): + raise ValueError( + f"Invalid git ref: '{head_sha}'. " + "Must contain only alphanumeric characters, dots, slashes, underscores, and hyphens." + ) + + return self.worktree_manager.create_worktree(head_sha, pr_number) + + def _cleanup_pr_worktree(self, worktree_path: Path) -> None: + """Remove a temporary PR review worktree with fallback chain. + + Args: + worktree_path: Path to the worktree to remove + """ + self.worktree_manager.remove_worktree(worktree_path) + def _define_specialist_agents(self) -> dict[str, AgentDefinition]: """ Define specialist agents for follow-up review. 
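[Editor's note] The follow-up changes below read `ci_status` as a plain dict. A minimal sketch of the shape they assume, with keys inferred from this patch (the full schema returned by `get_pr_checks_comprehensive` is not shown here):

```python
# Illustrative only; not part of the patch.
ci_status = {
    "passing": 4,                          # checks that succeeded
    "failing": 1,                          # checks that failed
    "pending": 0,                          # checks still running
    "failed_checks": ["build (windows)"],  # names surfaced as blockers
    "awaiting_approval": 0,                # fork-PR workflows awaiting approval
}
# Upstream, failing > 0 or awaiting_approval > 0 yields a BLOCKED verdict.
```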
@@ -265,6 +306,44 @@ def _format_ai_reviews(self, context: FollowupReviewContext) -> str:
 
         return "\n\n---\n\n".join(ai_content)
 
+    def _format_ci_status(self, context: FollowupReviewContext) -> str:
+        """Format CI status for the prompt."""
+        ci_status = context.ci_status
+        if not ci_status:
+            return "CI status not available."
+
+        passing = ci_status.get("passing", 0)
+        failing = ci_status.get("failing", 0)
+        pending = ci_status.get("pending", 0)
+        failed_checks = ci_status.get("failed_checks", [])
+        awaiting_approval = ci_status.get("awaiting_approval", 0)
+
+        lines = []
+
+        # Overall status
+        if failing > 0:
+            lines.append(f"⚠️ **{failing} CI check(s) FAILING** - PR cannot be merged")
+        elif pending > 0:
+            lines.append(f"⏳ **{pending} CI check(s) pending** - Wait for completion")
+        elif passing > 0:
+            lines.append(f"✅ **All {passing} CI check(s) passing**")
+        else:
+            lines.append("No CI checks configured")
+
+        # List failed checks
+        if failed_checks:
+            lines.append("\n**Failed checks:**")
+            for check in failed_checks:
+                lines.append(f"  - ❌ {check}")
+
+        # Awaiting approval (fork PRs)
+        if awaiting_approval > 0:
+            lines.append(
+                f"\n⏸️ **{awaiting_approval} workflow(s) awaiting maintainer approval** (fork PR)"
+            )
+
+        return "\n".join(lines)
+
     def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str:
         """Build full prompt for orchestrator with follow-up context."""
         # Load orchestrator prompt
@@ -277,6 +356,7 @@ def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str:
         commits = self._format_commits(context)
         contributor_comments = self._format_comments(context)
         ai_reviews = self._format_ai_reviews(context)
+        ci_status = self._format_ci_status(context)
 
         # Truncate diff if too long
         MAX_DIFF_CHARS = 100_000
@@ -295,6 +375,9 @@ def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str:
 **New Commits:** {len(context.commits_since_review)}
 **Files Changed:** {len(context.files_changed_since_review)}
 
+### CI Status (CRITICAL - Must Factor Into Verdict)
+{ci_status}
+
 ### Previous Review Summary
 {context.previous_review.summary[:500] if context.previous_review.summary else "No summary available."}
 
@@ -323,6 +406,7 @@ def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str:
 
 Now analyze this follow-up and delegate to the appropriate specialist agents.
 Remember: YOU decide which agents to invoke based on YOUR analysis. The SDK will run invoked agents in parallel automatically.
+**CRITICAL: Your verdict MUST account for CI status.
Failing CI = BLOCKED verdict.** """ return base_prompt + followup_context @@ -341,6 +425,9 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: f"[ParallelFollowup] Starting follow-up review for PR #{context.pr_number}" ) + # Track worktree for cleanup + worktree_path: Path | None = None + try: self._report_progress( "orchestrating", @@ -352,13 +439,48 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: # Build orchestrator prompt prompt = self._build_orchestrator_prompt(context) - # Get project root + # Get project root - default to local checkout project_root = ( self.project_dir.parent.parent if self.project_dir.name == "backend" else self.project_dir ) + # Create temporary worktree at PR head commit for isolated review + # This ensures agents read from the correct PR state, not the current checkout + head_sha = context.current_commit_sha + if head_sha and _validate_git_ref(head_sha): + try: + if DEBUG_MODE: + print( + f"[Followup] DEBUG: Creating worktree for head_sha={head_sha}", + flush=True, + ) + worktree_path = self._create_pr_worktree( + head_sha, context.pr_number + ) + project_root = worktree_path + print( + f"[Followup] Using worktree at {worktree_path.name} for PR review", + flush=True, + ) + except Exception as e: + if DEBUG_MODE: + print( + f"[Followup] DEBUG: Worktree creation FAILED: {e}", + flush=True, + ) + logger.warning( + f"[ParallelFollowup] Worktree creation failed, " + f"falling back to local checkout: {e}" + ) + # Fallback to original behavior if worktree creation fails + else: + logger.warning( + f"[ParallelFollowup] Invalid or missing head_sha '{head_sha}', " + "using local checkout" + ) + # Use model and thinking level from config (user settings) model = self.config.model or "claude-sonnet-4-5-20250929" thinking_level = self.config.thinking_level or "medium" @@ -459,15 +581,45 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: f"{len(resolved_ids)} resolved, {len(unresolved_ids)} unresolved" ) + # Generate blockers from critical/high/medium severity findings + # (Medium also blocks merge in our strict quality gates approach) + blockers = [] + + # CRITICAL: Merge conflicts block merging - check FIRST before summary generation + # This must happen before _generate_summary so the summary reflects merge conflict status + if context.has_merge_conflicts: + blockers.append( + "Merge Conflicts: PR has conflicts with base branch that must be resolved" + ) + # Override verdict to BLOCKED if merge conflicts exist + verdict = MergeVerdict.BLOCKED + verdict_reasoning = ( + "Blocked: PR has merge conflicts with base branch. " + "Resolve conflicts before merge." 
+ ) + print( + "[ParallelFollowup] ⚠️ PR has merge conflicts - blocking merge", + flush=True, + ) + + for finding in unique_findings: + if finding.severity in ( + ReviewSeverity.CRITICAL, + ReviewSeverity.HIGH, + ReviewSeverity.MEDIUM, + ): + blockers.append(f"{finding.category.value}: {finding.title}") + # Extract validation counts dismissed_count = len(result_data.get("dismissed_false_positive_ids", [])) confirmed_count = result_data.get("confirmed_valid_count", 0) needs_human_count = result_data.get("needs_human_review_count", 0) - # Generate summary + # Generate summary (AFTER merge conflict check so it reflects correct verdict) summary = self._generate_summary( verdict=verdict, verdict_reasoning=verdict_reasoning, + blockers=blockers, resolved_count=len(resolved_ids), unresolved_count=len(unresolved_ids), new_count=len(new_finding_ids), @@ -487,16 +639,26 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: else: overall_status = "approve" - # Generate blockers from critical/high/medium severity findings - # (Medium also blocks merge in our strict quality gates approach) - blockers = [] - for finding in unique_findings: - if finding.severity in ( - ReviewSeverity.CRITICAL, - ReviewSeverity.HIGH, - ReviewSeverity.MEDIUM, - ): - blockers.append(f"{finding.category.value}: {finding.title}") + # Get file blob SHAs for rebase-resistant follow-up reviews + # Blob SHAs persist across rebases - same content = same blob SHA + file_blobs: dict[str, str] = {} + try: + gh_client = GHClient( + project_dir=self.project_dir, + default_timeout=30.0, + repo=self.config.repo, + ) + pr_files = await gh_client.get_pr_files(context.pr_number) + for file in pr_files: + filename = file.get("filename", "") + blob_sha = file.get("sha", "") + if filename and blob_sha: + file_blobs[filename] = blob_sha + logger.info( + f"Captured {len(file_blobs)} file blob SHAs for follow-up tracking" + ) + except Exception as e: + logger.warning(f"Could not capture file blobs: {e}") result = PRReviewResult( pr_number=context.pr_number, @@ -509,6 +671,7 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: verdict_reasoning=verdict_reasoning, blockers=blockers, reviewed_commit_sha=context.current_commit_sha, + reviewed_file_blobs=file_blobs, is_followup_review=True, previous_review_id=context.previous_review.review_id or context.previous_review.pr_number, @@ -543,6 +706,10 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: is_followup_review=True, reviewed_commit_sha=context.current_commit_sha, ) + finally: + # Always cleanup worktree, even on error + if worktree_path: + self._cleanup_pr_worktree(worktree_path) def _parse_structured_output( self, data: dict, context: FollowupReviewContext @@ -614,13 +781,11 @@ def _parse_structured_output( validation = validation_map.get(rv.finding_id) validation_status = None validation_evidence = None - validation_confidence = None validation_explanation = None if validation: validation_status = validation.validation_status validation_evidence = validation.code_evidence - validation_confidence = validation.confidence validation_explanation = validation.explanation findings.append( @@ -636,7 +801,6 @@ def _parse_structured_output( fixable=original.fixable, validation_status=validation_status, validation_evidence=validation_evidence, - validation_confidence=validation_confidence, validation_explanation=validation_explanation, ) ) @@ -805,6 +969,7 @@ def _generate_summary( self, verdict: MergeVerdict, verdict_reasoning: 
str,
+        blockers: list[str],
         resolved_count: int,
         unresolved_count: int,
         new_count: int,
@@ -838,6 +1003,15 @@ def _generate_summary(
 - 🔍 **Dismissed as False Positives**: {dismissed_false_positive_count} findings were re-investigated and found to be incorrect
 - ✓ **Confirmed Valid**: {confirmed_valid_count} findings verified as genuine issues
 - 👀 **Needs Human Review**: {needs_human_review_count} findings require manual verification
+"""
+
+        # Build blockers section if there are any blockers
+        blockers_section = ""
+        if blockers:
+            blockers_list = "\n".join(f"- {b}" for b in blockers)
+            blockers_section = f"""
+### 🚨 Blocking Issues
+{blockers_list}
 """
 
         summary = f"""## {emoji} Follow-up Review: {verdict.value.replace("_", " ").title()}
 
 - ✅ **Resolved**: {resolved_count} previous findings addressed
 - ❌ **Unresolved**: {unresolved_count} previous findings remain
 - 🆕 **New Issues**: {new_count} new findings in recent changes
-{validation_section}
+{validation_section}{blockers_section}
 
 ### Verdict
 {verdict_reasoning}
diff --git a/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py b/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py
index 7b7fe00c54..0a2f88ca5b 100644
--- a/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py
+++ b/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py
@@ -20,9 +20,6 @@
 import hashlib
 import logging
 import os
-import shutil
-import subprocess
-import uuid
 from pathlib import Path
 from typing import Any
 
@@ -32,6 +29,7 @@
     from ...core.client import create_client
     from ...phase_config import get_thinking_budget
     from ..context_gatherer import PRContext, _validate_git_ref
+    from ..gh_client import GHClient
     from ..models import (
         GitHubRunnerConfig,
         MergeVerdict,
@@ -40,11 +38,13 @@
         ReviewSeverity,
     )
     from .category_utils import map_category
+    from .pr_worktree_manager import PRWorktreeManager
     from .pydantic_models import ParallelOrchestratorResponse
     from .sdk_utils import process_sdk_stream
 except (ImportError, ValueError, SystemError):
     from context_gatherer import PRContext, _validate_git_ref
     from core.client import create_client
+    from gh_client import GHClient
     from models import (
         GitHubRunnerConfig,
         MergeVerdict,
@@ -54,6 +54,7 @@
     )
     from phase_config import get_thinking_budget
     from services.category_utils import map_category
+    from services.pr_worktree_manager import PRWorktreeManager
     from services.pydantic_models import ParallelOrchestratorResponse
     from services.sdk_utils import process_sdk_stream
 
@@ -92,6 +93,7 @@ def __init__(
         self.github_dir = Path(github_dir)
         self.config = config
         self.progress_callback = progress_callback
+        self.worktree_manager = PRWorktreeManager(project_dir, PR_WORKTREE_DIR)
 
     def _report_progress(self, phase: str, progress: int, message: str, **kwargs):
         """Report progress if callback is set."""
@@ -143,78 +145,7 @@ def _create_pr_worktree(self, head_sha: str, pr_number: int) -> Path:
                 "Must contain only alphanumeric characters, dots, slashes, underscores, and hyphens."
) - worktree_name = f"pr-{pr_number}-{uuid.uuid4().hex[:8]}" - worktree_dir = self.project_dir / PR_WORKTREE_DIR - - if DEBUG_MODE: - print(f"[PRReview] DEBUG: project_dir={self.project_dir}", flush=True) - print(f"[PRReview] DEBUG: worktree_dir={worktree_dir}", flush=True) - print(f"[PRReview] DEBUG: head_sha={head_sha}", flush=True) - - worktree_dir.mkdir(parents=True, exist_ok=True) - worktree_path = worktree_dir / worktree_name - - if DEBUG_MODE: - print(f"[PRReview] DEBUG: worktree_path={worktree_path}", flush=True) - print( - f"[PRReview] DEBUG: worktree_dir exists={worktree_dir.exists()}", - flush=True, - ) - - # Fetch the commit if not available locally (handles fork PRs) - fetch_result = subprocess.run( - ["git", "fetch", "origin", head_sha], - cwd=self.project_dir, - capture_output=True, - text=True, - timeout=60, - ) - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: fetch returncode={fetch_result.returncode}", - flush=True, - ) - if fetch_result.stderr: - print( - f"[PRReview] DEBUG: fetch stderr={fetch_result.stderr[:200]}", - flush=True, - ) - - # Create detached worktree at the PR commit - result = subprocess.run( - ["git", "worktree", "add", "--detach", str(worktree_path), head_sha], - cwd=self.project_dir, - capture_output=True, - text=True, - timeout=120, # Worktree add can be slow for large repos - ) - - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: worktree add returncode={result.returncode}", - flush=True, - ) - if result.stderr: - print( - f"[PRReview] DEBUG: worktree add stderr={result.stderr[:200]}", - flush=True, - ) - if result.stdout: - print( - f"[PRReview] DEBUG: worktree add stdout={result.stdout[:200]}", - flush=True, - ) - - if result.returncode != 0: - raise RuntimeError(f"Failed to create worktree: {result.stderr}") - - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: worktree created, exists={worktree_path.exists()}", - flush=True, - ) - logger.info(f"[PRReview] Created worktree at {worktree_path}") - return worktree_path + return self.worktree_manager.create_worktree(head_sha, pr_number) def _cleanup_pr_worktree(self, worktree_path: Path) -> None: """Remove a temporary PR review worktree with fallback chain. 
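[Editor's note] With the inline worktree code above replaced by delegation, both the initial and follow-up reviewers share one lifecycle through PRWorktreeManager. A minimal sketch using names from this patch (error handling and progress reporting elided; the SHA is a placeholder):

```python
from pathlib import Path

from services.pr_worktree_manager import PRWorktreeManager

manager = PRWorktreeManager(Path("/path/to/project"), ".auto-claude/github/pr/worktrees")
worktree_path = None
try:
    # create_worktree() validates head_sha against SAFE_REF_PATTERN and, by
    # default, runs cleanup_worktrees() before creating the new worktree
    worktree_path = manager.create_worktree(head_sha="0123abcd", pr_number=42)
    # ... run review agents with project_root = worktree_path ...
finally:
    if worktree_path:
        manager.remove_worktree(worktree_path)
```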
@@ -222,100 +153,16 @@ def _cleanup_pr_worktree(self, worktree_path: Path) -> None: Args: worktree_path: Path to the worktree to remove """ - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: _cleanup_pr_worktree called with {worktree_path}", - flush=True, - ) - - if not worktree_path or not worktree_path.exists(): - if DEBUG_MODE: - print( - "[PRReview] DEBUG: worktree path doesn't exist, skipping cleanup", - flush=True, - ) - return - - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: Attempting to remove worktree at {worktree_path}", - flush=True, - ) - - # Try 1: git worktree remove - result = subprocess.run( - ["git", "worktree", "remove", "--force", str(worktree_path)], - cwd=self.project_dir, - capture_output=True, - text=True, - timeout=30, - ) - - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: worktree remove returncode={result.returncode}", - flush=True, - ) - - if result.returncode == 0: - logger.info(f"[PRReview] Cleaned up worktree: {worktree_path.name}") - return - - # Try 2: shutil.rmtree fallback - try: - shutil.rmtree(worktree_path, ignore_errors=True) - subprocess.run( - ["git", "worktree", "prune"], - cwd=self.project_dir, - capture_output=True, - timeout=30, - ) - logger.warning(f"[PRReview] Used shutil fallback for: {worktree_path.name}") - except Exception as e: - logger.error(f"[PRReview] Failed to cleanup worktree {worktree_path}: {e}") + self.worktree_manager.remove_worktree(worktree_path) def _cleanup_stale_pr_worktrees(self) -> None: - """Clean up orphaned PR review worktrees on startup.""" - worktree_dir = self.project_dir / PR_WORKTREE_DIR - if not worktree_dir.exists(): - return - - # Get registered worktrees from git - result = subprocess.run( - ["git", "worktree", "list", "--porcelain"], - cwd=self.project_dir, - capture_output=True, - text=True, - timeout=30, - ) - registered = set() - for line in result.stdout.split("\n"): - if line.startswith("worktree "): - # Safely parse - check bounds to prevent IndexError - parts = line.split(" ", 1) - if len(parts) > 1 and parts[1]: - registered.add(Path(parts[1])) - - # Remove unregistered directories - stale_count = 0 - for item in worktree_dir.iterdir(): - if item.is_dir() and item not in registered: - logger.info(f"[PRReview] Removing stale worktree: {item.name}") - shutil.rmtree(item, ignore_errors=True) - stale_count += 1 - - if stale_count > 0: - subprocess.run( - ["git", "worktree", "prune"], - cwd=self.project_dir, - capture_output=True, - timeout=30, + """Clean up orphaned, expired, and excess PR review worktrees on startup.""" + stats = self.worktree_manager.cleanup_worktrees() + if stats["total"] > 0: + logger.info( + f"[PRReview] Cleanup: removed {stats['total']} worktrees " + f"(orphaned={stats['orphaned']}, expired={stats['expired']}, excess={stats['excess']})" ) - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: Cleaned up {stale_count} stale worktree(s)", - flush=True, - ) def _define_specialist_agents(self) -> dict[str, AgentDefinition]: """ @@ -584,7 +431,7 @@ def _create_finding_from_structured(self, finding_data: Any) -> PRReviewFinding: category=category, severity=severity, suggested_fix=finding_data.suggested_fix or "", - confidence=self._normalize_confidence(finding_data.confidence), + evidence=finding_data.evidence, ) async def review(self, context: PRContext) -> PRReviewResult: @@ -769,9 +616,9 @@ async def review(self, context: PRContext) -> PRReviewResult: f"[ParallelOrchestrator] Review complete: {len(unique_findings)} findings" ) - # Generate verdict + # Generate verdict (includes merge conflict 
check) verdict, verdict_reasoning, blockers = self._generate_verdict( - unique_findings + unique_findings, has_merge_conflicts=context.has_merge_conflicts ) # Generate summary @@ -799,6 +646,27 @@ async def review(self, context: PRContext) -> PRReviewResult: latest_commit = context.commits[-1] head_sha = latest_commit.get("oid") or latest_commit.get("sha") + # Get file blob SHAs for rebase-resistant follow-up reviews + # Blob SHAs persist across rebases - same content = same blob SHA + file_blobs: dict[str, str] = {} + try: + gh_client = GHClient( + project_dir=self.project_dir, + default_timeout=30.0, + repo=self.config.repo, + ) + pr_files = await gh_client.get_pr_files(context.pr_number) + for file in pr_files: + filename = file.get("filename", "") + blob_sha = file.get("sha", "") + if filename and blob_sha: + file_blobs[filename] = blob_sha + logger.info( + f"Captured {len(file_blobs)} file blob SHAs for follow-up tracking" + ) + except Exception as e: + logger.warning(f"Could not capture file blobs: {e}") + result = PRReviewResult( pr_number=context.pr_number, repo=self.config.repo, @@ -810,6 +678,7 @@ async def review(self, context: PRContext) -> PRReviewResult: verdict_reasoning=verdict_reasoning, blockers=blockers, reviewed_commit_sha=head_sha, + reviewed_file_blobs=file_blobs, ) self._report_progress( @@ -945,7 +814,7 @@ def _create_finding_from_dict(self, f_data: dict[str, Any]) -> PRReviewFinding: category=category, severity=severity, suggested_fix=f_data.get("suggested_fix", ""), - confidence=self._normalize_confidence(f_data.get("confidence", 85)), + evidence=f_data.get("evidence"), ) def _parse_text_output(self, output: str) -> list[PRReviewFinding]: @@ -993,11 +862,17 @@ def _deduplicate_findings( return unique def _generate_verdict( - self, findings: list[PRReviewFinding] + self, findings: list[PRReviewFinding], has_merge_conflicts: bool = False ) -> tuple[MergeVerdict, str, list[str]]: - """Generate merge verdict based on findings.""" + """Generate merge verdict based on findings and merge conflict status.""" blockers = [] + # CRITICAL: Merge conflicts block merging - check first + if has_merge_conflicts: + blockers.append( + "Merge Conflicts: PR has conflicts with base branch that must be resolved" + ) + critical = [f for f in findings if f.severity == ReviewSeverity.CRITICAL] high = [f for f in findings if f.severity == ReviewSeverity.HIGH] medium = [f for f in findings if f.severity == ReviewSeverity.MEDIUM] @@ -1007,8 +882,19 @@ def _generate_verdict( blockers.append(f"Critical: {f.title} ({f.file}:{f.line})") if blockers: - verdict = MergeVerdict.BLOCKED - reasoning = f"Blocked by {len(blockers)} critical issue(s)" + # Merge conflicts are the highest priority blocker + if has_merge_conflicts: + verdict = MergeVerdict.BLOCKED + reasoning = ( + "Blocked: PR has merge conflicts with base branch. " + "Resolve conflicts before merge." 
+ ) + elif critical: + verdict = MergeVerdict.BLOCKED + reasoning = f"Blocked by {len(critical)} critical issue(s)" + else: + verdict = MergeVerdict.BLOCKED + reasoning = f"Blocked by {len(blockers)} issue(s)" elif high or medium: # High and Medium severity findings block merge verdict = MergeVerdict.NEEDS_REVISION diff --git a/apps/backend/runners/github/services/pr_review_engine.py b/apps/backend/runners/github/services/pr_review_engine.py index 24d1fb69f0..d8832539e7 100644 --- a/apps/backend/runners/github/services/pr_review_engine.py +++ b/apps/backend/runners/github/services/pr_review_engine.py @@ -242,7 +242,9 @@ async def run_review_pass( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text if review_pass == ReviewPass.QUICK_SCAN: @@ -502,7 +504,9 @@ async def _run_structural_pass(self, context: PRContext) -> str: msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text except Exception as e: print(f"[AI] Structural pass error: {e}", flush=True) @@ -558,7 +562,9 @@ async def _run_ai_triage_pass(self, context: PRContext) -> str: msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text except Exception as e: print(f"[AI] AI triage pass error: {e}", flush=True) diff --git a/apps/backend/runners/github/services/pr_worktree_manager.py b/apps/backend/runners/github/services/pr_worktree_manager.py new file mode 100644 index 0000000000..1fa921bfa1 --- /dev/null +++ b/apps/backend/runners/github/services/pr_worktree_manager.py @@ -0,0 +1,425 @@ +""" +PR Worktree Manager +=================== + +Manages lifecycle of PR review worktrees with cleanup policies. 
+
+Features:
+- Age-based cleanup (remove worktrees older than N days)
+- Count-based cleanup (keep only N most recent worktrees)
+- Orphaned worktree cleanup (worktrees not registered with git)
+- Automatic cleanup on review completion
+"""
+
+from __future__ import annotations
+
+import logging
+import os
+import re
+import shutil
+import subprocess
+import time
+from pathlib import Path
+from typing import NamedTuple
+
+logger = logging.getLogger(__name__)
+
+# Default cleanup policies (can be overridden via environment variables)
+DEFAULT_MAX_PR_WORKTREES = 10  # Max worktrees to keep
+DEFAULT_PR_WORKTREE_MAX_AGE_DAYS = 7  # Max age in days
+
+
+def _get_max_pr_worktrees() -> int:
+    """Get max worktrees setting, read at runtime for testability."""
+    try:
+        value = int(os.environ.get("MAX_PR_WORKTREES", str(DEFAULT_MAX_PR_WORKTREES)))
+        return value if value > 0 else DEFAULT_MAX_PR_WORKTREES
+    except (ValueError, TypeError):
+        return DEFAULT_MAX_PR_WORKTREES
+
+
+def _get_max_age_days() -> int:
+    """Get max age setting, read at runtime for testability."""
+    try:
+        value = int(
+            os.environ.get(
+                "PR_WORKTREE_MAX_AGE_DAYS", str(DEFAULT_PR_WORKTREE_MAX_AGE_DAYS)
+            )
+        )
+        return value if value >= 0 else DEFAULT_PR_WORKTREE_MAX_AGE_DAYS
+    except (ValueError, TypeError):
+        return DEFAULT_PR_WORKTREE_MAX_AGE_DAYS
+
+
+# Safe pattern for git refs (SHA, branch names)
+# Allows: alphanumeric, dots, underscores, hyphens, forward slashes
+SAFE_REF_PATTERN = re.compile(r"^[a-zA-Z0-9._/\-]+$")
+
+
+class WorktreeInfo(NamedTuple):
+    """Information about a PR worktree."""
+
+    path: Path
+    age_days: float
+    pr_number: int | None = None
+
+
+class PRWorktreeManager:
+    """
+    Manages PR review worktrees with automatic cleanup policies.
+
+    Cleanup policies:
+    1. Remove worktrees older than PR_WORKTREE_MAX_AGE_DAYS (default: 7 days)
+    2. Keep only MAX_PR_WORKTREES most recent worktrees (default: 10)
+    3. Remove orphaned worktrees (not registered with git)
+    """
+
+    def __init__(self, project_dir: Path, worktree_dir: str | Path):
+        """
+        Initialize the worktree manager.
+
+        Args:
+            project_dir: Root directory of the git project
+            worktree_dir: Directory where PR worktrees are stored (relative to project_dir)
+        """
+        self.project_dir = Path(project_dir)
+        self.worktree_base_dir = self.project_dir / worktree_dir
+
+    def create_worktree(
+        self, head_sha: str, pr_number: int, auto_cleanup: bool = True
+    ) -> Path:
+        """
+        Create a PR worktree with automatic cleanup of old worktrees.
+ + Args: + head_sha: Git commit SHA to checkout + pr_number: PR number for naming + auto_cleanup: If True (default), run cleanup before creating + + Returns: + Path to the created worktree + + Raises: + RuntimeError: If worktree creation fails + ValueError: If head_sha or pr_number are invalid + """ + # Validate inputs to prevent command injection + if not head_sha or not SAFE_REF_PATTERN.match(head_sha): + raise ValueError( + f"Invalid head_sha: must match pattern {SAFE_REF_PATTERN.pattern}" + ) + if not isinstance(pr_number, int) or pr_number <= 0: + raise ValueError( + f"Invalid pr_number: must be a positive integer, got {pr_number}" + ) + + # Run cleanup before creating new worktree (can be disabled for tests) + if auto_cleanup: + self.cleanup_worktrees() + + # Generate worktree name with timestamp for uniqueness + sha_short = head_sha[:8] + timestamp = int(time.time() * 1000) # Millisecond precision + worktree_name = f"pr-{pr_number}-{sha_short}-{timestamp}" + + # Create worktree directory + self.worktree_base_dir.mkdir(parents=True, exist_ok=True) + worktree_path = self.worktree_base_dir / worktree_name + + logger.debug(f"Creating worktree: {worktree_path}") + + try: + # Fetch the commit if not available locally (handles fork PRs) + fetch_result = subprocess.run( + ["git", "fetch", "origin", head_sha], + cwd=self.project_dir, + capture_output=True, + text=True, + timeout=60, + ) + + if fetch_result.returncode != 0: + logger.warning( + f"Could not fetch {head_sha} from origin (fork PR?): {fetch_result.stderr}" + ) + except subprocess.TimeoutExpired: + logger.warning( + f"Timeout fetching {head_sha} from origin, continuing anyway" + ) + + try: + # Create detached worktree at the PR commit + result = subprocess.run( + ["git", "worktree", "add", "--detach", str(worktree_path), head_sha], + cwd=self.project_dir, + capture_output=True, + text=True, + timeout=120, + ) + + if result.returncode != 0: + raise RuntimeError(f"Failed to create worktree: {result.stderr}") + except subprocess.TimeoutExpired: + # Clean up partial worktree on timeout + if worktree_path.exists(): + shutil.rmtree(worktree_path, ignore_errors=True) + raise RuntimeError(f"Timeout creating worktree for {head_sha}") + + logger.info(f"[WorktreeManager] Created worktree at {worktree_path}") + return worktree_path + + def remove_worktree(self, worktree_path: Path) -> None: + """ + Remove a PR worktree with fallback chain. 
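+
+        The chain: try "git worktree remove --force" first; if that fails or
+        times out, fall back to shutil.rmtree followed by "git worktree prune".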
+ + Args: + worktree_path: Path to the worktree to remove + """ + if not worktree_path or not worktree_path.exists(): + return + + logger.debug(f"Removing worktree: {worktree_path}") + + # Try 1: git worktree remove + try: + result = subprocess.run( + ["git", "worktree", "remove", "--force", str(worktree_path)], + cwd=self.project_dir, + capture_output=True, + text=True, + timeout=60, + ) + + if result.returncode == 0: + logger.info(f"[WorktreeManager] Removed worktree: {worktree_path.name}") + return + except subprocess.TimeoutExpired: + logger.warning( + f"Timeout removing worktree {worktree_path.name}, falling back to shutil" + ) + + # Try 2: shutil.rmtree fallback + try: + shutil.rmtree(worktree_path, ignore_errors=True) + subprocess.run( + ["git", "worktree", "prune"], + cwd=self.project_dir, + capture_output=True, + timeout=30, + ) + logger.warning( + f"[WorktreeManager] Used shutil fallback for: {worktree_path.name}" + ) + except Exception as e: + logger.error( + f"[WorktreeManager] Failed to remove worktree {worktree_path}: {e}" + ) + + def get_worktree_info(self) -> list[WorktreeInfo]: + """ + Get information about all PR worktrees. + + Returns: + List of WorktreeInfo objects sorted by age (oldest first) + """ + if not self.worktree_base_dir.exists(): + return [] + + worktrees = [] + current_time = time.time() + + for item in self.worktree_base_dir.iterdir(): + if not item.is_dir(): + continue + + # Get modification time + mtime = item.stat().st_mtime + age_seconds = current_time - mtime + age_days = age_seconds / 86400 # Convert seconds to days + + # Extract PR number from directory name (format: pr-XXX-sha) + pr_number = None + if item.name.startswith("pr-"): + parts = item.name.split("-") + if len(parts) >= 2: + try: + pr_number = int(parts[1]) + except ValueError: + pass + + worktrees.append( + WorktreeInfo(path=item, age_days=age_days, pr_number=pr_number) + ) + + # Sort by age (oldest first) + worktrees.sort(key=lambda x: x.age_days, reverse=True) + + return worktrees + + def get_registered_worktrees(self) -> set[Path]: + """ + Get set of worktrees registered with git. + + Returns: + Set of resolved Path objects for registered worktrees + """ + try: + result = subprocess.run( + ["git", "worktree", "list", "--porcelain"], + cwd=self.project_dir, + capture_output=True, + text=True, + timeout=30, + ) + except subprocess.TimeoutExpired: + logger.warning("Timeout listing worktrees, returning empty set") + return set() + + registered = set() + for line in result.stdout.split("\n"): + if line.startswith("worktree "): + parts = line.split(" ", 1) + if len(parts) > 1 and parts[1]: + registered.add(Path(parts[1])) + + return registered + + def cleanup_worktrees(self, force: bool = False) -> dict[str, int]: + """ + Clean up PR worktrees based on age and count policies. + + Cleanup order: + 1. Remove orphaned worktrees (not registered with git) + 2. Remove worktrees older than PR_WORKTREE_MAX_AGE_DAYS + 3. 
If still over MAX_PR_WORKTREES, remove oldest worktrees + + Args: + force: If True, skip age check and only enforce count limit + + Returns: + Dict with cleanup statistics: { + 'orphaned': count, + 'expired': count, + 'excess': count, + 'total': count + } + """ + stats = {"orphaned": 0, "expired": 0, "excess": 0, "total": 0} + + if not self.worktree_base_dir.exists(): + return stats + + # Get registered worktrees (resolved paths for consistent comparison) + registered = self.get_registered_worktrees() + registered_resolved = {p.resolve() for p in registered} + + # Get all PR worktree info + worktrees = self.get_worktree_info() + + # Phase 1: Remove orphaned worktrees + for wt in worktrees: + if wt.path.resolve() not in registered_resolved: + logger.info( + f"[WorktreeManager] Removing orphaned worktree: {wt.path.name} (age: {wt.age_days:.1f} days)" + ) + shutil.rmtree(wt.path, ignore_errors=True) + stats["orphaned"] += 1 + + # Refresh worktree list after orphan cleanup + try: + subprocess.run( + ["git", "worktree", "prune"], + cwd=self.project_dir, + capture_output=True, + timeout=30, + ) + except subprocess.TimeoutExpired: + logger.warning("Timeout pruning worktrees, continuing anyway") + + # Refresh registered worktrees after prune (git's internal registry may have changed) + registered_resolved = {p.resolve() for p in self.get_registered_worktrees()} + + # Get fresh worktree info for remaining worktrees (use resolved paths) + worktrees = [ + wt + for wt in self.get_worktree_info() + if wt.path.resolve() in registered_resolved + ] + + # Phase 2: Remove expired worktrees (older than max age) + max_age_days = _get_max_age_days() + if not force: + for wt in worktrees: + if wt.age_days > max_age_days: + logger.info( + f"[WorktreeManager] Removing expired worktree: {wt.path.name} (age: {wt.age_days:.1f} days, max: {max_age_days} days)" + ) + self.remove_worktree(wt.path) + stats["expired"] += 1 + + # Refresh worktree list after expiration cleanup (use resolved paths) + registered_resolved = {p.resolve() for p in self.get_registered_worktrees()} + worktrees = [ + wt + for wt in self.get_worktree_info() + if wt.path.resolve() in registered_resolved + ] + + # Phase 3: Remove excess worktrees (keep only max_pr_worktrees most recent) + max_pr_worktrees = _get_max_pr_worktrees() + if len(worktrees) > max_pr_worktrees: + # worktrees are already sorted by age (oldest first) + excess_count = len(worktrees) - max_pr_worktrees + for wt in worktrees[:excess_count]: + logger.info( + f"[WorktreeManager] Removing excess worktree: {wt.path.name} (count: {len(worktrees)}, max: {max_pr_worktrees})" + ) + self.remove_worktree(wt.path) + stats["excess"] += 1 + + stats["total"] = stats["orphaned"] + stats["expired"] + stats["excess"] + + if stats["total"] > 0: + logger.info( + f"[WorktreeManager] Cleanup complete: {stats['total']} worktrees removed " + f"(orphaned={stats['orphaned']}, expired={stats['expired']}, excess={stats['excess']})" + ) + else: + logger.debug( + f"No cleanup needed (current: {len(worktrees)}, max: {max_pr_worktrees})" + ) + + return stats + + def cleanup_all_worktrees(self) -> int: + """ + Remove ALL PR worktrees (for testing or emergency cleanup). 
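+
+        Example (illustrative):
+
+            removed = manager.cleanup_all_worktrees()
+            print(f"Removed {removed} PR worktrees")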
+ + Returns: + Number of worktrees removed + """ + if not self.worktree_base_dir.exists(): + return 0 + + worktrees = self.get_worktree_info() + count = 0 + + for wt in worktrees: + logger.info(f"[WorktreeManager] Removing worktree: {wt.path.name}") + self.remove_worktree(wt.path) + count += 1 + + if count > 0: + try: + subprocess.run( + ["git", "worktree", "prune"], + cwd=self.project_dir, + capture_output=True, + timeout=30, + ) + except subprocess.TimeoutExpired: + logger.warning("Timeout pruning worktrees after cleanup") + logger.info(f"[WorktreeManager] Removed all {count} PR worktrees") + + return count diff --git a/apps/backend/runners/github/services/pydantic_models.py b/apps/backend/runners/github/services/pydantic_models.py index 3c91a219eb..6777e97690 100644 --- a/apps/backend/runners/github/services/pydantic_models.py +++ b/apps/backend/runners/github/services/pydantic_models.py @@ -26,7 +26,7 @@ from typing import Literal -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, Field # ============================================================================= # Common Finding Types @@ -46,6 +46,10 @@ class BaseFinding(BaseModel): line: int = Field(0, description="Line number of the issue") suggested_fix: str | None = Field(None, description="How to fix this issue") fixable: bool = Field(False, description="Whether this can be auto-fixed") + evidence: str | None = Field( + None, + description="Actual code snippet proving the issue exists. Required for validation.", + ) class SecurityFinding(BaseFinding): @@ -78,9 +82,6 @@ class DeepAnalysisFinding(BaseFinding): "performance", "logic", ] = Field(description="Issue category") - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="AI's confidence in this finding (0.0-1.0)" - ) verification_note: str | None = Field( None, description="What evidence is missing or couldn't be verified" ) @@ -315,21 +316,11 @@ class OrchestratorFinding(BaseModel): description="Issue severity level" ) suggestion: str | None = Field(None, description="How to fix this issue") - confidence: float = Field( - 0.85, - ge=0.0, - le=1.0, - description="Confidence (0.0-1.0 or 0-100, normalized to 0.0-1.0)", + evidence: str | None = Field( + None, + description="Actual code snippet proving the issue exists. 
Required for validation.", ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range (accepts 0-100 or 0.0-1.0).""" - if v > 1: - return v / 100.0 - return float(v) - class OrchestratorReviewResponse(BaseModel): """Complete response schema for orchestrator PR review.""" @@ -355,9 +346,6 @@ class LogicFinding(BaseFinding): category: Literal["logic"] = Field( default="logic", description="Always 'logic' for logic findings" ) - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)" - ) example_input: str | None = Field( None, description="Concrete input that triggers the bug" ) @@ -366,14 +354,6 @@ class LogicFinding(BaseFinding): None, description="What the code should produce" ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range.""" - if v > 1: - return v / 100.0 - return float(v) - class CodebaseFitFinding(BaseFinding): """A codebase fit finding from the codebase fit review agent.""" @@ -381,9 +361,6 @@ class CodebaseFitFinding(BaseFinding): category: Literal["codebase_fit"] = Field( default="codebase_fit", description="Always 'codebase_fit' for fit findings" ) - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)" - ) existing_code: str | None = Field( None, description="Reference to existing code that should be used instead" ) @@ -391,14 +368,6 @@ class CodebaseFitFinding(BaseFinding): None, description="Description of the established pattern being violated" ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range.""" - if v > 1: - return v / 100.0 - return float(v) - class ParallelOrchestratorFinding(BaseModel): """A finding from the parallel orchestrator with source agent tracking.""" @@ -423,8 +392,9 @@ class ParallelOrchestratorFinding(BaseModel): severity: Literal["critical", "high", "medium", "low"] = Field( description="Issue severity level" ) - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)" + evidence: str | None = Field( + None, + description="Actual code snippet proving the issue exists. Required for validation.", ) suggested_fix: str | None = Field(None, description="How to fix this issue") fixable: bool = Field(False, description="Whether this can be auto-fixed") @@ -436,14 +406,6 @@ class ParallelOrchestratorFinding(BaseModel): False, description="Whether multiple agents agreed on this finding" ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range.""" - if v > 1: - return v / 100.0 - return float(v) - class AgentAgreement(BaseModel): """Tracks agreement between agents on findings.""" @@ -496,22 +458,14 @@ class ResolutionVerification(BaseModel): status: Literal["resolved", "partially_resolved", "unresolved", "cant_verify"] = ( Field(description="Resolution status after AI verification") ) - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="Confidence in the resolution status" + evidence: str = Field( + min_length=1, + description="Actual code snippet showing the resolution status. 
Required.", ) - evidence: str = Field(description="What evidence supports this resolution status") resolution_notes: str | None = Field( None, description="Detailed notes on how the issue was addressed" ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range.""" - if v > 1: - return v / 100.0 - return float(v) - class ParallelFollowupFinding(BaseModel): """A finding from parallel follow-up review with source agent tracking.""" @@ -534,8 +488,9 @@ class ParallelFollowupFinding(BaseModel): severity: Literal["critical", "high", "medium", "low"] = Field( description="Issue severity level" ) - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)" + evidence: str | None = Field( + None, + description="Actual code snippet proving the issue exists. Required for validation.", ) suggested_fix: str | None = Field(None, description="How to fix this issue") fixable: bool = Field(False, description="Whether this can be auto-fixed") @@ -546,14 +501,6 @@ class ParallelFollowupFinding(BaseModel): None, description="ID of related previous finding if this is a regression" ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range.""" - if v > 1: - return v / 100.0 - return float(v) - class CommentAnalysis(BaseModel): """Analysis of a contributor or AI comment.""" @@ -640,6 +587,9 @@ class FindingValidationResult(BaseModel): The finding-validator agent uses this to report whether a previous finding is a genuine issue or a false positive that should be dismissed. + + EVIDENCE-BASED VALIDATION: No confidence scores - validation is binary. + Either the evidence shows the issue exists, or it doesn't. """ finding_id: str = Field(description="ID of the finding being validated") @@ -648,16 +598,17 @@ class FindingValidationResult(BaseModel): ] = Field( description=( "Validation result: " - "confirmed_valid = issue IS real, keep as unresolved; " - "dismissed_false_positive = original finding was incorrect, remove; " - "needs_human_review = cannot determine with confidence" + "confirmed_valid = code evidence proves issue IS real; " + "dismissed_false_positive = code evidence proves issue does NOT exist; " + "needs_human_review = cannot find definitive evidence either way" ) ) code_evidence: str = Field( min_length=1, description=( "REQUIRED: Exact code snippet examined from the file. " - "Must be actual code, not a description." + "Must be actual code copy-pasted from the file, not a description. " + "This is the proof that determines the validation status." ), ) line_range: tuple[int, int] = Field( @@ -666,27 +617,18 @@ class FindingValidationResult(BaseModel): explanation: str = Field( min_length=20, description=( - "Detailed explanation of why the finding is valid/invalid. " - "Must reference specific code and explain the reasoning." + "Detailed explanation connecting the code_evidence to the validation_status. " + "Must explain: (1) what the original finding claimed, (2) what the actual code shows, " + "(3) why this proves/disproves the issue." ), ) - confidence: float = Field( - ge=0.0, - le=1.0, + evidence_verified_in_file: bool = Field( description=( - "Confidence in the validation result (0.0-1.0). " - "Must be >= 0.80 to dismiss as false positive, >= 0.70 to confirm valid." 
- ), + "True if the code_evidence was verified to exist at the specified line_range. " + "False if the code couldn't be found (indicates hallucination in original finding)." + ) ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range (accepts 0-100 or 0.0-1.0).""" - if v > 1: - return v / 100.0 - return float(v) - class FindingValidationResponse(BaseModel): """Complete response from the finding-validator agent.""" diff --git a/apps/backend/runners/github/services/response_parsers.py b/apps/backend/runners/github/services/response_parsers.py index db318463d2..2df83ea06b 100644 --- a/apps/backend/runners/github/services/response_parsers.py +++ b/apps/backend/runners/github/services/response_parsers.py @@ -33,8 +33,9 @@ TriageResult, ) -# Confidence threshold for filtering findings (GitHub Copilot standard) -CONFIDENCE_THRESHOLD = 0.80 +# Evidence-based validation replaces confidence scoring +# Findings without evidence are filtered out instead of using confidence thresholds +MIN_EVIDENCE_LENGTH = 20 # Minimum chars for evidence to be considered valid class ResponseParser: @@ -65,9 +66,13 @@ def parse_scan_result(response_text: str) -> dict: @staticmethod def parse_review_findings( - response_text: str, apply_confidence_filter: bool = True + response_text: str, require_evidence: bool = True ) -> list[PRReviewFinding]: - """Parse findings from AI response with optional confidence filtering.""" + """Parse findings from AI response with optional evidence validation. + + Evidence-based validation: Instead of confidence scores, findings + require actual code evidence proving the issue exists. + """ findings = [] try: @@ -77,14 +82,14 @@ def parse_review_findings( if json_match: findings_data = json.loads(json_match.group(1)) for i, f in enumerate(findings_data): - # Get confidence (default to 0.85 if not provided for backward compat) - confidence = float(f.get("confidence", 0.85)) + # Get evidence (code snippet proving the issue) + evidence = f.get("evidence") or f.get("code_snippet") or "" - # Apply confidence threshold filter - if apply_confidence_filter and confidence < CONFIDENCE_THRESHOLD: + # Apply evidence-based validation + if require_evidence and len(evidence.strip()) < MIN_EVIDENCE_LENGTH: print( f"[AI] Dropped finding '{f.get('title', 'unknown')}': " - f"confidence {confidence:.2f} < {CONFIDENCE_THRESHOLD}", + f"insufficient evidence ({len(evidence.strip())} chars < {MIN_EVIDENCE_LENGTH})", flush=True, ) continue @@ -105,8 +110,8 @@ def parse_review_findings( end_line=f.get("end_line"), suggested_fix=f.get("suggested_fix"), fixable=f.get("fixable", False), - # NEW: Support verification and redundancy fields - confidence=confidence, + # Evidence-based validation fields + evidence=evidence if evidence.strip() else None, verification_note=f.get("verification_note"), redundant_with=f.get("redundant_with"), ) diff --git a/apps/backend/runners/github/services/review_tools.py b/apps/backend/runners/github/services/review_tools.py index 881d8353cf..1a53a6b126 100644 --- a/apps/backend/runners/github/services/review_tools.py +++ b/apps/backend/runners/github/services/review_tools.py @@ -140,7 +140,9 @@ async def spawn_security_review( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if 
block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text # Parse findings @@ -223,7 +225,9 @@ async def spawn_quality_review( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text findings = _parse_findings_from_response(result_text, source="quality_agent") @@ -316,7 +320,9 @@ async def spawn_deep_analysis( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text findings = _parse_findings_from_response(result_text, source="deep_analysis") diff --git a/apps/backend/runners/github/services/sdk_utils.py b/apps/backend/runners/github/services/sdk_utils.py index 0e6da74f30..7471f16360 100644 --- a/apps/backend/runners/github/services/sdk_utils.py +++ b/apps/backend/runners/github/services/sdk_utils.py @@ -235,8 +235,9 @@ async def process_sdk_stream( if on_tool_use: on_tool_use(tool_name, tool_id, tool_input) - # Collect text - if hasattr(block, "text"): + # Collect text - must check block type since only TextBlock has .text + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text # Always print text content preview (not just in DEBUG_MODE) text_preview = block.text[:500].replace("\n", " ").strip() diff --git a/apps/backend/runners/github/services/triage_engine.py b/apps/backend/runners/github/services/triage_engine.py index 2508207012..57a6b04310 100644 --- a/apps/backend/runners/github/services/triage_engine.py +++ b/apps/backend/runners/github/services/triage_engine.py @@ -87,7 +87,9 @@ async def triage_single_issue( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): response_text += block.text return self.parser.parse_triage_result( diff --git a/apps/backend/runners/gitlab/runner.py b/apps/backend/runners/gitlab/runner.py index c2a0be32a5..d4f61827bb 100644 --- a/apps/backend/runners/gitlab/runner.py +++ b/apps/backend/runners/gitlab/runner.py @@ -26,8 +26,10 @@ # Add backend to path sys.path.insert(0, str(Path(__file__).parent.parent.parent)) -# Load .env file -from dotenv import load_dotenv +# Load .env file with centralized error handling +from cli.utils import import_dotenv + +load_dotenv = import_dotenv() env_file = Path(__file__).parent.parent.parent / ".env" if env_file.exists(): diff --git a/apps/backend/runners/gitlab/services/mr_review_engine.py b/apps/backend/runners/gitlab/services/mr_review_engine.py index d1679a4b62..ef8ef9aaf0 100644 --- a/apps/backend/runners/gitlab/services/mr_review_engine.py +++ b/apps/backend/runners/gitlab/services/mr_review_engine.py @@ -234,7 +234,9 @@ async def run_review( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text 
attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text self._report_progress( diff --git a/apps/backend/runners/ideation_runner.py b/apps/backend/runners/ideation_runner.py index 63714a372f..9b91445601 100644 --- a/apps/backend/runners/ideation_runner.py +++ b/apps/backend/runners/ideation_runner.py @@ -26,8 +26,10 @@ # Add auto-claude to path sys.path.insert(0, str(Path(__file__).parent.parent)) -# Load .env file from auto-claude/ directory -from dotenv import load_dotenv +# Load .env file with centralized error handling +from cli.utils import import_dotenv + +load_dotenv = import_dotenv() env_file = Path(__file__).parent.parent / ".env" if env_file.exists(): @@ -94,8 +96,8 @@ def main(): parser.add_argument( "--model", type=str, - default="claude-opus-4-5-20251101", - help="Model to use (default: claude-opus-4-5-20251101)", + default="sonnet", # Changed from "opus" (fix #433) + help="Model to use (haiku, sonnet, opus, or full model ID)", ) parser.add_argument( "--thinking-level", diff --git a/apps/backend/runners/insights_runner.py b/apps/backend/runners/insights_runner.py index a2de9f9408..bd4bf362c4 100644 --- a/apps/backend/runners/insights_runner.py +++ b/apps/backend/runners/insights_runner.py @@ -15,8 +15,10 @@ # Add auto-claude to path sys.path.insert(0, str(Path(__file__).parent.parent)) -# Load .env file from auto-claude/ directory -from dotenv import load_dotenv +# Load .env file with centralized error handling +from cli.utils import import_dotenv + +load_dotenv = import_dotenv() env_file = Path(__file__).parent.parent / ".env" if env_file.exists(): @@ -39,6 +41,7 @@ debug_section, debug_success, ) +from phase_config import resolve_model_id def load_project_context(project_dir: str) -> str: @@ -132,7 +135,7 @@ async def run_with_sdk( project_dir: str, message: str, history: list, - model: str = "claude-sonnet-4-5-20250929", + model: str = "sonnet", # Shorthand - resolved via API Profile if configured thinking_level: str = "medium", ) -> None: """Run the chat using Claude SDK with streaming.""" @@ -180,7 +183,7 @@ async def run_with_sdk( # Create Claude SDK client with appropriate settings for insights client = ClaudeSDKClient( options=ClaudeAgentOptions( - model=model, # Use configured model + model=resolve_model_id(model), # Resolve via API Profile if configured system_prompt=system_prompt, allowed_tools=[ "Read", @@ -336,8 +339,8 @@ def main(): ) parser.add_argument( "--model", - default="claude-sonnet-4-5-20250929", - help="Claude model ID (default: claude-sonnet-4-5-20250929)", + default="sonnet", + help="Model to use (haiku, sonnet, opus, or full model ID)", ) parser.add_argument( "--thinking-level", diff --git a/apps/backend/runners/roadmap/models.py b/apps/backend/runners/roadmap/models.py index cc7a1f5f8b..377f5cfacc 100644 --- a/apps/backend/runners/roadmap/models.py +++ b/apps/backend/runners/roadmap/models.py @@ -23,6 +23,6 @@ class RoadmapConfig: project_dir: Path output_dir: Path - model: str = "claude-opus-4-5-20251101" + model: str = "sonnet" # Changed from "opus" (fix #433) refresh: bool = False # Force regeneration even if roadmap exists enable_competitor_analysis: bool = False # Enable competitor analysis phase diff --git a/apps/backend/runners/roadmap/orchestrator.py b/apps/backend/runners/roadmap/orchestrator.py index b7a9803af1..b49ca2c1cb 100644 --- a/apps/backend/runners/roadmap/orchestrator.py +++ b/apps/backend/runners/roadmap/orchestrator.py @@ -27,7 +27,7 @@ def __init__( 
self, project_dir: Path, output_dir: Path | None = None, - model: str = "claude-opus-4-5-20251101", + model: str = "sonnet", # Changed from "opus" (fix #433) thinking_level: str = "medium", refresh: bool = False, enable_competitor_analysis: bool = False, diff --git a/apps/backend/runners/roadmap_runner.py b/apps/backend/runners/roadmap_runner.py index 88f157b12c..06625add7e 100644 --- a/apps/backend/runners/roadmap_runner.py +++ b/apps/backend/runners/roadmap_runner.py @@ -20,8 +20,10 @@ # Add auto-claude to path sys.path.insert(0, str(Path(__file__).parent.parent)) -# Load .env file from auto-claude/ directory -from dotenv import load_dotenv +# Load .env file with centralized error handling +from cli.utils import import_dotenv + +load_dotenv = import_dotenv() env_file = Path(__file__).parent.parent / ".env" if env_file.exists(): @@ -55,8 +57,8 @@ def main(): parser.add_argument( "--model", type=str, - default="claude-opus-4-5-20251101", - help="Model to use (default: claude-opus-4-5-20251101)", + default="sonnet", # Changed from "opus" (fix #433) + help="Model to use (haiku, sonnet, opus, or full model ID)", ) parser.add_argument( "--thinking-level", diff --git a/apps/backend/runners/spec_runner.py b/apps/backend/runners/spec_runner.py index 0bda6db115..4c5a4bba6c 100644 --- a/apps/backend/runners/spec_runner.py +++ b/apps/backend/runners/spec_runner.py @@ -26,11 +26,11 @@ - Risk factors and edge cases Usage: - python auto-claude/spec_runner.py --task "Add user authentication" - python auto-claude/spec_runner.py --interactive - python auto-claude/spec_runner.py --continue 001-feature - python auto-claude/spec_runner.py --task "Fix button color" --complexity simple - python auto-claude/spec_runner.py --task "Simple fix" --no-ai-assessment + python runners/spec_runner.py --task "Add user authentication" + python runners/spec_runner.py --interactive + python runners/spec_runner.py --continue 001-feature + python runners/spec_runner.py --task "Fix button color" --complexity simple + python runners/spec_runner.py --task "Simple fix" --no-ai-assessment """ import sys @@ -81,8 +81,10 @@ # Add auto-claude to path (parent of runners/) sys.path.insert(0, str(Path(__file__).parent.parent)) -# Load .env file -from dotenv import load_dotenv +# Load .env file with centralized error handling +from cli.utils import import_dotenv + +load_dotenv = import_dotenv() env_file = Path(__file__).parent.parent / ".env" dev_env_file = Path(__file__).parent.parent.parent / "dev" / "auto-claude" / ".env" diff --git a/apps/backend/security/__init__.py b/apps/backend/security/__init__.py index 9b389373b6..b26311d292 100644 --- a/apps/backend/security/__init__.py +++ b/apps/backend/security/__init__.py @@ -62,7 +62,9 @@ validate_chmod_command, validate_dropdb_command, validate_dropuser_command, + validate_git_command, validate_git_commit, + validate_git_config, validate_init_script, validate_kill_command, validate_killall_command, @@ -93,7 +95,9 @@ "validate_chmod_command", "validate_rm_command", "validate_init_script", + "validate_git_command", "validate_git_commit", + "validate_git_config", "validate_dropdb_command", "validate_dropuser_command", "validate_psql_command", diff --git a/apps/backend/security/constants.py b/apps/backend/security/constants.py new file mode 100644 index 0000000000..3ddbca3002 --- /dev/null +++ b/apps/backend/security/constants.py @@ -0,0 +1,16 @@ +""" +Security Constants +================== + +Shared constants for the security module. 
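+
+Import these from here rather than redefining them elsewhere; for example,
+security/profile.py does:
+
+    from .constants import ALLOWLIST_FILENAME, PROFILE_FILENAME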
+""" + +# Environment variable name for the project directory +# Set by agents (coder.py, loop.py) at startup to ensure security hooks +# can find the correct project directory even in worktree mode. +PROJECT_DIR_ENV_VAR = "AUTO_CLAUDE_PROJECT_DIR" + +# Security configuration filenames +# These are the files that control which commands are allowed to run. +ALLOWLIST_FILENAME = ".auto-claude-allowlist" +PROFILE_FILENAME = ".auto-claude-security.json" diff --git a/apps/backend/security/git_validators.py b/apps/backend/security/git_validators.py index 5a75ad39f1..5c21d32909 100644 --- a/apps/backend/security/git_validators.py +++ b/apps/backend/security/git_validators.py @@ -2,7 +2,9 @@ Git Validators ============== -Validators for git operations (commit with secret scanning). +Validators for git operations: +- Commit with secret scanning +- Config protection (prevent setting test users) """ import shlex @@ -10,8 +12,203 @@ from .validation_models import ValidationResult +# ============================================================================= +# BLOCKED GIT CONFIG PATTERNS +# ============================================================================= -def validate_git_commit(command_string: str) -> ValidationResult: +# Git config keys that agents must NOT modify +# These are identity settings that should inherit from the user's global config +# +# NOTE: This validation covers command-line arguments (git config, git -c). +# Environment variables (GIT_AUTHOR_NAME, GIT_AUTHOR_EMAIL, GIT_COMMITTER_NAME, +# GIT_COMMITTER_EMAIL) are NOT validated here as they require pre-execution +# environment filtering, which is handled at the sandbox/hook level. +BLOCKED_GIT_CONFIG_KEYS = { + "user.name", + "user.email", + "author.name", + "author.email", + "committer.name", + "committer.email", +} + + +def validate_git_config(command_string: str) -> ValidationResult: + """ + Validate git config commands - block identity changes. + + Agents should not set user.name, user.email, etc. as this: + 1. Breaks commit attribution + 2. Can create fake "Test User" identities + 3. 
Overrides the user's legitimate git identity + + Args: + command_string: The full git command string + + Returns: + Tuple of (is_valid, error_message) + """ + try: + tokens = shlex.split(command_string) + except ValueError: + return False, "Could not parse git command" # Fail closed on parse errors + + if len(tokens) < 2 or tokens[0] != "git" or tokens[1] != "config": + return True, "" # Not a git config command + + # Check for read-only operations first - these are always allowed + # --get, --get-all, --get-regexp, --list are all read operations + read_only_flags = {"--get", "--get-all", "--get-regexp", "--list", "-l"} + for token in tokens[2:]: + if token in read_only_flags: + return True, "" # Read operation, allow it + + # Extract the config key from the command + # git config [options] [value] - key is typically after config and any options + config_key = None + for token in tokens[2:]: + # Skip options (start with -) + if token.startswith("-"): + continue + # First non-option token is the config key + config_key = token.lower() + break + + if not config_key: + return True, "" # No config key specified (e.g., git config --list) + + # Check if the exact config key is blocked + for blocked_key in BLOCKED_GIT_CONFIG_KEYS: + if config_key == blocked_key: + return False, ( + f"BLOCKED: Cannot modify git identity configuration\n\n" + f"You attempted to set '{blocked_key}' which is not allowed.\n\n" + f"WHY: Git identity (user.name, user.email) must inherit from the user's " + f"global git configuration. Setting fake identities like 'Test User' breaks " + f"commit attribution and causes serious issues.\n\n" + f"WHAT TO DO: Simply commit without setting any user configuration. " + f"The repository will use the correct identity automatically." + ) + + return True, "" + + +def validate_git_inline_config(tokens: list[str]) -> ValidationResult: + """ + Check for blocked config keys passed via git -c flag. + + Git allows inline config with: git -c key=value + This bypasses 'git config' validation, so we must check all git commands + for -c flags containing blocked identity keys. + + Args: + tokens: Parsed command tokens + + Returns: + Tuple of (is_valid, error_message) + """ + i = 1 # Start after 'git' + while i < len(tokens): + token = tokens[i] + + # Check for -c flag (can be "-c key=value" or "-c" "key=value") + if token == "-c": + # Next token should be the key=value + if i + 1 < len(tokens): + config_pair = tokens[i + 1] + # Extract the key from key=value + if "=" in config_pair: + config_key = config_pair.split("=", 1)[0].lower() + if config_key in BLOCKED_GIT_CONFIG_KEYS: + return False, ( + f"BLOCKED: Cannot set git identity via -c flag\n\n" + f"You attempted to use '-c {config_pair}' which sets a blocked " + f"identity configuration.\n\n" + f"WHY: Git identity (user.name, user.email) must inherit from the " + f"user's global git configuration. Setting fake identities breaks " + f"commit attribution and causes serious issues.\n\n" + f"WHAT TO DO: Remove the -c flag and commit normally. " + f"The repository will use the correct identity automatically." 
+ ) + i += 2 # Skip -c and its value + continue + elif token.startswith("-c"): + # Handle -ckey=value format (no space) + config_pair = token[2:] # Remove "-c" prefix + if "=" in config_pair: + config_key = config_pair.split("=", 1)[0].lower() + if config_key in BLOCKED_GIT_CONFIG_KEYS: + return False, ( + f"BLOCKED: Cannot set git identity via -c flag\n\n" + f"You attempted to use '{token}' which sets a blocked " + f"identity configuration.\n\n" + f"WHY: Git identity (user.name, user.email) must inherit from the " + f"user's global git configuration. Setting fake identities breaks " + f"commit attribution and causes serious issues.\n\n" + f"WHAT TO DO: Remove the -c flag and commit normally. " + f"The repository will use the correct identity automatically." + ) + + i += 1 + + return True, "" + + +def validate_git_command(command_string: str) -> ValidationResult: + """ + Main git validator that checks all git security rules. + + Currently validates: + - git -c: Block identity changes via inline config on ANY git command + - git config: Block identity changes + - git commit: Run secret scanning + + Args: + command_string: The full git command string + + Returns: + Tuple of (is_valid, error_message) + """ + try: + tokens = shlex.split(command_string) + except ValueError: + return False, "Could not parse git command" + + if not tokens or tokens[0] != "git": + return True, "" + + if len(tokens) < 2: + return True, "" # Just "git" with no subcommand + + # Check for blocked -c flags on ANY git command (security bypass prevention) + is_valid, error_msg = validate_git_inline_config(tokens) + if not is_valid: + return is_valid, error_msg + + # Find the actual subcommand (skip global options like -c, -C, --git-dir, etc.) + subcommand = None + for token in tokens[1:]: + # Skip options and their values + if token.startswith("-"): + continue + subcommand = token + break + + if not subcommand: + return True, "" # No subcommand found + + # Check git config commands + if subcommand == "config": + return validate_git_config(command_string) + + # Check git commit commands (secret scanning) + if subcommand == "commit": + return validate_git_commit_secrets(command_string) + + return True, "" + + +def validate_git_commit_secrets(command_string: str) -> ValidationResult: """ Validate git commit commands - run secret scan before allowing commit. @@ -99,3 +296,8 @@ def validate_git_commit(command_string: str) -> ValidationResult: ) return False, "\n".join(error_lines) + + +# Backwards compatibility alias - the registry uses this name +# Now delegates to the comprehensive validator +validate_git_commit = validate_git_command diff --git a/apps/backend/security/hooks.py b/apps/backend/security/hooks.py index 35152d4433..4bc7328d3a 100644 --- a/apps/backend/security/hooks.py +++ b/apps/backend/security/hooks.py @@ -66,10 +66,20 @@ async def bash_security_hook( return {} # Get the working directory from context or use current directory - # In the actual client, this would be set by the ClaudeSDKClient - cwd = os.getcwd() - if context and hasattr(context, "cwd"): + # Priority: + # 1. Environment variable PROJECT_DIR_ENV_VAR (set by agent on startup) + # 2. input_data cwd (passed by SDK in the tool call) + # 3. Context cwd (should be set by ClaudeSDKClient but sometimes isn't) + # 4. 
Current working directory (fallback, may be incorrect in worktree mode) + from .constants import PROJECT_DIR_ENV_VAR + + cwd = os.environ.get(PROJECT_DIR_ENV_VAR) + if not cwd: + cwd = input_data.get("cwd") + if not cwd and context and hasattr(context, "cwd"): cwd = context.cwd + if not cwd: + cwd = os.getcwd() # Get or create security profile # Note: In actual use, spec_dir would be passed through context diff --git a/apps/backend/security/profile.py b/apps/backend/security/profile.py index da75cff174..a3087a65bb 100644 --- a/apps/backend/security/profile.py +++ b/apps/backend/security/profile.py @@ -9,11 +9,12 @@ from pathlib import Path from project_analyzer import ( - ProjectAnalyzer, SecurityProfile, get_or_create_profile, ) +from .constants import ALLOWLIST_FILENAME, PROFILE_FILENAME + # ============================================================================= # GLOBAL STATE # ============================================================================= @@ -23,18 +24,33 @@ _cached_project_dir: Path | None = None _cached_spec_dir: Path | None = None # Track spec directory for cache key _cached_profile_mtime: float | None = None # Track file modification time +_cached_allowlist_mtime: float | None = None # Track allowlist modification time def _get_profile_path(project_dir: Path) -> Path: """Get the security profile file path for a project.""" - return project_dir / ProjectAnalyzer.PROFILE_FILENAME + return project_dir / PROFILE_FILENAME + + +def _get_allowlist_path(project_dir: Path) -> Path: + """Get the allowlist file path for a project.""" + return project_dir / ALLOWLIST_FILENAME def _get_profile_mtime(project_dir: Path) -> float | None: """Get the modification time of the security profile file, or None if not exists.""" profile_path = _get_profile_path(project_dir) try: - return profile_path.stat().st_mtime if profile_path.exists() else None + return profile_path.stat().st_mtime + except OSError: + return None + + +def _get_allowlist_mtime(project_dir: Path) -> float | None: + """Get the modification time of the allowlist file, or None if not exists.""" + allowlist_path = _get_allowlist_path(project_dir) + try: + return allowlist_path.stat().st_mtime except OSError: return None @@ -49,6 +65,7 @@ def get_security_profile( - The project directory changes - The security profile file is created (was None, now exists) - The security profile file is modified (mtime changed) + - The allowlist file is created, modified, or deleted Args: project_dir: Project root directory @@ -57,7 +74,11 @@ def get_security_profile( Returns: SecurityProfile for the project """ - global _cached_profile, _cached_project_dir, _cached_spec_dir, _cached_profile_mtime + global _cached_profile + global _cached_project_dir + global _cached_spec_dir + global _cached_profile_mtime + global _cached_allowlist_mtime project_dir = Path(project_dir).resolve() resolved_spec_dir = Path(spec_dir).resolve() if spec_dir else None @@ -68,30 +89,40 @@ def get_security_profile( and _cached_project_dir == project_dir and _cached_spec_dir == resolved_spec_dir ): - # Check if file has been created or modified since caching - current_mtime = _get_profile_mtime(project_dir) - # Cache is valid if: - # - Both are None (file never existed and still doesn't) - # - Both have same mtime (file unchanged) - if current_mtime == _cached_profile_mtime: + # Check if files have been created or modified since caching + current_profile_mtime = _get_profile_mtime(project_dir) + current_allowlist_mtime = _get_allowlist_mtime(project_dir) + + # 
Cache is valid if both mtimes are unchanged + if ( + current_profile_mtime == _cached_profile_mtime + and current_allowlist_mtime == _cached_allowlist_mtime + ): return _cached_profile - # File was created or modified - invalidate cache - # (This happens when analyzer creates the file after agent starts) + # File was created, modified, or deleted - invalidate cache + # (This happens when analyzer creates the file after agent starts, + # or when user adds/updates the allowlist) # Analyze and cache _cached_profile = get_or_create_profile(project_dir, spec_dir) _cached_project_dir = project_dir _cached_spec_dir = resolved_spec_dir _cached_profile_mtime = _get_profile_mtime(project_dir) + _cached_allowlist_mtime = _get_allowlist_mtime(project_dir) return _cached_profile def reset_profile_cache() -> None: """Reset the cached profile (useful for testing or re-analysis).""" - global _cached_profile, _cached_project_dir, _cached_spec_dir, _cached_profile_mtime + global _cached_profile + global _cached_project_dir + global _cached_spec_dir + global _cached_profile_mtime + global _cached_allowlist_mtime _cached_profile = None _cached_project_dir = None _cached_spec_dir = None _cached_profile_mtime = None + _cached_allowlist_mtime = None diff --git a/apps/backend/security/validator.py b/apps/backend/security/validator.py index 7727f012fa..c1ca28983a 100644 --- a/apps/backend/security/validator.py +++ b/apps/backend/security/validator.py @@ -33,7 +33,11 @@ validate_init_script, validate_rm_command, ) -from .git_validators import validate_git_commit +from .git_validators import ( + validate_git_command, + validate_git_commit, + validate_git_config, +) from .process_validators import ( validate_kill_command, validate_killall_command, @@ -60,6 +64,8 @@ "validate_init_script", # Git validators "validate_git_commit", + "validate_git_command", + "validate_git_config", # Database validators "validate_dropdb_command", "validate_dropuser_command", diff --git a/apps/backend/spec/compaction.py b/apps/backend/spec/compaction.py index d74b377ce2..9538585ec3 100644 --- a/apps/backend/spec/compaction.py +++ b/apps/backend/spec/compaction.py @@ -16,7 +16,7 @@ async def summarize_phase_output( phase_name: str, phase_output: str, - model: str = "claude-sonnet-4-5-20250929", + model: str = "sonnet", # Shorthand - resolved via API Profile if configured target_words: int = 500, ) -> str: """ @@ -73,9 +73,12 @@ async def summarize_phase_output( await client.query(prompt) response_text = "" async for msg in client.receive_response(): - if hasattr(msg, "content"): + msg_type = type(msg).__name__ + if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): response_text += block.text return response_text.strip() except Exception as e: diff --git a/apps/backend/spec/pipeline/orchestrator.py b/apps/backend/spec/pipeline/orchestrator.py index 76c04d4719..3396f905bd 100644 --- a/apps/backend/spec/pipeline/orchestrator.py +++ b/apps/backend/spec/pipeline/orchestrator.py @@ -57,7 +57,7 @@ def __init__( spec_name: str | None = None, spec_dir: Path | None = None, # Use existing spec directory (for UI integration) - model: str = "claude-sonnet-4-5-20250929", + model: str = "sonnet", # Shorthand - resolved via API Profile if configured thinking_level: str = "medium", # Thinking level for extended thinking 
complexity_override: str | None = None, # Force a specific complexity use_ai_assessment: bool = True, # Use AI for complexity assessment (vs heuristics) @@ -173,10 +173,11 @@ async def _store_phase_summary(self, phase_name: str) -> None: return # Summarize the output + # Use sonnet shorthand - will resolve via API Profile if configured summary = await summarize_phase_output( phase_name, phase_output, - model="claude-sonnet-4-5-20250929", # Use Sonnet for efficiency + model="sonnet", target_words=500, ) diff --git a/apps/backend/task_logger/capture.py b/apps/backend/task_logger/capture.py index 346011e20f..f96d893f49 100644 --- a/apps/backend/task_logger/capture.py +++ b/apps/backend/task_logger/capture.py @@ -88,17 +88,20 @@ def process_message( inp = block.input if isinstance(inp, dict): # Extract meaningful input description + # Increased limits to avoid hiding critical information if "pattern" in inp: tool_input = f"pattern: {inp['pattern']}" elif "file_path" in inp: fp = inp["file_path"] - if len(fp) > 50: - fp = "..." + fp[-47:] + # Show last 200 chars for paths (enough for most file paths) + if len(fp) > 200: + fp = "..." + fp[-197:] tool_input = fp elif "command" in inp: cmd = inp["command"] - if len(cmd) > 50: - cmd = cmd[:47] + "..." + # Show first 300 chars for commands (enough for most commands) + if len(cmd) > 300: + cmd = cmd[:297] + "..." tool_input = cmd elif "path" in inp: tool_input = inp["path"] diff --git a/apps/backend/task_logger/logger.py b/apps/backend/task_logger/logger.py index 884bb90cea..954814464c 100644 --- a/apps/backend/task_logger/logger.py +++ b/apps/backend/task_logger/logger.py @@ -406,10 +406,10 @@ def tool_start( """ phase_key = (phase or self.current_phase or LogPhase.CODING).value - # Truncate long inputs for display + # Truncate long inputs for display (increased limit to avoid hiding critical info) display_input = tool_input - if display_input and len(display_input) > 100: - display_input = display_input[:97] + "..." + if display_input and len(display_input) > 300: + display_input = display_input[:297] + "..." entry = LogEntry( timestamp=self._timestamp(), @@ -462,10 +462,10 @@ def tool_end( """ phase_key = (phase or self.current_phase or LogPhase.CODING).value - # Truncate long results for display + # Truncate long results for display (increased limit to avoid hiding critical info) display_result = result - if display_result and len(display_result) > 100: - display_result = display_result[:97] + "..." + if display_result and len(display_result) > 300: + display_result = display_result[:297] + "..." status = "Done" if success else "Error" content = f"[{tool_name}] {status}" diff --git a/apps/backend/ui/boxes.py b/apps/backend/ui/boxes.py index 317c4a913f..27921ed29f 100644 --- a/apps/backend/ui/boxes.py +++ b/apps/backend/ui/boxes.py @@ -95,11 +95,54 @@ def box( for line in content: # Strip ANSI for length calculation visible_line = re.sub(r"\033\[[0-9;]*m", "", line) - padding = inner_width - len(visible_line) - 2 # -2 for padding spaces + visible_len = len(visible_line) + padding = inner_width - visible_len - 2 # -2 for padding spaces + if padding < 0: - # Truncate if too long - line = line[: inner_width - 5] + "..." - padding = 0 + # Line is too long - need to truncate intelligently + # Calculate how much to remove (visible characters only) + chars_to_remove = abs(padding) + 3 # +3 for "..." + target_len = visible_len - chars_to_remove + + if target_len <= 0: + # Line is way too long, just show "..." + line = "..." 
+                    padding = inner_width - 5  # 3 for "..." + 2 for padding
+                else:
+                    # Truncate the visible text, preserving ANSI codes for what remains
+                    # Split line into segments (ANSI code vs text)
+                    segments = re.split(r"(\033\[[0-9;]*m)", line)
+                    visible_chars = 0
+                    result_segments = []
+
+                    for segment in segments:
+                        if re.match(r"\033\[[0-9;]*m", segment):
+                            # ANSI code - include it without counting
+                            result_segments.append(segment)
+                        else:
+                            # Text segment - count visible characters
+                            remaining_space = target_len - visible_chars
+                            if remaining_space <= 0:
+                                break
+                            if len(segment) <= remaining_space:
+                                result_segments.append(segment)
+                                visible_chars += len(segment)
+                            else:
+                                # Truncate this segment at word boundary if possible
+                                truncated = segment[:remaining_space]
+                                # Try to truncate at last space to avoid mid-word cuts
+                                last_space = truncated.rfind(" ")
+                                if (
+                                    last_space > remaining_space * 0.7
+                                ):  # Only if space is in last 30%
+                                    truncated = truncated[:last_space]
+                                result_segments.append(truncated)
+                                visible_chars += len(truncated)
+                                break
+
+                    line = "".join(result_segments) + "..."
+                    # Re-measure: a word-boundary cut can leave the line shorter
+                    # than target_len, so compute padding from the actual visible
+                    # length instead of assuming an exact fill; otherwise the
+                    # right border drifts out of alignment
+                    padding = max(0, inner_width - (visible_chars + 3) - 2)
+
         lines.append(v + " " + line + " " * (padding + 1) + v)
 
     # Bottom border
diff --git a/apps/backend/ui/capabilities.py b/apps/backend/ui/capabilities.py
index ac8de510d0..26390abbf5 100644
--- a/apps/backend/ui/capabilities.py
+++ b/apps/backend/ui/capabilities.py
@@ -13,6 +13,61 @@ import sys
+
+def enable_windows_ansi_support() -> bool:
+    """
+    Enable ANSI escape sequence support on Windows.
+
+    Windows 10 (build 10586+) supports ANSI escape sequences natively,
+    but they must be explicitly enabled via the Windows API.
+
+    Returns:
+        True if ANSI support was enabled, False otherwise
+    """
+    if sys.platform != "win32":
+        return True  # Non-Windows always has ANSI support
+
+    try:
+        import ctypes
+        from ctypes import wintypes
+
+        # Windows constants
+        STD_OUTPUT_HANDLE = -11
+        STD_ERROR_HANDLE = -12
+        ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
+
+        kernel32 = ctypes.windll.kernel32
+
+        # Get handles
+        for handle_id in (STD_OUTPUT_HANDLE, STD_ERROR_HANDLE):
+            handle = kernel32.GetStdHandle(handle_id)
+            if handle == -1:
+                continue
+
+            # Get current console mode
+            mode = wintypes.DWORD()
+            if not kernel32.GetConsoleMode(handle, ctypes.byref(mode)):
+                continue
+
+            # Enable ANSI support if not already enabled
+            if not (mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING):
+                kernel32.SetConsoleMode(
+                    handle, mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING
+                )
+
+        return True
+    except (ImportError, AttributeError, OSError):
+        # Fall back to colorama if available
+        try:
+            import colorama
+
+            colorama.init()
+            return True
+        except ImportError:
+            pass
+
+    return False
+
+
 def configure_safe_encoding() -> None:
     """
     Configure stdout/stderr to handle Unicode safely on Windows.
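A minimal usage sketch for the new helper (the import path and the fallback
print are assumptions for illustration, not part of the patch):

    from ui.capabilities import enable_windows_ansi_support

    if enable_windows_ansi_support():
        print("\033[32mready\033[0m")  # ANSI sequences are safe to emit
    else:
        print("ready")  # plain-text fallback when ANSI can't be enabled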
@@ -54,8 +109,9 @@ def configure_safe_encoding() -> None: pass -# Configure safe encoding on module import +# Configure safe encoding and ANSI support on module import configure_safe_encoding() +WINDOWS_ANSI_ENABLED = enable_windows_ansi_support() def _is_fancy_ui_enabled() -> bool: diff --git a/apps/frontend/.env.example b/apps/frontend/.env.example index f01b56f27a..d5d246749d 100644 --- a/apps/frontend/.env.example +++ b/apps/frontend/.env.example @@ -19,6 +19,34 @@ # Shows detailed information about app update checks and downloads # DEBUG_UPDATER=true +# ============================================ +# SENTRY ERROR REPORTING +# ============================================ + +# Sentry DSN for anonymous error reporting +# If not set, error reporting is completely disabled (safe for forks) +# +# For official builds: Set in CI/CD secrets +# For local testing: Uncomment and add your DSN +# +# SENTRY_DSN=https://your-dsn@sentry.io/project-id + +# Force enable Sentry in development mode (normally disabled in dev) +# Only works when SENTRY_DSN is also set +# SENTRY_DEV=true + +# Trace sample rate for performance monitoring (0.0 to 1.0) +# Controls what percentage of transactions are sampled +# Default: 0.1 (10%) in production, 0 in development +# Set to 0 to disable performance monitoring entirely +# SENTRY_TRACES_SAMPLE_RATE=0.1 + +# Profile sample rate for profiling (0.0 to 1.0) +# Controls what percentage of sampled transactions include profiling data +# Default: 0.1 (10%) in production, 0 in development +# Set to 0 to disable profiling entirely +# SENTRY_PROFILES_SAMPLE_RATE=0.1 + # ============================================ # HOW TO USE # ============================================ diff --git a/apps/frontend/package-lock.json b/apps/frontend/package-lock.json index 9abc6c3090..e81abc2d9b 100644 --- a/apps/frontend/package-lock.json +++ b/apps/frontend/package-lock.json @@ -32,38 +32,38 @@ "@radix-ui/react-tooltip": "^1.2.8", "@tailwindcss/typography": "^0.5.19", "@tanstack/react-virtual": "^3.13.13", - "@xterm/addon-fit": "^0.11.0", - "@xterm/addon-serialize": "^0.14.0", - "@xterm/addon-web-links": "^0.12.0", - "@xterm/addon-webgl": "^0.19.0", - "@xterm/xterm": "^6.0.0", + "@xterm/addon-fit": "^0.10.0", + "@xterm/addon-serialize": "^0.13.0", + "@xterm/addon-web-links": "^0.11.0", + "@xterm/addon-webgl": "^0.18.0", + "@xterm/xterm": "^5.5.0", "chokidar": "^5.0.0", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "electron-log": "^5.4.3", "electron-updater": "^6.6.2", "i18next": "^25.7.3", - "lucide-react": "^0.562.0", + "lucide-react": "^0.560.0", "motion": "^12.23.26", "react": "^19.2.3", "react-dom": "^19.2.3", "react-i18next": "^16.5.0", "react-markdown": "^10.1.0", - "react-resizable-panels": "^4.2.0", + "react-resizable-panels": "^3.0.6", "remark-gfm": "^4.0.1", "semver": "^7.7.3", "tailwind-merge": "^3.4.0", "uuid": "^13.0.0", - "zod": "^4.2.1", "zustand": "^5.0.9" }, "devDependencies": { "@electron-toolkit/preload": "^3.0.2", "@electron-toolkit/utils": "^4.0.0", - "@electron/rebuild": "^4.0.2", + "@electron/rebuild": "^3.7.1", "@eslint/js": "^9.39.1", "@playwright/test": "^1.52.0", "@tailwindcss/postcss": "^4.1.17", + "@testing-library/jest-dom": "^6.9.1", "@testing-library/react": "^16.1.0", "@types/node": "^25.0.0", "@types/react": "^19.2.7", @@ -72,33 +72,32 @@ "@types/uuid": "^10.0.0", "@vitejs/plugin-react": "^5.1.2", "autoprefixer": "^10.4.22", - "cross-env": "^10.1.0", "electron": "^39.2.7", "electron-builder": "^26.0.12", "electron-vite": "^5.0.0", "eslint": 
"^9.39.1", "eslint-plugin-react": "^7.37.5", "eslint-plugin-react-hooks": "^7.0.1", - "globals": "^17.0.0", + "globals": "^16.5.0", "husky": "^9.1.7", - "jsdom": "^27.3.0", + "jsdom": "^26.0.0", "lint-staged": "^16.2.7", "postcss": "^8.5.6", "tailwindcss": "^4.1.17", "typescript": "^5.9.3", - "typescript-eslint": "^8.50.1", + "typescript-eslint": "^8.49.0", "vite": "^7.2.7", - "vitest": "^4.0.16" + "vitest": "^4.0.15" }, "engines": { "node": ">=24.0.0", "npm": ">=10.0.0" } }, - "node_modules/@acemir/cssom": { - "version": "0.9.30", - "resolved": "https://registry.npmjs.org/@acemir/cssom/-/cssom-0.9.30.tgz", - "integrity": "sha512-9CnlMCI0LmCIq0olalQqdWrJHPzm0/tw3gzOA9zJSgvFX7Xau3D24mAGa4BtwxwY69nsuJW6kQqqCzf/mEcQgg==", + "node_modules/@adobe/css-tools": { + "version": "4.4.4", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.4.tgz", + "integrity": "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==", "dev": true, "license": "MIT" }, @@ -116,59 +115,25 @@ } }, "node_modules/@asamuzakjp/css-color": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-4.1.1.tgz", - "integrity": "sha512-B0Hv6G3gWGMn0xKJ0txEi/jM5iFpT3MfDxmhZFb4W047GvytCf1DHQ1D69W3zHI4yWe2aTZAA0JnbMZ7Xc8DuQ==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz", + "integrity": "sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==", "dev": true, "license": "MIT", "dependencies": { - "@csstools/css-calc": "^2.1.4", - "@csstools/css-color-parser": "^3.1.0", - "@csstools/css-parser-algorithms": "^3.0.5", - "@csstools/css-tokenizer": "^3.0.4", - "lru-cache": "^11.2.4" + "@csstools/css-calc": "^2.1.3", + "@csstools/css-color-parser": "^3.0.9", + "@csstools/css-parser-algorithms": "^3.0.4", + "@csstools/css-tokenizer": "^3.0.3", + "lru-cache": "^10.4.3" } }, "node_modules/@asamuzakjp/css-color/node_modules/lru-cache": { - "version": "11.2.4", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", - "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": "20 || >=22" - } - }, - "node_modules/@asamuzakjp/dom-selector": { - "version": "6.7.6", - "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.7.6.tgz", - "integrity": "sha512-hBaJER6A9MpdG3WgdlOolHmbOYvSk46y7IQN/1+iqiCuUu6iWdQrs9DGKF8ocqsEqWujWf/V7b7vaDgiUmIvUg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@asamuzakjp/nwsapi": "^2.3.9", - "bidi-js": "^1.0.3", - "css-tree": "^3.1.0", - "is-potential-custom-element-name": "^1.0.1", - "lru-cache": "^11.2.4" - } - }, - "node_modules/@asamuzakjp/dom-selector/node_modules/lru-cache": { - "version": "11.2.4", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", - "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": "20 || >=22" - } - }, - "node_modules/@asamuzakjp/nwsapi": { - "version": "2.3.9", - "resolved": "https://registry.npmjs.org/@asamuzakjp/nwsapi/-/nwsapi-2.3.9.tgz", - "integrity": "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==", + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", 
+ "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", "dev": true, - "license": "MIT" + "license": "ISC" }, "node_modules/@babel/code-frame": { "version": "7.27.1", @@ -592,26 +557,6 @@ "@csstools/css-tokenizer": "^3.0.4" } }, - "node_modules/@csstools/css-syntax-patches-for-csstree": { - "version": "1.0.22", - "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.22.tgz", - "integrity": "sha512-qBcx6zYlhleiFfdtzkRgwNC7VVoAwfK76Vmsw5t+PbvtdknO9StgRk7ROvq9so1iqbdW4uLIDAsXRsTfUrIoOw==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - } - }, "node_modules/@csstools/css-tokenizer": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", @@ -741,6 +686,28 @@ "node": ">=10.12.0" } }, + "node_modules/@electron/asar/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/@electron/asar/node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", @@ -785,29 +752,6 @@ "node": ">=10" } }, - "node_modules/@electron/fuses/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/fuses/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/@electron/get": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/@electron/get/-/get-2.0.3.tgz", @@ -830,6 +774,31 @@ "global-agent": "^3.0.0" } }, + "node_modules/@electron/get/node_modules/fs-extra": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", + "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^4.0.0", + "universalify": "^0.1.0" + }, + "engines": { + "node": ">=6 <7 || >=8" + } + }, + "node_modules/@electron/get/node_modules/jsonfile": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", + "integrity": 
"sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", + "dev": true, + "license": "MIT", + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, "node_modules/@electron/get/node_modules/semver": { "version": "6.3.1", "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", @@ -840,6 +809,16 @@ "semver": "bin/semver.js" } }, + "node_modules/@electron/get/node_modules/universalify": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", + "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4.0.0" + } + }, "node_modules/@electron/node-gyp": { "version": "10.2.0-electron.1", "resolved": "git+ssh://git@github.com/electron/node-gyp.git#06b29aafb7708acef8b3669835c8a7857ebc92d2", @@ -865,581 +844,99 @@ "node": ">=12.13.0" } }, - "node_modules/@electron/node-gyp/node_modules/@npmcli/fs": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-2.1.2.tgz", - "integrity": "sha512-yOJKRvohFOaLqipNtwYB9WugyZKhC/DZC4VYPmpaCzDBrA8YpK3qHZ8/HGscMnE4GqbkLNuVcCnxkeQEdGt6LQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "@gar/promisify": "^1.1.3", - "semver": "^7.3.5" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", - "dev": true, - "license": "ISC" - }, - "node_modules/@electron/node-gyp/node_modules/agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "node_modules/@electron/notarize": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@electron/notarize/-/notarize-2.5.0.tgz", + "integrity": "sha512-jNT8nwH1f9X5GEITXaQ8IF/KdskvIkOFfB2CvwumsveVidzpSc+mvhhTMdAGSYF3O+Nq49lJ7y+ssODRXu06+A==", "dev": true, "license": "MIT", "dependencies": { - "debug": "4" + "debug": "^4.1.1", + "fs-extra": "^9.0.1", + "promise-retry": "^2.0.1" }, "engines": { - "node": ">= 6.0.0" + "node": ">= 10.0.0" } }, - "node_modules/@electron/node-gyp/node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "node_modules/@electron/notarize/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", "dev": true, "license": "MIT", "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/cacache": { - "version": "16.1.3", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-16.1.3.tgz", - "integrity": "sha512-/+Emcj9DAXxX4cwlLmRI9c166RuL3w30zp4R7Joiv2cQTtTtA+jeuCAjH3ZlGnYS3tKENSrKhAzVVP9GVyzeYQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "@npmcli/fs": "^2.1.0", - "@npmcli/move-file": "^2.0.0", - "chownr": "^2.0.0", - "fs-minipass": "^2.1.0", - "glob": 
"^8.0.1", - "infer-owner": "^1.0.4", - "lru-cache": "^7.7.1", - "minipass": "^3.1.6", - "minipass-collect": "^1.0.2", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "mkdirp": "^1.0.4", - "p-map": "^4.0.0", - "promise-inflight": "^1.0.1", - "rimraf": "^3.0.2", - "ssri": "^9.0.0", - "tar": "^6.1.11", - "unique-filename": "^2.0.0" + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" }, "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + "node": ">=10" } }, - "node_modules/@electron/node-gyp/node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "node_modules/@electron/osx-sign": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@electron/osx-sign/-/osx-sign-1.3.1.tgz", + "integrity": "sha512-BAfviURMHpmb1Yb50YbCxnOY0wfwaLXH5KJ4+80zS0gUkzDX3ec23naTlEqKsN+PwYn+a1cCzM7BJ4Wcd3sGzw==", "dev": true, - "license": "ISC", + "license": "BSD-2-Clause", "dependencies": { - "minipass": "^3.0.0" + "compare-version": "^0.1.2", + "debug": "^4.3.4", + "fs-extra": "^10.0.0", + "isbinaryfile": "^4.0.8", + "minimist": "^1.2.6", + "plist": "^3.0.5" + }, + "bin": { + "electron-osx-flat": "bin/electron-osx-flat.js", + "electron-osx-sign": "bin/electron-osx-sign.js" }, "engines": { - "node": ">= 8" + "node": ">=12.0.0" } }, - "node_modules/@electron/node-gyp/node_modules/glob": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", - "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", - "deprecated": "Glob versions prior to v9 are no longer supported", + "node_modules/@electron/osx-sign/node_modules/isbinaryfile": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-4.0.10.tgz", + "integrity": "sha512-iHrqe5shvBUcFbmZq9zOQHBoeOhZJu6RQGrDpBgenUm/Am+F3JM2MgQj+rK3Z601fzrL5gLZWtAPH2OBaSVcyw==", "dev": true, - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^5.0.1", - "once": "^1.3.0" - }, + "license": "MIT", "engines": { - "node": ">=12" + "node": ">= 8.0.0" }, "funding": { - "url": "https://github.com/sponsors/isaacs" + "url": "https://github.com/sponsors/gjtorikian/" } }, - "node_modules/@electron/node-gyp/node_modules/http-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", - "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "node_modules/@electron/rebuild": { + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/@electron/rebuild/-/rebuild-3.7.2.tgz", + "integrity": "sha512-19/KbIR/DAxbsCkiaGMXIdPnMCJLkcf8AvGnduJtWBs/CBwiAjY1apCqOLVxrXg+rtXFCngbXhBanWjxLUt1Mg==", "dev": true, "license": "MIT", "dependencies": { - "@tootallnate/once": "2", - "agent-base": "6", - "debug": "4" + "@electron/node-gyp": "git+https://github.com/electron/node-gyp.git#06b29aafb7708acef8b3669835c8a7857ebc92d2", + "@malept/cross-spawn-promise": "^2.0.0", + "chalk": "^4.0.0", + "debug": "^4.1.1", + "detect-libc": "^2.0.1", + "fs-extra": "^10.0.0", + "got": "^11.7.0", + "node-abi": "^3.45.0", + "node-api-version": "^0.2.0", + "ora": "^5.1.0", + "read-binary-file-arch": "^1.0.6", + "semver": "^7.3.5", + 
"tar": "^6.0.5", + "yargs": "^17.0.1" + }, + "bin": { + "electron-rebuild": "lib/cli.js" }, "engines": { - "node": ">= 6" - } - }, - "node_modules/@electron/node-gyp/node_modules/https-proxy-agent": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", - "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", - "dev": true, - "license": "MIT", - "dependencies": { - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/@electron/node-gyp/node_modules/lru-cache": { - "version": "7.18.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", - "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/@electron/node-gyp/node_modules/make-fetch-happen": { - "version": "10.2.1", - "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-10.2.1.tgz", - "integrity": "sha512-NgOPbRiaQM10DYXvN3/hhGVI2M5MtITFryzBGxHM5p4wnFxsVCbxkrBrDsk+EZ5OB4jEOT7AjDxtdF+KVEFT7w==", - "dev": true, - "license": "ISC", - "dependencies": { - "agentkeepalive": "^4.2.1", - "cacache": "^16.1.0", - "http-cache-semantics": "^4.1.0", - "http-proxy-agent": "^5.0.0", - "https-proxy-agent": "^5.0.0", - "is-lambda": "^1.0.1", - "lru-cache": "^7.7.1", - "minipass": "^3.1.6", - "minipass-collect": "^1.0.2", - "minipass-fetch": "^2.0.3", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.3", - "promise-retry": "^2.0.1", - "socks-proxy-agent": "^7.0.0", - "ssri": "^9.0.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@electron/node-gyp/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@electron/node-gyp/node_modules/minipass-collect": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", - "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", - "dev": true, - "license": "ISC", - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@electron/node-gyp/node_modules/minipass-fetch": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-2.1.2.tgz", - "integrity": "sha512-LT49Zi2/WMROHYoqGgdlQIZh8mLPZmOrN2NdJjMXxYe4nkN6FUyuPuOAOedNJDrx0IRGg9+4guZewtp8hE6TxA==", - "dev": true, - "license": "MIT", - "dependencies": { - "minipass": "^3.1.6", - "minipass-sized": "^1.0.3", - "minizlib": "^2.1.2" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - }, - "optionalDependencies": { - 
"encoding": "^0.1.13" - } - }, - "node_modules/@electron/node-gyp/node_modules/minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "dev": true, - "license": "MIT", - "dependencies": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@electron/node-gyp/node_modules/negotiator": { - "version": "0.6.4", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", - "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/@electron/node-gyp/node_modules/nopt": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-6.0.0.tgz", - "integrity": "sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g==", - "dev": true, - "license": "ISC", - "dependencies": { - "abbrev": "^1.0.0" - }, - "bin": { - "nopt": "bin/nopt.js" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "aggregate-error": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@electron/node-gyp/node_modules/proc-log": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-2.0.1.tgz", - "integrity": "sha512-Kcmo2FhfDTXdcbfDH76N7uBYHINxc/8GW7UAVuVP9I+Va3uHSerrnKV6dLooga/gh7GlgzuCCr/eoldnL1muGw==", - "dev": true, - "license": "ISC", - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "deprecated": "Rimraf versions prior to v4 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Glob versions prior to v9 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": 
"^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/@electron/node-gyp/node_modules/socks-proxy-agent": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz", - "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==", - "dev": true, - "license": "MIT", - "dependencies": { - "agent-base": "^6.0.2", - "debug": "^4.3.3", - "socks": "^2.6.2" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/@electron/node-gyp/node_modules/ssri": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-9.0.1.tgz", - "integrity": "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q==", - "dev": true, - "license": "ISC", - "dependencies": { - "minipass": "^3.1.1" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/unique-filename": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-2.0.1.tgz", - "integrity": "sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A==", - "dev": true, - "license": "ISC", - "dependencies": { - "unique-slug": "^3.0.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/unique-slug": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-3.0.0.tgz", - "integrity": "sha512-8EyMynh679x/0gqE9fT9oilG+qEt+ibFyqjuVTsZn1+CMxH+XLlpvr2UZx4nVcCwTpx81nICr2JQFkM+HPLq4w==", - "dev": true, - "license": "ISC", - "dependencies": { - "imurmurhash": "^0.1.4" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true, - "license": "ISC" - }, - "node_modules/@electron/notarize": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@electron/notarize/-/notarize-2.5.0.tgz", - "integrity": "sha512-jNT8nwH1f9X5GEITXaQ8IF/KdskvIkOFfB2CvwumsveVidzpSc+mvhhTMdAGSYF3O+Nq49lJ7y+ssODRXu06+A==", - "dev": true, - "license": "MIT", - "dependencies": { - "debug": "^4.1.1", - "fs-extra": "^9.0.1", - "promise-retry": "^2.0.1" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@electron/notarize/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - 
}, - "node_modules/@electron/notarize/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/notarize/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@electron/osx-sign": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@electron/osx-sign/-/osx-sign-1.3.1.tgz", - "integrity": "sha512-BAfviURMHpmb1Yb50YbCxnOY0wfwaLXH5KJ4+80zS0gUkzDX3ec23naTlEqKsN+PwYn+a1cCzM7BJ4Wcd3sGzw==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "compare-version": "^0.1.2", - "debug": "^4.3.4", - "fs-extra": "^10.0.0", - "isbinaryfile": "^4.0.8", - "minimist": "^1.2.6", - "plist": "^3.0.5" - }, - "bin": { - "electron-osx-flat": "bin/electron-osx-flat.js", - "electron-osx-sign": "bin/electron-osx-sign.js" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/@electron/osx-sign/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@electron/osx-sign/node_modules/isbinaryfile": { - "version": "4.0.10", - "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-4.0.10.tgz", - "integrity": "sha512-iHrqe5shvBUcFbmZq9zOQHBoeOhZJu6RQGrDpBgenUm/Am+F3JM2MgQj+rK3Z601fzrL5gLZWtAPH2OBaSVcyw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/gjtorikian/" - } - }, - "node_modules/@electron/osx-sign/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/osx-sign/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@electron/rebuild": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@electron/rebuild/-/rebuild-4.0.2.tgz", - "integrity": "sha512-8iZWVPvOpCdIc5Pj5udQV3PeO7liJVC7BBUSizl1HCfP7ZxYc9Kqz0c3PDNj2HQ5cQfJ5JaBeJIYKPjAvLn2Rg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@malept/cross-spawn-promise": "^2.0.0", - "debug": "^4.1.1", - "detect-libc": "^2.0.1", - "got": "^11.7.0", - "graceful-fs": "^4.2.11", - 
"node-abi": "^4.2.0", - "node-api-version": "^0.2.1", - "node-gyp": "^11.2.0", - "ora": "^5.1.0", - "read-binary-file-arch": "^1.0.6", - "semver": "^7.3.5", - "tar": "^6.0.5", - "yargs": "^17.0.1" - }, - "bin": { - "electron-rebuild": "lib/cli.js" - }, - "engines": { - "node": ">=22.12.0" + "node": ">=12.13.0" } }, "node_modules/@electron/universal": { @@ -1472,9 +969,9 @@ } }, "node_modules/@electron/universal/node_modules/fs-extra": { - "version": "11.3.3", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.3.tgz", - "integrity": "sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg==", + "version": "11.3.2", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.2.tgz", + "integrity": "sha512-Xr9F6z6up6Ws+NjzMCZc6WXg2YFRlrLP9NQDO3VQrWrfiojdhS56TzueT88ze0uBdCTwEIhQ3ptnmKeWGFAe0A==", "dev": true, "license": "MIT", "dependencies": { @@ -1486,19 +983,6 @@ "node": ">=14.14" } }, - "node_modules/@electron/universal/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, "node_modules/@electron/universal/node_modules/minimatch": { "version": "9.0.5", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", @@ -1515,16 +999,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/@electron/universal/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/@electron/windows-sign": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/@electron/windows-sign/-/windows-sign-1.2.2.tgz", @@ -1548,56 +1022,22 @@ } }, "node_modules/@electron/windows-sign/node_modules/fs-extra": { - "version": "11.3.3", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.3.tgz", - "integrity": "sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=14.14" - } - }, - "node_modules/@electron/windows-sign/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/windows-sign/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "version": "11.3.2", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.2.tgz", + "integrity": 
"sha512-Xr9F6z6up6Ws+NjzMCZc6WXg2YFRlrLP9NQDO3VQrWrfiojdhS56TzueT88ze0uBdCTwEIhQ3ptnmKeWGFAe0A==", "dev": true, "license": "MIT", "optional": true, "peer": true, + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, "engines": { - "node": ">= 10.0.0" + "node": ">=14.14" } }, - "node_modules/@epic-web/invariant": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@epic-web/invariant/-/invariant-1.0.0.tgz", - "integrity": "sha512-lrTPqgvfFQtR/eY/qkIzp98OGdNJu0m5ji3q/nJI8v3SXkRKEnWiOxMmbvcSoAIzv/cGiuvRy57k4suKQSAdwA==", - "dev": true, - "license": "MIT" - }, "node_modules/@esbuild/aix-ppc64": { "version": "0.25.12", "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz", @@ -2041,9 +1481,9 @@ } }, "node_modules/@eslint-community/eslint-utils": { - "version": "4.9.1", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", - "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", + "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", "dev": true, "license": "MIT", "dependencies": { @@ -2223,24 +1663,6 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, - "node_modules/@exodus/bytes": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/@exodus/bytes/-/bytes-1.7.0.tgz", - "integrity": "sha512-5i+BtvujK/vM07YCGDyz4C4AyDzLmhxHMtM5HpUyPRtJPBdFPsj290ffXW+UXY21/G7GtXeHD2nRmq0T1ShyQQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" - }, - "peerDependencies": { - "@exodus/crypto": "^1.0.0-rc.4" - }, - "peerDependenciesMeta": { - "@exodus/crypto": { - "optional": true - } - } - }, "node_modules/@floating-ui/core": { "version": "1.7.3", "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz", @@ -2379,6 +1801,19 @@ "node": ">=12" } }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, "node_modules/@isaacs/cliui/node_modules/ansi-styles": { "version": "6.2.3", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", @@ -2417,6 +1852,22 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { "version": "8.1.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", @@ -2435,19 +1886,6 @@ "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, - "node_modules/@isaacs/fs-minipass": { - "version": "4.0.1", - "resolved": 
"https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", - "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", - "dev": true, - "license": "ISC", - "dependencies": { - "minipass": "^7.0.4" - }, - "engines": { - "node": ">=18.0.0" - } - }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.13", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", @@ -2645,64 +2083,18 @@ "node": ">=10" } }, - "node_modules/@malept/flatpak-bundler/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@malept/flatpak-bundler/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@npmcli/agent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@npmcli/agent/-/agent-3.0.0.tgz", - "integrity": "sha512-S79NdEgDQd/NGCay6TCoVzXSj74skRZIKJcpJjC5lOq34SZzyI6MqtiiWoiVWoVrTcGjNeC4ipbh1VIHlpfF5Q==", - "dev": true, - "license": "ISC", - "dependencies": { - "agent-base": "^7.1.0", - "http-proxy-agent": "^7.0.0", - "https-proxy-agent": "^7.0.1", - "lru-cache": "^10.0.1", - "socks-proxy-agent": "^8.0.3" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/@npmcli/agent/node_modules/lru-cache": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "dev": true, - "license": "ISC" - }, "node_modules/@npmcli/fs": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-4.0.0.tgz", - "integrity": "sha512-/xGlezI6xfGO9NwuJlnwz/K14qD1kCSAGtacBHnGzeAIuJGazcp45KP5NuyARXoKb7cwulAGWVsbeSxdG/cb0Q==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-2.1.2.tgz", + "integrity": "sha512-yOJKRvohFOaLqipNtwYB9WugyZKhC/DZC4VYPmpaCzDBrA8YpK3qHZ8/HGscMnE4GqbkLNuVcCnxkeQEdGt6LQ==", "dev": true, "license": "ISC", "dependencies": { + "@gar/promisify": "^1.1.3", "semver": "^7.3.5" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/@npmcli/move-file": { @@ -2720,23 +2112,6 @@ "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, - "node_modules/@npmcli/move-file/node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "deprecated": "Rimraf versions prior to v4 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/@pkgjs/parseargs": { "version": "0.11.0", "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", @@ -3995,9 +3370,9 
@@ "license": "MIT" }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.54.0.tgz", - "integrity": "sha512-OywsdRHrFvCdvsewAInDKCNyR3laPA2mc9bRYJ6LBp5IyvF3fvXbbNR0bSzHlZVFtn6E0xw2oZlyjg4rKCVcng==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.4.tgz", + "integrity": "sha512-PWU3Y92H4DD0bOqorEPp1Y0tbzwAurFmIYpjcObv5axGVOtcTlB0b2UKMd2echo08MgN7jO8WQZSSysvfisFSQ==", "cpu": [ "arm" ], @@ -4009,9 +3384,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.54.0.tgz", - "integrity": "sha512-Skx39Uv+u7H224Af+bDgNinitlmHyQX1K/atIA32JP3JQw6hVODX5tkbi2zof/E69M1qH2UoN3Xdxgs90mmNYw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.4.tgz", + "integrity": "sha512-Gw0/DuVm3rGsqhMGYkSOXXIx20cC3kTlivZeuaGt4gEgILivykNyBWxeUV5Cf2tDA2nPLah26vq3emlRrWVbng==", "cpu": [ "arm64" ], @@ -4023,9 +3398,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.54.0.tgz", - "integrity": "sha512-k43D4qta/+6Fq+nCDhhv9yP2HdeKeP56QrUUTW7E6PhZP1US6NDqpJj4MY0jBHlJivVJD5P8NxrjuobZBJTCRw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.4.tgz", + "integrity": "sha512-+w06QvXsgzKwdVg5qRLZpTHh1bigHZIqoIUPtiqh05ZiJVUQ6ymOxaPkXTvRPRLH88575ZCRSRM3PwIoNma01Q==", "cpu": [ "arm64" ], @@ -4037,9 +3412,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.54.0.tgz", - "integrity": "sha512-cOo7biqwkpawslEfox5Vs8/qj83M/aZCSSNIWpVzfU2CYHa2G3P1UN5WF01RdTHSgCkri7XOlTdtk17BezlV3A==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.4.tgz", + "integrity": "sha512-EB4Na9G2GsrRNRNFPuxfwvDRDUwQEzJPpiK1vo2zMVhEeufZ1k7J1bKnT0JYDfnPC7RNZ2H5YNQhW6/p2QKATw==", "cpu": [ "x64" ], @@ -4051,9 +3426,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.54.0.tgz", - "integrity": "sha512-miSvuFkmvFbgJ1BevMa4CPCFt5MPGw094knM64W9I0giUIMMmRYcGW/JWZDriaw/k1kOBtsWh1z6nIFV1vPNtA==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.4.tgz", + "integrity": "sha512-bldA8XEqPcs6OYdknoTMaGhjytnwQ0NClSPpWpmufOuGPN5dDmvIa32FygC2gneKK4A1oSx86V1l55hyUWUYFQ==", "cpu": [ "arm64" ], @@ -4065,9 +3440,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.54.0.tgz", - "integrity": "sha512-KGXIs55+b/ZfZsq9aR026tmr/+7tq6VG6MsnrvF4H8VhwflTIuYh+LFUlIsRdQSgrgmtM3fVATzEAj4hBQlaqQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.4.tgz", + "integrity": "sha512-3T8GPjH6mixCd0YPn0bXtcuSXi1Lj+15Ujw2CEb7dd24j9thcKscCf88IV7n76WaAdorOzAgSSbuVRg4C8V8Qw==", "cpu": [ "x64" ], @@ -4079,9 +3454,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - 
"version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.54.0.tgz", - "integrity": "sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.4.tgz", + "integrity": "sha512-UPMMNeC4LXW7ZSHxeP3Edv09aLsFUMaD1TSVW6n1CWMECnUIJMFFB7+XC2lZTdPtvB36tYC0cJWc86mzSsaviw==", "cpu": [ "arm" ], @@ -4093,9 +3468,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.54.0.tgz", - "integrity": "sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.4.tgz", + "integrity": "sha512-H8uwlV0otHs5Q7WAMSoyvjV9DJPiy5nJ/xnHolY0QptLPjaSsuX7tw+SPIfiYH6cnVx3fe4EWFafo6gH6ekZKA==", "cpu": [ "arm" ], @@ -4107,9 +3482,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.54.0.tgz", - "integrity": "sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.4.tgz", + "integrity": "sha512-BLRwSRwICXz0TXkbIbqJ1ibK+/dSBpTJqDClF61GWIrxTXZWQE78ROeIhgl5MjVs4B4gSLPCFeD4xML9vbzvCQ==", "cpu": [ "arm64" ], @@ -4121,9 +3496,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.54.0.tgz", - "integrity": "sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.4.tgz", + "integrity": "sha512-6bySEjOTbmVcPJAywjpGLckK793A0TJWSbIa0sVwtVGfe/Nz6gOWHOwkshUIAp9j7wg2WKcA4Snu7Y1nUZyQew==", "cpu": [ "arm64" ], @@ -4135,9 +3510,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.54.0.tgz", - "integrity": "sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.4.tgz", + "integrity": "sha512-U0ow3bXYJZ5MIbchVusxEycBw7bO6C2u5UvD31i5IMTrnt2p4Fh4ZbHSdc/31TScIJQYHwxbj05BpevB3201ug==", "cpu": [ "loong64" ], @@ -4149,9 +3524,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.54.0.tgz", - "integrity": "sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.4.tgz", + "integrity": "sha512-iujDk07ZNwGLVn0YIWM80SFN039bHZHCdCCuX9nyx3Jsa2d9V/0Y32F+YadzwbvDxhSeVo9zefkoPnXEImnM5w==", "cpu": [ "ppc64" ], @@ -4163,9 +3538,9 @@ ] }, 
"node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.54.0.tgz", - "integrity": "sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.4.tgz", + "integrity": "sha512-MUtAktiOUSu+AXBpx1fkuG/Bi5rhlorGs3lw5QeJ2X3ziEGAq7vFNdWVde6XGaVqi0LGSvugwjoxSNJfHFTC0g==", "cpu": [ "riscv64" ], @@ -4177,9 +3552,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.54.0.tgz", - "integrity": "sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.4.tgz", + "integrity": "sha512-btm35eAbDfPtcFEgaXCI5l3c2WXyzwiE8pArhd66SDtoLWmgK5/M7CUxmUglkwtniPzwvWioBKKl6IXLbPf2sQ==", "cpu": [ "riscv64" ], @@ -4191,9 +3566,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.54.0.tgz", - "integrity": "sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.4.tgz", + "integrity": "sha512-uJlhKE9ccUTCUlK+HUz/80cVtx2RayadC5ldDrrDUFaJK0SNb8/cCmC9RhBhIWuZ71Nqj4Uoa9+xljKWRogdhA==", "cpu": [ "s390x" ], @@ -4205,9 +3580,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.54.0.tgz", - "integrity": "sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.4.tgz", + "integrity": "sha512-jjEMkzvASQBbzzlzf4os7nzSBd/cvPrpqXCUOqoeCh1dQ4BP3RZCJk8XBeik4MUln3m+8LeTJcY54C/u8wb3DQ==", "cpu": [ "x64" ], @@ -4219,9 +3594,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.54.0.tgz", - "integrity": "sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.4.tgz", + "integrity": "sha512-lu90KG06NNH19shC5rBPkrh6mrTpq5kviFylPBXQVpdEu0yzb0mDgyxLr6XdcGdBIQTH/UAhDJnL+APZTBu1aQ==", "cpu": [ "x64" ], @@ -4233,9 +3608,9 @@ ] }, "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.54.0.tgz", - "integrity": "sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.4.tgz", + "integrity": "sha512-dFDcmLwsUzhAm/dn0+dMOQZoONVYBtgik0VuY/d5IJUUb787L3Ko/ibvTvddqhb3RaB7vFEozYevHN4ox22R/w==", "cpu": [ "arm64" ], @@ 
-4247,9 +3622,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.54.0.tgz", - "integrity": "sha512-c2V0W1bsKIKfbLMBu/WGBz6Yci8nJ/ZJdheE0EwB73N3MvHYKiKGs3mVilX4Gs70eGeDaMqEob25Tw2Gb9Nqyw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.4.tgz", + "integrity": "sha512-WvUpUAWmUxZKtRnQWpRKnLW2DEO8HB/l8z6oFFMNuHndMzFTJEXzaYJ5ZAmzNw0L21QQJZsUQFt2oPf3ykAD/w==", "cpu": [ "arm64" ], @@ -4261,9 +3636,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.54.0.tgz", - "integrity": "sha512-woEHgqQqDCkAzrDhvDipnSirm5vxUXtSKDYTVpZG3nUdW/VVB5VdCYA2iReSj/u3yCZzXID4kuKG7OynPnB3WQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.4.tgz", + "integrity": "sha512-JGbeF2/FDU0x2OLySw/jgvkwWUo05BSiJK0dtuI4LyuXbz3wKiC1xHhLB1Tqm5VU6ZZDmAorj45r/IgWNWku5g==", "cpu": [ "ia32" ], @@ -4275,9 +3650,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.54.0.tgz", - "integrity": "sha512-dzAc53LOuFvHwbCEOS0rPbXp6SIhAf2txMP5p6mGyOXXw5mWY8NGGbPMPrs4P1WItkfApDathBj/NzMLUZ9rtQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.4.tgz", + "integrity": "sha512-zuuC7AyxLWLubP+mlUwEyR8M1ixW1ERNPHJfXm8x7eQNP4Pzkd7hS3qBuKBR70VRiQ04Kw8FNfRMF5TNxuZq2g==", "cpu": [ "x64" ], @@ -4289,9 +3664,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.54.0.tgz", - "integrity": "sha512-hYT5d3YNdSh3mbCU1gwQyPgQd3T2ne0A3KG8KSBdav5TiBg6eInVmV+TeR5uHufiIgSFg0XsOWGW5/RhNcSvPg==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.4.tgz", + "integrity": "sha512-Sbx45u/Lbb5RyptSbX7/3deP+/lzEmZ0BTSHxwxN/IMOZDZf8S0AGo0hJD5n/LQssxb5Z3B4og4P2X6Dd8acCA==", "cpu": [ "x64" ], @@ -4316,9 +3691,9 @@ } }, "node_modules/@standard-schema/spec": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", - "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz", + "integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==", "dev": true, "license": "MIT" }, @@ -4558,66 +3933,6 @@ "node": ">=14.0.0" } }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": { - "version": "1.7.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "dependencies": { - "@emnapi/wasi-threads": "1.1.0", - "tslib": "^2.4.0" - } - }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": { - "version": "1.7.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": { - "version": "1.1.0", - "dev": true, - "inBundle": 
true, - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": { - "version": "1.1.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "dependencies": { - "@emnapi/core": "^1.7.1", - "@emnapi/runtime": "^1.7.1", - "@tybys/wasm-util": "^0.10.1" - } - }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": { - "version": "0.10.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": { - "version": "2.8.1", - "dev": true, - "inBundle": true, - "license": "0BSD", - "optional": true - }, "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { "version": "4.1.18", "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.18.tgz", @@ -4679,12 +3994,12 @@ } }, "node_modules/@tanstack/react-virtual": { - "version": "3.13.14", - "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.13.14.tgz", - "integrity": "sha512-WG0d7mBD54eA7dgA3+sO5csS0B49QKqM6Gy5Rf31+Oq/LTKROQSao9m2N/vz1IqVragOKU5t5k1LAcqh/DfTxw==", + "version": "3.13.13", + "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.13.13.tgz", + "integrity": "sha512-4o6oPMDvQv+9gMi8rE6gWmsOjtUZUYIJHv7EB+GblyYdi8U6OqLl8rhHWIUZSL1dUU2dPwTdTgybCKf9EjIrQg==", "license": "MIT", "dependencies": { - "@tanstack/virtual-core": "3.13.14" + "@tanstack/virtual-core": "3.13.13" }, "funding": { "type": "github", @@ -4696,9 +4011,9 @@ } }, "node_modules/@tanstack/virtual-core": { - "version": "3.13.14", - "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.13.14.tgz", - "integrity": "sha512-b5Uvd8J2dc7ICeX9SRb/wkCxWk7pUwN214eEPAQsqrsktSKTCmyLxOQWSMgogBByXclZeAdgZ3k4o0fIYUIBqQ==", + "version": "3.13.13", + "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.13.13.tgz", + "integrity": "sha512-uQFoSdKKf5S8k51W5t7b2qpfkyIbdHMzAn+AMQvHPxKUPeo1SsGaA4JRISQT87jm28b7z8OEqPcg1IOZagQHcA==", "license": "MIT", "funding": { "type": "github", @@ -4726,6 +4041,33 @@ "node": ">=18" } }, + "node_modules/@testing-library/jest-dom": { + "version": "6.9.1", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.9.1.tgz", + "integrity": "sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@adobe/css-tools": "^4.4.0", + "aria-query": "^5.0.0", + "css.escape": "^1.5.1", + "dom-accessibility-api": "^0.6.3", + "picocolors": "^1.1.1", + "redent": "^3.0.0" + }, + "engines": { + "node": ">=14", + "npm": ">=6", + "yarn": ">=1" + } + }, + "node_modules/@testing-library/jest-dom/node_modules/dom-accessibility-api": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz", + "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==", + "dev": true, + "license": "MIT" + }, "node_modules/@testing-library/react": { "version": "16.3.1", "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.1.tgz", @@ -4931,9 +4273,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "25.0.3", - "resolved": "https://registry.npmjs.org/@types/node/-/node-25.0.3.tgz", - 
"integrity": "sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA==", + "version": "25.0.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.0.2.tgz", + "integrity": "sha512-gWEkeiyYE4vqjON/+Obqcoeffmk0NF15WSBwSs7zwVA2bAbTaE0SJ7P0WNGoJn8uE7fiaV5a7dKYIJriEqOrmA==", "dev": true, "license": "MIT", "dependencies": { @@ -5021,20 +4363,20 @@ } }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.51.0.tgz", - "integrity": "sha512-XtssGWJvypyM2ytBnSnKtHYOGT+4ZwTnBVl36TA4nRO2f4PRNGz5/1OszHzcZCvcBMh+qb7I06uoCmLTRdR9og==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.49.0.tgz", + "integrity": "sha512-JXij0vzIaTtCwu6SxTh8qBc66kmf1xs7pI4UOiMDFVct6q86G0Zs7KRcEoJgY3Cav3x5Tq0MF5jwgpgLqgKG3A==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.51.0", - "@typescript-eslint/type-utils": "8.51.0", - "@typescript-eslint/utils": "8.51.0", - "@typescript-eslint/visitor-keys": "8.51.0", + "@typescript-eslint/scope-manager": "8.49.0", + "@typescript-eslint/type-utils": "8.49.0", + "@typescript-eslint/utils": "8.49.0", + "@typescript-eslint/visitor-keys": "8.49.0", "ignore": "^7.0.0", "natural-compare": "^1.4.0", - "ts-api-utils": "^2.2.0" + "ts-api-utils": "^2.1.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5044,7 +4386,7 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "@typescript-eslint/parser": "^8.51.0", + "@typescript-eslint/parser": "^8.49.0", "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } @@ -5060,16 +4402,16 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.51.0.tgz", - "integrity": "sha512-3xP4XzzDNQOIqBMWogftkwxhg5oMKApqY0BAflmLZiFYHqyhSOxv/cd/zPQLTcCXr4AkaKb25joocY0BD1WC6A==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.49.0.tgz", + "integrity": "sha512-N9lBGA9o9aqb1hVMc9hzySbhKibHmB+N3IpoShyV6HyQYRGIhlrO5rQgttypi+yEeKsKI4idxC8Jw6gXKD4THA==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/scope-manager": "8.51.0", - "@typescript-eslint/types": "8.51.0", - "@typescript-eslint/typescript-estree": "8.51.0", - "@typescript-eslint/visitor-keys": "8.51.0", + "@typescript-eslint/scope-manager": "8.49.0", + "@typescript-eslint/types": "8.49.0", + "@typescript-eslint/typescript-estree": "8.49.0", + "@typescript-eslint/visitor-keys": "8.49.0", "debug": "^4.3.4" }, "engines": { @@ -5085,14 +4427,14 @@ } }, "node_modules/@typescript-eslint/project-service": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.51.0.tgz", - "integrity": "sha512-Luv/GafO07Z7HpiI7qeEW5NW8HUtZI/fo/kE0YbtQEFpJRUuR0ajcWfCE5bnMvL7QQFrmT/odMe8QZww8X2nfQ==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.49.0.tgz", + "integrity": "sha512-/wJN0/DKkmRUMXjZUXYZpD1NEQzQAAn9QWfGwo+Ai8gnzqH7tvqS7oNVdTjKqOcPyVIdZdyCMoqN66Ia789e7g==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/tsconfig-utils": "^8.51.0", - "@typescript-eslint/types": "^8.51.0", + "@typescript-eslint/tsconfig-utils": "^8.49.0", + 
"@typescript-eslint/types": "^8.49.0", "debug": "^4.3.4" }, "engines": { @@ -5107,14 +4449,14 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.51.0.tgz", - "integrity": "sha512-JhhJDVwsSx4hiOEQPeajGhCWgBMBwVkxC/Pet53EpBVs7zHHtayKefw1jtPaNRXpI9RA2uocdmpdfE7T+NrizA==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.49.0.tgz", + "integrity": "sha512-npgS3zi+/30KSOkXNs0LQXtsg9ekZ8OISAOLGWA/ZOEn0ZH74Ginfl7foziV8DT+D98WfQ5Kopwqb/PZOaIJGg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.51.0", - "@typescript-eslint/visitor-keys": "8.51.0" + "@typescript-eslint/types": "8.49.0", + "@typescript-eslint/visitor-keys": "8.49.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5125,9 +4467,9 @@ } }, "node_modules/@typescript-eslint/tsconfig-utils": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.51.0.tgz", - "integrity": "sha512-Qi5bSy/vuHeWyir2C8u/uqGMIlIDu8fuiYWv48ZGlZ/k+PRPHtaAu7erpc7p5bzw2WNNSniuxoMSO4Ar6V9OXw==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.49.0.tgz", + "integrity": "sha512-8prixNi1/6nawsRYxet4YOhnbW+W9FK/bQPxsGB1D3ZrDzbJ5FXw5XmzxZv82X3B+ZccuSxo/X8q9nQ+mFecWA==", "dev": true, "license": "MIT", "engines": { @@ -5142,17 +4484,17 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.51.0.tgz", - "integrity": "sha512-0XVtYzxnobc9K0VU7wRWg1yiUrw4oQzexCG2V2IDxxCxhqBMSMbjB+6o91A+Uc0GWtgjCa3Y8bi7hwI0Tu4n5Q==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.49.0.tgz", + "integrity": "sha512-KTExJfQ+svY8I10P4HdxKzWsvtVnsuCifU5MvXrRwoP2KOlNZ9ADNEWWsQTJgMxLzS5VLQKDjkCT/YzgsnqmZg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.51.0", - "@typescript-eslint/typescript-estree": "8.51.0", - "@typescript-eslint/utils": "8.51.0", + "@typescript-eslint/types": "8.49.0", + "@typescript-eslint/typescript-estree": "8.49.0", + "@typescript-eslint/utils": "8.49.0", "debug": "^4.3.4", - "ts-api-utils": "^2.2.0" + "ts-api-utils": "^2.1.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5167,9 +4509,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.51.0.tgz", - "integrity": "sha512-TizAvWYFM6sSscmEakjY3sPqGwxZRSywSsPEiuZF6d5GmGD9Gvlsv0f6N8FvAAA0CD06l3rIcWNbsN1e5F/9Ag==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.49.0.tgz", + "integrity": "sha512-e9k/fneezorUo6WShlQpMxXh8/8wfyc+biu6tnAqA81oWrEic0k21RHzP9uqqpyBBeBKu4T+Bsjy9/b8u7obXQ==", "dev": true, "license": "MIT", "engines": { @@ -5181,21 +4523,21 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.51.0.tgz", - "integrity": "sha512-1qNjGqFRmlq0VW5iVlcyHBbCjPB7y6SxpBkrbhNWMy/65ZoncXCEPJxkRZL8McrseNH6lFhaxCIaX+vBuFnRng==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.49.0.tgz", + 
"integrity": "sha512-jrLdRuAbPfPIdYNppHJ/D0wN+wwNfJ32YTAm10eJVsFmrVpXQnDWBn8niCSMlWjvml8jsce5E/O+86IQtTbJWA==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/project-service": "8.51.0", - "@typescript-eslint/tsconfig-utils": "8.51.0", - "@typescript-eslint/types": "8.51.0", - "@typescript-eslint/visitor-keys": "8.51.0", + "@typescript-eslint/project-service": "8.49.0", + "@typescript-eslint/tsconfig-utils": "8.49.0", + "@typescript-eslint/types": "8.49.0", + "@typescript-eslint/visitor-keys": "8.49.0", "debug": "^4.3.4", "minimatch": "^9.0.4", "semver": "^7.6.0", "tinyglobby": "^0.2.15", - "ts-api-utils": "^2.2.0" + "ts-api-utils": "^2.1.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5235,16 +4577,16 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.51.0.tgz", - "integrity": "sha512-11rZYxSe0zabiKaCP2QAwRf/dnmgFgvTmeDTtZvUvXG3UuAdg/GU02NExmmIXzz3vLGgMdtrIosI84jITQOxUA==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.49.0.tgz", + "integrity": "sha512-N3W7rJw7Rw+z1tRsHZbK395TWSYvufBXumYtEGzypgMUthlg0/hmCImeA8hgO2d2G4pd7ftpxxul2J8OdtdaFA==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", - "@typescript-eslint/scope-manager": "8.51.0", - "@typescript-eslint/types": "8.51.0", - "@typescript-eslint/typescript-estree": "8.51.0" + "@typescript-eslint/scope-manager": "8.49.0", + "@typescript-eslint/types": "8.49.0", + "@typescript-eslint/typescript-estree": "8.49.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5259,13 +4601,13 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.51.0.tgz", - "integrity": "sha512-mM/JRQOzhVN1ykejrvwnBRV3+7yTKK8tVANVN3o1O0t0v7o+jqdVu9crPy5Y9dov15TJk/FTIgoUGHrTOVL3Zg==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.49.0.tgz", + "integrity": "sha512-LlKaciDe3GmZFphXIc79THF/YYBugZ7FS1pO581E/edlVVNbZKDy93evqmrfQ9/Y4uN0vVhX4iuchq26mK/iiA==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.51.0", + "@typescript-eslint/types": "8.49.0", "eslint-visitor-keys": "^4.2.1" }, "engines": { @@ -5304,16 +4646,16 @@ } }, "node_modules/@vitest/expect": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.16.tgz", - "integrity": "sha512-eshqULT2It7McaJkQGLkPjPjNph+uevROGuIMJdG3V+0BSR2w9u6J9Lwu+E8cK5TETlfou8GRijhafIMhXsimA==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.15.tgz", + "integrity": "sha512-Gfyva9/GxPAWXIWjyGDli9O+waHDC0Q0jaLdFP1qPAUUfo1FEXPXUfUkp3eZA0sSq340vPycSyOlYUeM15Ft1w==", "dev": true, "license": "MIT", "dependencies": { "@standard-schema/spec": "^1.0.0", "@types/chai": "^5.2.2", - "@vitest/spy": "4.0.16", - "@vitest/utils": "4.0.16", + "@vitest/spy": "4.0.15", + "@vitest/utils": "4.0.15", "chai": "^6.2.1", "tinyrainbow": "^3.0.3" }, @@ -5322,13 +4664,13 @@ } }, "node_modules/@vitest/mocker": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.16.tgz", - "integrity": "sha512-yb6k4AZxJTB+q9ycAvsoxGn+j/po0UaPgajllBgt1PzoMAAmJGYFdDk0uCcRcxb3BrME34I6u8gHZTQlkqSZpg==", + "version": "4.0.15", + "resolved": 
"https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.15.tgz", + "integrity": "sha512-CZ28GLfOEIFkvCFngN8Sfx5h+Se0zN+h4B7yOsPVCcgtiO7t5jt9xQh2E1UkFep+eb9fjyMfuC5gBypwb07fvQ==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/spy": "4.0.16", + "@vitest/spy": "4.0.15", "estree-walker": "^3.0.3", "magic-string": "^0.30.21" }, @@ -5349,9 +4691,9 @@ } }, "node_modules/@vitest/pretty-format": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.16.tgz", - "integrity": "sha512-eNCYNsSty9xJKi/UdVD8Ou16alu7AYiS2fCPRs0b1OdhJiV89buAXQLpTbe+X8V9L6qrs9CqyvU7OaAopJYPsA==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.15.tgz", + "integrity": "sha512-SWdqR8vEv83WtZcrfLNqlqeQXlQLh2iilO1Wk1gv4eiHKjEzvgHb2OVc3mIPyhZE6F+CtfYjNlDJwP5MN6Km7A==", "dev": true, "license": "MIT", "dependencies": { @@ -5362,13 +4704,13 @@ } }, "node_modules/@vitest/runner": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.16.tgz", - "integrity": "sha512-VWEDm5Wv9xEo80ctjORcTQRJ539EGPB3Pb9ApvVRAY1U/WkHXmmYISqU5E79uCwcW7xYUV38gwZD+RV755fu3Q==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.15.tgz", + "integrity": "sha512-+A+yMY8dGixUhHmNdPUxOh0la6uVzun86vAbuMT3hIDxMrAOmn5ILBHm8ajrqHE0t8R9T1dGnde1A5DTnmi3qw==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/utils": "4.0.16", + "@vitest/utils": "4.0.15", "pathe": "^2.0.3" }, "funding": { @@ -5376,13 +4718,13 @@ } }, "node_modules/@vitest/snapshot": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.16.tgz", - "integrity": "sha512-sf6NcrYhYBsSYefxnry+DR8n3UV4xWZwWxYbCJUt2YdvtqzSPR7VfGrY0zsv090DAbjFZsi7ZaMi1KnSRyK1XA==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.15.tgz", + "integrity": "sha512-A7Ob8EdFZJIBjLjeO0DZF4lqR6U7Ydi5/5LIZ0xcI+23lYlsYJAfGn8PrIWTYdZQRNnSRlzhg0zyGu37mVdy5g==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/pretty-format": "4.0.16", + "@vitest/pretty-format": "4.0.15", "magic-string": "^0.30.21", "pathe": "^2.0.3" }, @@ -5391,9 +4733,9 @@ } }, "node_modules/@vitest/spy": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.16.tgz", - "integrity": "sha512-4jIOWjKP0ZUaEmJm00E0cOBLU+5WE0BpeNr3XN6TEF05ltro6NJqHWxXD0kA8/Zc8Nh23AT8WQxwNG+WeROupw==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.15.tgz", + "integrity": "sha512-+EIjOJmnY6mIfdXtE/bnozKEvTC4Uczg19yeZ2vtCz5Yyb0QQ31QWVQ8hswJ3Ysx/K2EqaNsVanjr//2+P3FHw==", "dev": true, "license": "MIT", "funding": { @@ -5401,13 +4743,13 @@ } }, "node_modules/@vitest/utils": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.16.tgz", - "integrity": "sha512-h8z9yYhV3e1LEfaQ3zdypIrnAg/9hguReGZoS7Gl0aBG5xgA410zBqECqmaF/+RkTggRsfnzc1XaAHA6bmUufA==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.15.tgz", + "integrity": "sha512-HXjPW2w5dxhTD0dLwtYHDnelK3j8sR8cWIaLxr22evTyY6q8pRCjZSmhRWVjBaOVXChQd6AwMzi9pucorXCPZA==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/pretty-format": "4.0.16", + "@vitest/pretty-format": "4.0.15", "tinyrainbow": "^3.0.3" }, "funding": { @@ -5425,37 +4767,47 @@ } }, "node_modules/@xterm/addon-fit": { - "version": "0.11.0", - "resolved": 
"https://registry.npmjs.org/@xterm/addon-fit/-/addon-fit-0.11.0.tgz", - "integrity": "sha512-jYcgT6xtVYhnhgxh3QgYDnnNMYTcf8ElbxxFzX0IZo+vabQqSPAjC3c1wJrKB5E19VwQei89QCiZZP86DCPF7g==", - "license": "MIT" + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-fit/-/addon-fit-0.10.0.tgz", + "integrity": "sha512-UFYkDm4HUahf2lnEyHvio51TNGiLK66mqP2JoATy7hRZeXaGMRDr00JiSF7m63vR5WKATF605yEggJKsw0JpMQ==", + "license": "MIT", + "peerDependencies": { + "@xterm/xterm": "^5.0.0" + } }, "node_modules/@xterm/addon-serialize": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@xterm/addon-serialize/-/addon-serialize-0.14.0.tgz", - "integrity": "sha512-uteyTU1EkrQa2Ux6P/uFl2fzmXI46jy5uoQMKEOM0fKTyiW7cSn0WrFenHm5vO5uEXX/GpwW/FgILvv3r0WbkA==", - "license": "MIT" + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-serialize/-/addon-serialize-0.13.0.tgz", + "integrity": "sha512-kGs8o6LWAmN1l2NpMp01/YkpxbmO4UrfWybeGu79Khw5K9+Krp7XhXbBTOTc3GJRRhd6EmILjpR8k5+odY39YQ==", + "license": "MIT", + "peerDependencies": { + "@xterm/xterm": "^5.0.0" + } }, "node_modules/@xterm/addon-web-links": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@xterm/addon-web-links/-/addon-web-links-0.12.0.tgz", - "integrity": "sha512-4Smom3RPyVp7ZMYOYDoC/9eGJJJqYhnPLGGqJ6wOBfB8VxPViJNSKdgRYb8NpaM6YSelEKbA2SStD7lGyqaobw==", - "license": "MIT" + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-web-links/-/addon-web-links-0.11.0.tgz", + "integrity": "sha512-nIHQ38pQI+a5kXnRaTgwqSHnX7KE6+4SVoceompgHL26unAxdfP6IPqUTSYPQgSwM56hsElfoNrrW5V7BUED/Q==", + "license": "MIT", + "peerDependencies": { + "@xterm/xterm": "^5.0.0" + } }, "node_modules/@xterm/addon-webgl": { - "version": "0.19.0", - "resolved": "https://registry.npmjs.org/@xterm/addon-webgl/-/addon-webgl-0.19.0.tgz", - "integrity": "sha512-b3fMOsyLVuCeNJWxolACEUED0vm7qC0cy4wRvf3oURSzDTYVQiGPhTnhWZwIHdvC48Y+oLhvYXnY4XDXPoJo6A==", - "license": "MIT" + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-webgl/-/addon-webgl-0.18.0.tgz", + "integrity": "sha512-xCnfMBTI+/HKPdRnSOHaJDRqEpq2Ugy8LEj9GiY4J3zJObo3joylIFaMvzBwbYRg8zLtkO0KQaStCeSfoaI2/w==", + "license": "MIT", + "peerDependencies": { + "@xterm/xterm": "^5.0.0" + } }, "node_modules/@xterm/xterm": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-6.0.0.tgz", - "integrity": "sha512-TQwDdQGtwwDt+2cgKDLn0IRaSxYu1tSUjgKarSDkUM0ZNiSRXFpjxEsvc/Zgc5kq5omJ+V0a8/kIM2WD3sMOYg==", + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-5.5.0.tgz", + "integrity": "sha512-hqJHYaQb5OptNunnyAnkHyM8aCjZ1MEIDTQu1iIbbTD/xops91NB5yq1ZK/dC2JDbVWtF23zUtl9JE2NqwT87A==", "license": "MIT", - "workspaces": [ - "addons/*" - ] + "peer": true }, "node_modules/7zip-bin": { "version": "5.2.0", @@ -5465,14 +4817,11 @@ "license": "MIT" }, "node_modules/abbrev": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-3.0.1.tgz", - "integrity": "sha512-AO2ac6pjRB3SJmGJo+v5/aK6Omggp6fsLrs6wN9bd35ulu4cCwaAU9+7ZhXjeqHVkaHThLuzH0nZr0YpCDhygg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", "dev": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } + "license": "ISC" }, "node_modules/acorn": { "version": "8.15.0", @@ -5680,63 +5029,12 @@ "semver": "^7.3.5", "tar": 
"^6.0.5", "yargs": "^17.0.1" - }, - "bin": { - "electron-rebuild": "lib/cli.js" - }, - "engines": { - "node": ">=12.13.0" - } - }, - "node_modules/app-builder-lib/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/app-builder-lib/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/app-builder-lib/node_modules/node-abi": { - "version": "3.85.0", - "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.85.0.tgz", - "integrity": "sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==", - "dev": true, - "license": "MIT", - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/app-builder-lib/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", + }, + "bin": { + "electron-rebuild": "lib/cli.js" + }, "engines": { - "node": ">= 10.0.0" + "node": ">=12.13.0" } }, "node_modules/argparse": { @@ -6074,25 +5372,15 @@ "license": "MIT" }, "node_modules/baseline-browser-mapping": { - "version": "2.9.11", - "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.11.tgz", - "integrity": "sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ==", + "version": "2.9.7", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.7.tgz", + "integrity": "sha512-k9xFKplee6KIio3IDbwj+uaCLpqzOwakOgmqzPezM0sFJlFKcg30vk2wOiAJtkTSfx0SSQDSe8q+mWA/fSH5Zg==", "dev": true, "license": "Apache-2.0", "bin": { "baseline-browser-mapping": "dist/cli.js" } }, - "node_modules/bidi-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/bidi-js/-/bidi-js-1.0.3.tgz", - "integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==", - "dev": true, - "license": "MIT", - "dependencies": { - "require-from-string": "^2.0.2" - } - }, "node_modules/bl": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", @@ -6253,44 +5541,6 @@ "node": ">=12.0.0" } }, - "node_modules/builder-util/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/builder-util/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": 
"https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/builder-util/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/cac": { "version": "6.7.14", "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", @@ -6302,118 +5552,43 @@ } }, "node_modules/cacache": { - "version": "19.0.1", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-19.0.1.tgz", - "integrity": "sha512-hdsUxulXCi5STId78vRVYEtDAjq99ICAUktLTeTYsLoTE6Z8dS0c8pWNCxwdrk9YfJeobDZc2Y186hD/5ZQgFQ==", + "version": "16.1.3", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-16.1.3.tgz", + "integrity": "sha512-/+Emcj9DAXxX4cwlLmRI9c166RuL3w30zp4R7Joiv2cQTtTtA+jeuCAjH3ZlGnYS3tKENSrKhAzVVP9GVyzeYQ==", "dev": true, "license": "ISC", "dependencies": { - "@npmcli/fs": "^4.0.0", - "fs-minipass": "^3.0.0", - "glob": "^10.2.2", - "lru-cache": "^10.0.1", - "minipass": "^7.0.3", - "minipass-collect": "^2.0.1", + "@npmcli/fs": "^2.1.0", + "@npmcli/move-file": "^2.0.0", + "chownr": "^2.0.0", + "fs-minipass": "^2.1.0", + "glob": "^8.0.1", + "infer-owner": "^1.0.4", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", - "p-map": "^7.0.2", - "ssri": "^12.0.0", - "tar": "^7.4.3", - "unique-filename": "^4.0.0" + "mkdirp": "^1.0.4", + "p-map": "^4.0.0", + "promise-inflight": "^1.0.1", + "rimraf": "^3.0.2", + "ssri": "^9.0.0", + "tar": "^6.1.11", + "unique-filename": "^2.0.0" }, "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/cacache/node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/cacache/node_modules/chownr": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", - "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/cacache/node_modules/glob": { - "version": "10.5.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", - "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", - "dev": true, - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/cacache/node_modules/lru-cache": { - "version": "10.4.3", - "resolved": 
"https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/cacache/node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", "dev": true, "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/cacache/node_modules/tar": { - "version": "7.5.2", - "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.2.tgz", - "integrity": "sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/fs-minipass": "^4.0.0", - "chownr": "^3.0.0", - "minipass": "^7.1.2", - "minizlib": "^3.1.0", - "yallist": "^5.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/cacache/node_modules/yallist": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", - "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", - "dev": true, - "license": "BlueOak-1.0.0", "engines": { - "node": ">=18" + "node": ">=12" } }, "node_modules/cacheable-lookup": { @@ -6506,9 +5681,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001762", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001762.tgz", - "integrity": "sha512-PxZwGNvH7Ak8WX5iXzoK1KPZttBXNPuaOvI2ZYU7NrlM+d9Ov+TUvlLOBNGzVXAntMSMMlJPd+jY6ovrVjSmUw==", + "version": "1.0.30001760", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001760.tgz", + "integrity": "sha512-7AAMPcueWELt1p3mi13HR/LHH0TJLT11cnwDJEs3xA4+CK/PLKeO9Kl1oru24htkyUKtkGCvAx4ohB0Ttry8Dw==", "dev": true, "funding": [ { @@ -6537,9 +5712,9 @@ } }, "node_modules/chai": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", - "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==", + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.1.tgz", + "integrity": "sha512-p4Z49OGG5W/WBCPSS/dH3jQ73kD6tiMmUM+bckNK6Jr5JHMG3k9bg/BvKR8lKmtVBKmOiuVaV2ws8s9oSbwysg==", "dev": true, "license": "MIT", "engines": { @@ -6674,19 +5849,16 @@ } }, "node_modules/cli-cursor": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz", - "integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", "dev": true, "license": "MIT", "dependencies": { - "restore-cursor": "^5.0.0" + "restore-cursor": "^3.1.0" }, "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=8" } }, 
"node_modules/cli-spinners": { @@ -6735,37 +5907,6 @@ "node": ">=12" } }, - "node_modules/cliui/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, "node_modules/clone": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", @@ -6933,6 +6074,16 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/config-file-ts/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/convert-source-map": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", @@ -6968,24 +6119,6 @@ "optional": true, "peer": true }, - "node_modules/cross-env": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/cross-env/-/cross-env-10.1.0.tgz", - "integrity": "sha512-GsYosgnACZTADcmEyJctkJIoqAhHjttw7RsFrVoJNXbsWWqaq6Ym+7kZjq6mS45O0jij6vtiReppKQEtqWy6Dw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@epic-web/invariant": "^1.0.0", - "cross-spawn": "^7.0.6" - }, - "bin": { - "cross-env": "dist/bin/cross-env.js", - "cross-env-shell": "dist/bin/cross-env-shell.js" - }, - "engines": { - "node": ">=20" - } - }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -7001,19 +6134,12 @@ "node": ">= 8" } }, - "node_modules/css-tree": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-3.1.0.tgz", - "integrity": "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==", + "node_modules/css.escape": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", + "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==", "dev": true, - "license": "MIT", - "dependencies": { - "mdn-data": "2.12.2", - "source-map-js": "^1.0.1" - }, - "engines": { - "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" - } + "license": "MIT" }, "node_modules/cssesc": { "version": "3.0.0", @@ -7028,29 +6154,17 @@ } }, "node_modules/cssstyle": { - "version": "5.3.6", - "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-5.3.6.tgz", - "integrity": "sha512-legscpSpgSAeGEe0TNcai97DKt9Vd9AsAdOL7Uoetb52Ar/8eJm3LIa39qpv8wWzLFlNG4vVvppQM+teaMPj3A==", + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.6.0.tgz", + "integrity": 
"sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==", "dev": true, "license": "MIT", "dependencies": { - "@asamuzakjp/css-color": "^4.1.1", - "@csstools/css-syntax-patches-for-csstree": "^1.0.21", - "css-tree": "^3.1.0", - "lru-cache": "^11.2.4" + "@asamuzakjp/css-color": "^3.2.0", + "rrweb-cssom": "^0.8.0" }, "engines": { - "node": ">=20" - } - }, - "node_modules/cssstyle/node_modules/lru-cache": { - "version": "11.2.4", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", - "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": "20 || >=22" + "node": ">=18" } }, "node_modules/csstype": { @@ -7060,17 +6174,17 @@ "license": "MIT" }, "node_modules/data-urls": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-6.0.0.tgz", - "integrity": "sha512-BnBS08aLUM+DKamupXs3w2tJJoqU+AkaE/+6vQxi/G/DPmIZFJJp9Dkb1kM03AZx8ADehDUZgsNxju3mPXZYIA==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz", + "integrity": "sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==", "dev": true, "license": "MIT", "dependencies": { "whatwg-mimetype": "^4.0.0", - "whatwg-url": "^15.0.0" + "whatwg-url": "^14.0.0" }, "engines": { - "node": ">=20" + "node": ">=18" } }, "node_modules/data-view-buffer": { @@ -7336,63 +6450,25 @@ "brace-expansion": "^1.1.7" }, "engines": { - "node": "*" - } - }, - "node_modules/dmg-builder": { - "version": "26.0.12", - "resolved": "https://registry.npmjs.org/dmg-builder/-/dmg-builder-26.0.12.tgz", - "integrity": "sha512-59CAAjAhTaIMCN8y9kD573vDkxbs1uhDcrFLHSgutYdPcGOU35Rf95725snvzEOy4BFB7+eLJ8djCNPmGwG67w==", - "dev": true, - "license": "MIT", - "dependencies": { - "app-builder-lib": "26.0.12", - "builder-util": "26.0.11", - "builder-util-runtime": "9.3.1", - "fs-extra": "^10.1.0", - "iconv-lite": "^0.6.2", - "js-yaml": "^4.1.0" - }, - "optionalDependencies": { - "dmg-license": "^1.0.11" - } - }, - "node_modules/dmg-builder/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" + "node": "*" } }, - "node_modules/dmg-builder/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "node_modules/dmg-builder": { + "version": "26.0.12", + "resolved": "https://registry.npmjs.org/dmg-builder/-/dmg-builder-26.0.12.tgz", + "integrity": "sha512-59CAAjAhTaIMCN8y9kD573vDkxbs1uhDcrFLHSgutYdPcGOU35Rf95725snvzEOy4BFB7+eLJ8djCNPmGwG67w==", "dev": true, "license": "MIT", "dependencies": { - "universalify": "^2.0.0" + "app-builder-lib": "26.0.12", + "builder-util": "26.0.11", + "builder-util-runtime": "9.3.1", + "fs-extra": "^10.1.0", + "iconv-lite": "^0.6.2", + "js-yaml": "^4.1.0" }, "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/dmg-builder/node_modules/universalify": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" + "dmg-license": "^1.0.11" } }, "node_modules/dmg-license": { @@ -7568,44 +6644,6 @@ "electron-winstaller": "5.4.0" } }, - "node_modules/electron-builder/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/electron-builder/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-builder/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/electron-log": { "version": "5.4.3", "resolved": "https://registry.npmjs.org/electron-log/-/electron-log-5.4.3.tgz", @@ -7632,44 +6670,6 @@ "mime": "^2.5.2" } }, - "node_modules/electron-publish/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/electron-publish/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-publish/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/electron-to-chromium": { "version": "1.5.267", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz", @@ -7693,41 +6693,6 @@ "tiny-typed-emitter": "^2.1.0" } }, - "node_modules/electron-updater/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "license": "MIT", - "dependencies": { - 
"graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/electron-updater/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-updater/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/electron-vite": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/electron-vite/-/electron-vite-5.0.0.tgz", @@ -7796,6 +6761,28 @@ "node": ">=6 <7 || >=8" } }, + "node_modules/electron-winstaller/node_modules/jsonfile": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", + "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", + "dev": true, + "license": "MIT", + "peer": true, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/electron-winstaller/node_modules/universalify": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", + "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">= 4.0.0" + } + }, "node_modules/electron/node_modules/@types/node": { "version": "22.19.3", "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.3.tgz", @@ -8353,9 +7340,9 @@ } }, "node_modules/esquery": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", - "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", "dev": true, "license": "BSD-3-Clause", "dependencies": { @@ -8511,6 +7498,24 @@ "pend": "~1.2.0" } }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, "node_modules/file-entry-cache": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", @@ -8641,6 +7646,19 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + 
"funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/form-data": { "version": "4.0.5", "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", @@ -8700,31 +7718,30 @@ } }, "node_modules/fs-extra": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", - "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", - "dev": true, + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", "license": "MIT", "dependencies": { "graceful-fs": "^4.2.0", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" }, "engines": { - "node": ">=6 <7 || >=8" + "node": ">=12" } }, "node_modules/fs-minipass": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-3.0.3.tgz", - "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", "dev": true, "license": "ISC", "dependencies": { - "minipass": "^7.0.3" + "minipass": "^3.0.0" }, "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + "node": ">= 8" } }, "node_modules/fs.realpath": { @@ -8916,9 +7933,9 @@ } }, "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", + "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, "license": "ISC", @@ -8926,12 +7943,11 @@ "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "minimatch": "^5.0.1", + "once": "^1.3.0" }, "engines": { - "node": "*" + "node": ">=12" }, "funding": { "url": "https://github.com/sponsors/isaacs" @@ -8950,17 +7966,27 @@ "node": ">=10.13.0" } }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, "node_modules/glob/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", "dev": true, "license": "ISC", "dependencies": { - "brace-expansion": "^1.1.7" + "brace-expansion": "^2.0.1" }, "engines": { - "node": "*" + "node": ">=10" } }, "node_modules/global-agent": { @@ -8983,9 +8009,9 @@ } 
}, "node_modules/globals": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-17.0.0.tgz", - "integrity": "sha512-gv5BeD2EssA793rlFWVPMMCqefTlpusw6/2TbAVMy0FzcG8wKJn4O+NqJ4+XWmmwrayJgw5TzrmWjFgmz1XPqw==", + "version": "16.5.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-16.5.0.tgz", + "integrity": "sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==", "dev": true, "license": "MIT", "engines": { @@ -9242,16 +8268,16 @@ "license": "ISC" }, "node_modules/html-encoding-sniffer": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-6.0.0.tgz", - "integrity": "sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz", + "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==", "dev": true, "license": "MIT", "dependencies": { - "@exodus/bytes": "^1.6.0" + "whatwg-encoding": "^3.1.1" }, "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + "node": ">=18" } }, "node_modules/html-parse-stringify": { @@ -10152,35 +9178,35 @@ } }, "node_modules/jsdom": { - "version": "27.4.0", - "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-27.4.0.tgz", - "integrity": "sha512-mjzqwWRD9Y1J1KUi7W97Gja1bwOOM5Ug0EZ6UDK3xS7j7mndrkwozHtSblfomlzyB4NepioNt+B2sOSzczVgtQ==", + "version": "26.1.0", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-26.1.0.tgz", + "integrity": "sha512-Cvc9WUhxSMEo4McES3P7oK3QaXldCfNWp7pl2NNeiIFlCoLr3kfq9kb1fxftiwk1FLV7CvpvDfonxtzUDeSOPg==", "dev": true, "license": "MIT", "dependencies": { - "@acemir/cssom": "^0.9.28", - "@asamuzakjp/dom-selector": "^6.7.6", - "@exodus/bytes": "^1.6.0", - "cssstyle": "^5.3.4", - "data-urls": "^6.0.0", - "decimal.js": "^10.6.0", - "html-encoding-sniffer": "^6.0.0", + "cssstyle": "^4.2.1", + "data-urls": "^5.0.0", + "decimal.js": "^10.5.0", + "html-encoding-sniffer": "^4.0.0", "http-proxy-agent": "^7.0.2", "https-proxy-agent": "^7.0.6", "is-potential-custom-element-name": "^1.0.1", - "parse5": "^8.0.0", + "nwsapi": "^2.2.16", + "parse5": "^7.2.1", + "rrweb-cssom": "^0.8.0", "saxes": "^6.0.0", "symbol-tree": "^3.2.4", - "tough-cookie": "^6.0.0", + "tough-cookie": "^5.1.1", "w3c-xmlserializer": "^5.0.0", - "webidl-conversions": "^8.0.0", + "webidl-conversions": "^7.0.0", + "whatwg-encoding": "^3.1.1", "whatwg-mimetype": "^4.0.0", - "whatwg-url": "^15.1.0", - "ws": "^8.18.3", + "whatwg-url": "^14.1.1", + "ws": "^8.18.0", "xml-name-validator": "^5.0.0" }, "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + "node": ">=18" }, "peerDependencies": { "canvas": "^3.0.0" @@ -10247,11 +9273,13 @@ } }, "node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", - "dev": true, + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, "optionalDependencies": { "graceful-fs": "^4.1.6" } @@ -10616,6 +9644,19 @@ "node": ">=20.0.0" } }, + "node_modules/listr2/node_modules/ansi-regex": { + "version": 
"6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, "node_modules/listr2/node_modules/ansi-styles": { "version": "6.2.3", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", @@ -10646,6 +9687,13 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/listr2/node_modules/emoji-regex": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", + "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", + "dev": true, + "license": "MIT" + }, "node_modules/listr2/node_modules/is-fullwidth-code-point": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", @@ -10696,6 +9744,58 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/listr2/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/listr2/node_modules/wrap-ansi": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", + "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "string-width": "^7.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/listr2/node_modules/wrap-ansi/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -10776,6 +9876,19 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/log-update/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, "node_modules/log-update/node_modules/ansi-styles": { "version": "6.2.3", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", @@ -10789,6 +9902,29 @@ "url": 
"https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/log-update/node_modules/cli-cursor": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz", + "integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "restore-cursor": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/emoji-regex": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", + "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", + "dev": true, + "license": "MIT" + }, "node_modules/log-update/node_modules/is-fullwidth-code-point": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", @@ -10805,6 +9941,52 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/log-update/node_modules/onetime": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz", + "integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-function": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/restore-cursor": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz", + "integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==", + "dev": true, + "license": "MIT", + "dependencies": { + "onetime": "^7.0.0", + "signal-exit": "^4.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/log-update/node_modules/slice-ansi": { "version": "7.1.2", "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-7.1.2.tgz", @@ -10822,6 +10004,58 @@ "url": "https://github.com/chalk/slice-ansi?sponsor=1" } }, + "node_modules/log-update/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": 
"MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/log-update/node_modules/wrap-ansi": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", + "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "string-width": "^7.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/longest-streak": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", @@ -10866,9 +10100,9 @@ } }, "node_modules/lucide-react": { - "version": "0.562.0", - "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.562.0.tgz", - "integrity": "sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw==", + "version": "0.560.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.560.0.tgz", + "integrity": "sha512-NwKoUA/aBShsdL8WE5lukV2F/tjHzQRlonQs7fkNGI1sCT0Ay4a9Ap3ST2clUUkcY+9eQ0pBe2hybTQd2fmyDA==", "license": "ISC", "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" @@ -10896,26 +10130,83 @@ } }, "node_modules/make-fetch-happen": { - "version": "14.0.3", - "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-14.0.3.tgz", - "integrity": "sha512-QMjGbFTP0blj97EeidG5hk/QhKQ3T4ICckQGLgz38QF7Vgbk6e6FTARN8KhKxyBbWn8R0HU+bnw8aSoFPD4qtQ==", + "version": "10.2.1", + "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-10.2.1.tgz", + "integrity": "sha512-NgOPbRiaQM10DYXvN3/hhGVI2M5MtITFryzBGxHM5p4wnFxsVCbxkrBrDsk+EZ5OB4jEOT7AjDxtdF+KVEFT7w==", "dev": true, "license": "ISC", "dependencies": { - "@npmcli/agent": "^3.0.0", - "cacache": "^19.0.1", - "http-cache-semantics": "^4.1.1", - "minipass": "^7.0.2", - "minipass-fetch": "^4.0.0", + "agentkeepalive": "^4.2.1", + "cacache": "^16.1.0", + "http-cache-semantics": "^4.1.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-fetch": "^2.0.3", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", - "negotiator": "^1.0.0", - "proc-log": "^5.0.0", + "negotiator": "^0.6.3", "promise-retry": "^2.0.1", - "ssri": "^12.0.0" + "socks-proxy-agent": "^7.0.0", + "ssri": "^9.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/make-fetch-happen/node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/make-fetch-happen/node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": 
"4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/make-fetch-happen/node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "6", + "debug": "4" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": ">= 6" + } + }, + "node_modules/make-fetch-happen/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" } }, "node_modules/markdown-table": { @@ -11234,13 +10525,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/mdn-data": { - "version": "2.12.2", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.12.2.tgz", - "integrity": "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==", - "dev": true, - "license": "CC0-1.0" - }, "node_modules/micromark": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", @@ -11818,6 +11102,19 @@ "node": ">=8.6" } }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, "node_modules/mime": { "version": "2.6.0", "resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz", @@ -11887,6 +11184,16 @@ "node": ">=4" } }, + "node_modules/min-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/minimatch": { "version": "10.1.1", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz", @@ -11914,41 +11221,44 @@ } }, "node_modules/minipass": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", "dev": true, "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, "engines": { - "node": ">=16 || 14 >=14.17" + "node": ">=8" } }, "node_modules/minipass-collect": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-2.0.1.tgz", - "integrity": "sha512-D7V8PO9oaz7PWGLbCACuI1qEOsq7UKfLotx/C0Aet43fCUB/wfQ7DYeq2oR/svFJGYDHPr38SHATeaj/ZoKHKw==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", + "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", "dev": true, 
"license": "ISC", "dependencies": { - "minipass": "^7.0.3" + "minipass": "^3.0.0" }, "engines": { - "node": ">=16 || 14 >=14.17" + "node": ">= 8" } }, "node_modules/minipass-fetch": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-4.0.1.tgz", - "integrity": "sha512-j7U11C5HXigVuutxebFadoYBbd7VSdZWggSe64NVdvWNBqGAiXPL2QVCehjmw7lY1oF9gOllYbORh+hiNgfPgQ==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-2.1.2.tgz", + "integrity": "sha512-LT49Zi2/WMROHYoqGgdlQIZh8mLPZmOrN2NdJjMXxYe4nkN6FUyuPuOAOedNJDrx0IRGg9+4guZewtp8hE6TxA==", "dev": true, "license": "MIT", "dependencies": { - "minipass": "^7.0.3", + "minipass": "^3.1.6", "minipass-sized": "^1.0.3", - "minizlib": "^3.0.1" + "minizlib": "^2.1.2" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" }, "optionalDependencies": { "encoding": "^0.1.13" @@ -11967,26 +11277,6 @@ "node": ">= 8" } }, - "node_modules/minipass-flush/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minipass-flush/node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true, - "license": "ISC" - }, "node_modules/minipass-pipeline": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", @@ -12000,26 +11290,6 @@ "node": ">=8" } }, - "node_modules/minipass-pipeline/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minipass-pipeline/node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true, - "license": "ISC" - }, "node_modules/minipass-sized": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", @@ -12033,20 +11303,7 @@ "node": ">=8" } }, - "node_modules/minipass-sized/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minipass-sized/node_modules/yallist": { + "node_modules/minipass/node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", @@ -12054,18 +11311,26 @@ "license": "ISC" }, "node_modules/minizlib": { - "version": "3.1.0", - 
"resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.1.0.tgz", - "integrity": "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", "dev": true, "license": "MIT", "dependencies": { - "minipass": "^7.1.2" + "minipass": "^3.0.0", + "yallist": "^4.0.0" }, "engines": { - "node": ">= 18" + "node": ">= 8" } }, + "node_modules/minizlib/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, "node_modules/mkdirp": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", @@ -12166,9 +11431,9 @@ "license": "MIT" }, "node_modules/negotiator": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", - "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", + "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==", "dev": true, "license": "MIT", "engines": { @@ -12176,16 +11441,16 @@ } }, "node_modules/node-abi": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-4.24.0.tgz", - "integrity": "sha512-u2EC1CeNe25uVtX3EZbdQ275c74zdZmmpzrHEQh2aIYqoVjlglfUpOX9YY85x1nlBydEKDVaSmMNhR7N82Qj8A==", + "version": "3.85.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.85.0.tgz", + "integrity": "sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==", "dev": true, "license": "MIT", "dependencies": { - "semver": "^7.6.3" + "semver": "^7.3.5" }, "engines": { - "node": ">=22.12.0" + "node": ">=10" } }, "node_modules/node-addon-api": { @@ -12206,94 +11471,6 @@ "semver": "^7.3.5" } }, - "node_modules/node-gyp": { - "version": "11.5.0", - "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-11.5.0.tgz", - "integrity": "sha512-ra7Kvlhxn5V9Slyus0ygMa2h+UqExPqUIkfk7Pc8QTLT956JLSy51uWFwHtIYy0vI8cB4BDhc/S03+880My/LQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "env-paths": "^2.2.0", - "exponential-backoff": "^3.1.1", - "graceful-fs": "^4.2.6", - "make-fetch-happen": "^14.0.3", - "nopt": "^8.0.0", - "proc-log": "^5.0.0", - "semver": "^7.3.5", - "tar": "^7.4.3", - "tinyglobby": "^0.2.12", - "which": "^5.0.0" - }, - "bin": { - "node-gyp": "bin/node-gyp.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/node-gyp/node_modules/chownr": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", - "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/node-gyp/node_modules/isexe": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-3.1.1.tgz", - "integrity": "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==", - "dev": true, - "license": "ISC", - "engines": { - "node": 
">=16" - } - }, - "node_modules/node-gyp/node_modules/tar": { - "version": "7.5.2", - "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.2.tgz", - "integrity": "sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/fs-minipass": "^4.0.0", - "chownr": "^3.0.0", - "minipass": "^7.1.2", - "minizlib": "^3.1.0", - "yallist": "^5.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/node-gyp/node_modules/which": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/which/-/which-5.0.0.tgz", - "integrity": "sha512-JEdGzHwwkrbWoGOlIHqQ5gtprKGOenpDHpxE9zVR1bWbOtYRyPPHMe9FaP6x61CmNaTThSkb0DAJte5jD+DmzQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "isexe": "^3.1.1" - }, - "bin": { - "node-which": "bin/which.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/node-gyp/node_modules/yallist": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", - "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, "node_modules/node-releases": { "version": "2.0.27", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", @@ -12302,19 +11479,19 @@ "license": "MIT" }, "node_modules/nopt": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-8.1.0.tgz", - "integrity": "sha512-ieGu42u/Qsa4TFktmaKEwM6MQH0pOWnaB3htzh0JRtx84+Mebc0cbZYN5bC+6WTZ4+77xrL9Pn5m7CV6VIkV7A==", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-6.0.0.tgz", + "integrity": "sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g==", "dev": true, "license": "ISC", "dependencies": { - "abbrev": "^3.0.0" + "abbrev": "^1.0.0" }, "bin": { "nopt": "bin/nopt.js" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/normalize-url": { @@ -12330,6 +11507,13 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/nwsapi": { + "version": "2.2.23", + "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.23.tgz", + "integrity": "sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==", + "dev": true, + "license": "MIT" + }, "node_modules/object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", @@ -12460,16 +11644,16 @@ } }, "node_modules/onetime": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz", - "integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", "dev": true, "license": "MIT", "dependencies": { - "mimic-function": "^5.0.0" + "mimic-fn": "^2.1.0" }, "engines": { - "node": ">=18" + "node": ">=6" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -12517,69 +11701,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/ora/node_modules/cli-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", - 
"integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", - "dev": true, - "license": "MIT", - "dependencies": { - "restore-cursor": "^3.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ora/node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "dev": true, - "license": "MIT", - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ora/node_modules/restore-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", - "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", - "dev": true, - "license": "MIT", - "dependencies": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ora/node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/ora/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/own-keys": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz", @@ -12641,13 +11762,16 @@ } }, "node_modules/p-map": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-7.0.4.tgz", - "integrity": "sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", "dev": true, "license": "MIT", + "dependencies": { + "aggregate-error": "^3.0.0" + }, "engines": { - "node": ">=18" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -12699,9 +11823,9 @@ "license": "MIT" }, "node_modules/parse5": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-8.0.0.tgz", - "integrity": "sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==", + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", "dev": true, "license": "MIT", "dependencies": { @@ -12772,6 +11896,16 @@ "dev": true, "license": "ISC" }, + "node_modules/path-scurry/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/pathe": { 
"version": "2.0.3", "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", @@ -12809,13 +11943,13 @@ "license": "ISC" }, "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", "engines": { - "node": ">=8.6" + "node": ">=12" }, "funding": { "url": "https://github.com/sponsors/jonschlinkert" @@ -13010,14 +12144,22 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/pretty-format/node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true, + "license": "MIT", + "peer": true + }, "node_modules/proc-log": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-5.0.0.tgz", - "integrity": "sha512-Azwzvl90HaF0aCz1JrDdXQykFakSSNPaPoiZ9fm5qJIMHioDZEi7OAdRwSm6rSoPtY3Qutnm3L7ogmg3dc+wbQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-2.0.1.tgz", + "integrity": "sha512-Kcmo2FhfDTXdcbfDH76N7uBYHINxc/8GW7UAVuVP9I+Va3uHSerrnKV6dLooga/gh7GlgzuCCr/eoldnL1muGw==", "dev": true, "license": "ISC", "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/progress": { @@ -13063,13 +12205,6 @@ "react-is": "^16.13.1" } }, - "node_modules/prop-types/node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "dev": true, - "license": "MIT" - }, "node_modules/property-information": { "version": "7.1.0", "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", @@ -13136,12 +12271,12 @@ } }, "node_modules/react-i18next": { - "version": "16.5.1", - "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-16.5.1.tgz", - "integrity": "sha512-Hks6UIRZWW4c+qDAnx1csVsCGYeIR4MoBGQgJ+NUoNnO6qLxXuf8zu0xdcinyXUORgGzCdRsexxO1Xzv3sTdnw==", + "version": "16.5.0", + "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-16.5.0.tgz", + "integrity": "sha512-IMpPTyCTKxEj8klCrLKUTIUa8uYTd851+jcu2fJuUB9Agkk9Qq8asw4omyeHVnOXHrLgQJGTm5zTvn8HpaPiqw==", "license": "MIT", "dependencies": { - "@babel/runtime": "^7.28.4", + "@babel/runtime": "^7.27.6", "html-parse-stringify": "^3.0.1", "use-sync-external-store": "^1.6.0" }, @@ -13163,12 +12298,11 @@ } }, "node_modules/react-is": { - "version": "17.0.2", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", - "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/react-markdown": { "version": "10.1.0", @@ -13255,13 
+12389,13 @@ } }, "node_modules/react-resizable-panels": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/react-resizable-panels/-/react-resizable-panels-4.2.0.tgz", - "integrity": "sha512-X/WbnyT/bgx09KEGvtJvaTr3axRrcBGcJdELIoGXZipCxc2hPwFsH/pfpVgwNVq5LpQxF/E5pPXGTQdjBnidPw==", + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/react-resizable-panels/-/react-resizable-panels-3.0.6.tgz", + "integrity": "sha512-b3qKHQ3MLqOgSS+FRYKapNkJZf5EQzuf6+RLiq1/IlTHw99YrZ2NJZLk4hQIzTnnIkRg2LUqyVinu6YWWpUYew==", "license": "MIT", "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" + "react": "^16.14.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc", + "react-dom": "^16.14.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" } }, "node_modules/react-style-singleton": { @@ -13327,6 +12461,20 @@ "url": "https://paulmillr.com/funding/" } }, + "node_modules/redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/reflect.getprototypeof": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", @@ -13447,16 +12595,6 @@ "node": ">=0.10.0" } }, - "node_modules/require-from-string": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", - "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/resedit": { "version": "1.7.2", "resolved": "https://registry.npmjs.org/resedit/-/resedit-1.7.2.tgz", @@ -13524,20 +12662,17 @@ } }, "node_modules/restore-cursor": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz", - "integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", "dev": true, "license": "MIT", "dependencies": { - "onetime": "^7.0.0", - "signal-exit": "^4.1.0" + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" }, "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=8" } }, "node_modules/retry": { @@ -13558,18 +12693,55 @@ "license": "MIT" }, "node_modules/rimraf": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", - "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, "license": "ISC", - "peer": true, "dependencies": { "glob": "^7.1.3" }, "bin": { "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + 
"node_modules/rimraf/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" } }, "node_modules/roarr": { @@ -13592,9 +12764,9 @@ } }, "node_modules/rollup": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.54.0.tgz", - "integrity": "sha512-3nk8Y3a9Ea8szgKhinMlGMhGMw89mqule3KWczxhIzqudyHdCIOHw8WJlj/r329fACjKLEh13ZSk7oE22kyeIw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.4.tgz", + "integrity": "sha512-YpXaaArg0MvrnJpvduEDYIp7uGOqKXbH9NsHGQ6SxKCOsNAjZF018MmxefFUulVP2KLtiGw1UvZbr+/ekjvlDg==", "dev": true, "license": "MIT", "dependencies": { @@ -13608,31 +12780,38 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.54.0", - "@rollup/rollup-android-arm64": "4.54.0", - "@rollup/rollup-darwin-arm64": "4.54.0", - "@rollup/rollup-darwin-x64": "4.54.0", - "@rollup/rollup-freebsd-arm64": "4.54.0", - "@rollup/rollup-freebsd-x64": "4.54.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.54.0", - "@rollup/rollup-linux-arm-musleabihf": "4.54.0", - "@rollup/rollup-linux-arm64-gnu": "4.54.0", - "@rollup/rollup-linux-arm64-musl": "4.54.0", - "@rollup/rollup-linux-loong64-gnu": "4.54.0", - "@rollup/rollup-linux-ppc64-gnu": "4.54.0", - "@rollup/rollup-linux-riscv64-gnu": "4.54.0", - "@rollup/rollup-linux-riscv64-musl": "4.54.0", - "@rollup/rollup-linux-s390x-gnu": "4.54.0", - "@rollup/rollup-linux-x64-gnu": "4.54.0", - "@rollup/rollup-linux-x64-musl": "4.54.0", - "@rollup/rollup-openharmony-arm64": "4.54.0", - "@rollup/rollup-win32-arm64-msvc": "4.54.0", - "@rollup/rollup-win32-ia32-msvc": "4.54.0", - "@rollup/rollup-win32-x64-gnu": "4.54.0", - "@rollup/rollup-win32-x64-msvc": "4.54.0", + "@rollup/rollup-android-arm-eabi": "4.53.4", + "@rollup/rollup-android-arm64": "4.53.4", + "@rollup/rollup-darwin-arm64": "4.53.4", + "@rollup/rollup-darwin-x64": "4.53.4", + "@rollup/rollup-freebsd-arm64": "4.53.4", + "@rollup/rollup-freebsd-x64": "4.53.4", + "@rollup/rollup-linux-arm-gnueabihf": "4.53.4", + "@rollup/rollup-linux-arm-musleabihf": "4.53.4", + "@rollup/rollup-linux-arm64-gnu": "4.53.4", + "@rollup/rollup-linux-arm64-musl": "4.53.4", + "@rollup/rollup-linux-loong64-gnu": "4.53.4", + "@rollup/rollup-linux-ppc64-gnu": "4.53.4", + "@rollup/rollup-linux-riscv64-gnu": "4.53.4", + "@rollup/rollup-linux-riscv64-musl": "4.53.4", + "@rollup/rollup-linux-s390x-gnu": "4.53.4", + "@rollup/rollup-linux-x64-gnu": "4.53.4", + "@rollup/rollup-linux-x64-musl": "4.53.4", + "@rollup/rollup-openharmony-arm64": "4.53.4", + "@rollup/rollup-win32-arm64-msvc": "4.53.4", + "@rollup/rollup-win32-ia32-msvc": 
"4.53.4", + "@rollup/rollup-win32-x64-gnu": "4.53.4", + "@rollup/rollup-win32-x64-msvc": "4.53.4", "fsevents": "~2.3.2" } }, + "node_modules/rrweb-cssom": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz", + "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==", + "dev": true, + "license": "MIT" + }, "node_modules/safe-array-concat": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz", @@ -13944,17 +13123,11 @@ "license": "ISC" }, "node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "dev": true, - "license": "ISC", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } + "license": "ISC" }, "node_modules/simple-update-notifier": { "version": "2.0.0", @@ -14012,18 +13185,31 @@ } }, "node_modules/socks-proxy-agent": { - "version": "8.0.5", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz", - "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz", + "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==", "dev": true, "license": "MIT", "dependencies": { - "agent-base": "^7.1.2", - "debug": "^4.3.4", - "socks": "^2.8.3" + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" }, "engines": { - "node": ">= 14" + "node": ">= 10" + } + }, + "node_modules/socks-proxy-agent/node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" } }, "node_modules/source-map": { @@ -14076,16 +13262,16 @@ "optional": true }, "node_modules/ssri": { - "version": "12.0.0", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-12.0.0.tgz", - "integrity": "sha512-S7iGNosepx9RadX82oimUkvr0Ct7IjJbEbs4mJcTxst8um95J3sDYU1RBEOvdu6oL1Wek2ODI5i4MAw+dZ6cAQ==", + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-9.0.1.tgz", + "integrity": "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q==", "dev": true, "license": "ISC", "dependencies": { - "minipass": "^7.0.3" + "minipass": "^3.1.1" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/stackback": { @@ -14177,32 +13363,6 @@ "node": ">=8" } }, - "node_modules/string-width-cjs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - 
"ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/string.prototype.matchall": { "version": "4.0.12", "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz", @@ -14316,19 +13476,16 @@ } }, "node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "dev": true, "license": "MIT", "dependencies": { - "ansi-regex": "^6.0.1" + "ansi-regex": "^5.0.1" }, "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" + "node": ">=8" } }, "node_modules/strip-ansi-cjs": { @@ -14345,17 +13502,17 @@ "node": ">=8" } }, - "node_modules/strip-ansi/node_modules/ansi-regex": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "node_modules/strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", "dev": true, "license": "MIT", - "engines": { - "node": ">=12" + "dependencies": { + "min-indent": "^1.0.0" }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" + "engines": { + "node": ">=8" } }, "node_modules/strip-json-comments": { @@ -14470,78 +13627,25 @@ "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", "dev": true, - "license": "ISC", - "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/tar/node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "dev": true, - "license": "ISC", - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/tar/node_modules/fs-minipass/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/tar/node_modules/minipass": { - "version": "5.0.0", - "resolved": 
"https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", - "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=8" - } - }, - "node_modules/tar/node_modules/minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "dev": true, - "license": "MIT", + "license": "ISC", "dependencies": { - "minipass": "^3.0.0", + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", "yallist": "^4.0.0" }, "engines": { - "node": ">= 8" + "node": ">=10" } }, - "node_modules/tar/node_modules/minizlib/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", "dev": true, "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, "engines": { "node": ">=8" } @@ -14579,42 +13683,41 @@ "fs-extra": "^10.0.0" } }, - "node_modules/temp-file/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "node_modules/temp/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, - "license": "MIT", + "license": "ISC", + "peer": true, "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" }, "engines": { - "node": ">=12" + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/temp-file/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "node_modules/temp/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, - "license": "MIT", + "license": "ISC", + "peer": true, "dependencies": { - "universalify": "^2.0.0" + "brace-expansion": "^1.1.7" }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/temp-file/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", "engines": { - 
"node": ">= 10.0.0" + "node": "*" } }, "node_modules/temp/node_modules/mkdirp": { @@ -14631,6 +13734,21 @@ "mkdirp": "bin/cmd.js" } }, + "node_modules/temp/node_modules/rimraf": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", + "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, "node_modules/tiny-async-pool": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/tiny-async-pool/-/tiny-async-pool-1.3.0.tgz", @@ -14691,37 +13809,6 @@ "url": "https://github.com/sponsors/SuperchupuDev" } }, - "node_modules/tinyglobby/node_modules/fdir": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", - "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "picomatch": "^3 || ^4" - }, - "peerDependenciesMeta": { - "picomatch": { - "optional": true - } - } - }, - "node_modules/tinyglobby/node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, "node_modules/tinyrainbow": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz", @@ -14733,22 +13820,22 @@ } }, "node_modules/tldts": { - "version": "7.0.19", - "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.19.tgz", - "integrity": "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA==", + "version": "6.1.86", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-6.1.86.tgz", + "integrity": "sha512-WMi/OQ2axVTf/ykqCQgXiIct+mSQDFdH2fkwhPwgEwvJ1kSzZRiinb0zF2Xb8u4+OqPChmyI6MEu4EezNJz+FQ==", "dev": true, "license": "MIT", "dependencies": { - "tldts-core": "^7.0.19" + "tldts-core": "^6.1.86" }, "bin": { "tldts": "bin/cli.js" } }, "node_modules/tldts-core": { - "version": "7.0.19", - "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.19.tgz", - "integrity": "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A==", + "version": "6.1.86", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-6.1.86.tgz", + "integrity": "sha512-Je6p7pkk+KMzMv2XXKmAE3McmolOQFdxkKw0R8EYNr7sELW46JqnNeTX8ybPiQgvg1ymCoF8LXs5fzFaZvJPTA==", "dev": true, "license": "MIT" }, @@ -14786,29 +13873,29 @@ } }, "node_modules/tough-cookie": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-6.0.0.tgz", - "integrity": "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-5.1.2.tgz", + "integrity": "sha512-FVDYdxtnj0G6Qm/DhNPSb8Ju59ULcup3tuJxkFb5K8Bv2pUXILbf0xZWU8PX8Ov19OXljbUyveOFwRMwkXzO+A==", "dev": true, "license": "BSD-3-Clause", "dependencies": { - "tldts": "^7.0.5" + "tldts": "^6.1.32" }, "engines": { "node": ">=16" } }, 
"node_modules/tr46": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-6.0.0.tgz", - "integrity": "sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-5.1.1.tgz", + "integrity": "sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==", "dev": true, "license": "MIT", "dependencies": { "punycode": "^2.3.1" }, "engines": { - "node": ">=20" + "node": ">=18" } }, "node_modules/trim-lines": { @@ -14842,9 +13929,9 @@ } }, "node_modules/ts-api-utils": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.3.0.tgz", - "integrity": "sha512-6eg3Y9SF7SsAvGzRHQvvc1skDAhwI4YQ32ui1scxD1Ccr0G5qIIbUBT3pFTKX8kmWIQClHobtUdNuaBgwdfdWg==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", + "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", "dev": true, "license": "MIT", "engines": { @@ -14980,16 +14067,16 @@ } }, "node_modules/typescript-eslint": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.51.0.tgz", - "integrity": "sha512-jh8ZuM5oEh2PSdyQG9YAEM1TCGuWenLSuSUhf/irbVUNW9O5FhbFVONviN2TgMTBnUmyHv7E56rYnfLZK6TkiA==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.49.0.tgz", + "integrity": "sha512-zRSVH1WXD0uXczCXw+nsdjGPUdx4dfrs5VQoHnUWmv1U3oNlAKv4FUNdLDhVUg+gYn+a5hUESqch//Rv5wVhrg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/eslint-plugin": "8.51.0", - "@typescript-eslint/parser": "8.51.0", - "@typescript-eslint/typescript-estree": "8.51.0", - "@typescript-eslint/utils": "8.51.0" + "@typescript-eslint/eslint-plugin": "8.49.0", + "@typescript-eslint/parser": "8.49.0", + "@typescript-eslint/typescript-estree": "8.49.0", + "@typescript-eslint/utils": "8.49.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -15049,29 +14136,29 @@ } }, "node_modules/unique-filename": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-4.0.0.tgz", - "integrity": "sha512-XSnEewXmQ+veP7xX2dS5Q4yZAvO40cBN2MWkJ7D/6sW4Dg6wYBNwM1Vrnz1FhH5AdeLIlUXRI9e28z1YZi71NQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-2.0.1.tgz", + "integrity": "sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A==", "dev": true, "license": "ISC", "dependencies": { - "unique-slug": "^5.0.0" + "unique-slug": "^3.0.0" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/unique-slug": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-5.0.0.tgz", - "integrity": "sha512-9OdaqO5kwqR+1kVgHAhsp5vPNU0hnxRa26rBFNfNgM7M6pNtgzeBn3s/xbyCQL3dcjzOatcef6UUHpB/6MaETg==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-3.0.0.tgz", + "integrity": "sha512-8EyMynh679x/0gqE9fT9oilG+qEt+ibFyqjuVTsZn1+CMxH+XLlpvr2UZx4nVcCwTpx81nICr2JQFkM+HPLq4w==", "dev": true, "license": "ISC", "dependencies": { "imurmurhash": "^0.1.4" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/unist-util-is": { @@ -15143,19 +14230,18 @@ } }, "node_modules/universalify": 
{ - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", - "dev": true, + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", "license": "MIT", "engines": { - "node": ">= 4.0.0" + "node": ">= 10.0.0" } }, "node_modules/update-browserslist-db": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", - "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.2.tgz", + "integrity": "sha512-E85pfNzMQ9jpKkA7+TJAi4TJN+tBCuWh5rUcS/sv6cFi+1q9LYDwDI5dpUL0u/73EElyQ8d3TEaeW4sPedBqYA==", "dev": true, "funding": [ { @@ -15391,9 +14477,9 @@ } }, "node_modules/vite/node_modules/@esbuild/aix-ppc64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz", - "integrity": "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.1.tgz", + "integrity": "sha512-HHB50pdsBX6k47S4u5g/CaLjqS3qwaOVE5ILsq64jyzgMhLuCuZ8rGzM9yhsAjfjkbgUPMzZEPa7DAp7yz6vuA==", "cpu": [ "ppc64" ], @@ -15408,9 +14494,9 @@ } }, "node_modules/vite/node_modules/@esbuild/android-arm": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.2.tgz", - "integrity": "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.1.tgz", + "integrity": "sha512-kFqa6/UcaTbGm/NncN9kzVOODjhZW8e+FRdSeypWe6j33gzclHtwlANs26JrupOntlcWmB0u8+8HZo8s7thHvg==", "cpu": [ "arm" ], @@ -15425,9 +14511,9 @@ } }, "node_modules/vite/node_modules/@esbuild/android-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.2.tgz", - "integrity": "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.1.tgz", + "integrity": "sha512-45fuKmAJpxnQWixOGCrS+ro4Uvb4Re9+UTieUY2f8AEc+t7d4AaZ6eUJ3Hva7dtrxAAWHtlEFsXFMAgNnGU9uQ==", "cpu": [ "arm64" ], @@ -15442,9 +14528,9 @@ } }, "node_modules/vite/node_modules/@esbuild/android-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.2.tgz", - "integrity": "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.1.tgz", + "integrity": "sha512-LBEpOz0BsgMEeHgenf5aqmn/lLNTFXVfoWMUox8CtWWYK9X4jmQzWjoGoNb8lmAYml/tQ/Ysvm8q7szu7BoxRQ==", "cpu": [ "x64" ], @@ -15459,9 +14545,9 @@ } }, "node_modules/vite/node_modules/@esbuild/darwin-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.2.tgz", - "integrity": 
"sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.1.tgz", + "integrity": "sha512-veg7fL8eMSCVKL7IW4pxb54QERtedFDfY/ASrumK/SbFsXnRazxY4YykN/THYqFnFwJ0aVjiUrVG2PwcdAEqQQ==", "cpu": [ "arm64" ], @@ -15476,9 +14562,9 @@ } }, "node_modules/vite/node_modules/@esbuild/darwin-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.2.tgz", - "integrity": "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.1.tgz", + "integrity": "sha512-+3ELd+nTzhfWb07Vol7EZ+5PTbJ/u74nC6iv4/lwIU99Ip5uuY6QoIf0Hn4m2HoV0qcnRivN3KSqc+FyCHjoVQ==", "cpu": [ "x64" ], @@ -15493,9 +14579,9 @@ } }, "node_modules/vite/node_modules/@esbuild/freebsd-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.2.tgz", - "integrity": "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.1.tgz", + "integrity": "sha512-/8Rfgns4XD9XOSXlzUDepG8PX+AVWHliYlUkFI3K3GB6tqbdjYqdhcb4BKRd7C0BhZSoaCxhv8kTcBrcZWP+xg==", "cpu": [ "arm64" ], @@ -15510,9 +14596,9 @@ } }, "node_modules/vite/node_modules/@esbuild/freebsd-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.2.tgz", - "integrity": "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.1.tgz", + "integrity": "sha512-GITpD8dK9C+r+5yRT/UKVT36h/DQLOHdwGVwwoHidlnA168oD3uxA878XloXebK4Ul3gDBBIvEdL7go9gCUFzQ==", "cpu": [ "x64" ], @@ -15527,9 +14613,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-arm": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.2.tgz", - "integrity": "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.1.tgz", + "integrity": "sha512-ieMID0JRZY/ZeCrsFQ3Y3NlHNCqIhTprJfDgSB3/lv5jJZ8FX3hqPyXWhe+gvS5ARMBJ242PM+VNz/ctNj//eA==", "cpu": [ "arm" ], @@ -15544,9 +14630,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.2.tgz", - "integrity": "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.1.tgz", + "integrity": "sha512-W9//kCrh/6in9rWIBdKaMtuTTzNj6jSeG/haWBADqLLa9P8O5YSRDzgD5y9QBok4AYlzS6ARHifAb75V6G670Q==", "cpu": [ "arm64" ], @@ -15561,9 +14647,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-ia32": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.2.tgz", - "integrity": "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==", + "version": "0.27.1", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.1.tgz", + "integrity": "sha512-VIUV4z8GD8rtSVMfAj1aXFahsi/+tcoXXNYmXgzISL+KB381vbSTNdeZHHHIYqFyXcoEhu9n5cT+05tRv13rlw==", "cpu": [ "ia32" ], @@ -15578,9 +14664,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-loong64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.2.tgz", - "integrity": "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.1.tgz", + "integrity": "sha512-l4rfiiJRN7sTNI//ff65zJ9z8U+k6zcCg0LALU5iEWzY+a1mVZ8iWC1k5EsNKThZ7XCQ6YWtsZ8EWYm7r1UEsg==", "cpu": [ "loong64" ], @@ -15595,9 +14681,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-mips64el": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.2.tgz", - "integrity": "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.1.tgz", + "integrity": "sha512-U0bEuAOLvO/DWFdygTHWY8C067FXz+UbzKgxYhXC0fDieFa0kDIra1FAhsAARRJbvEyso8aAqvPdNxzWuStBnA==", "cpu": [ "mips64el" ], @@ -15612,9 +14698,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-ppc64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.2.tgz", - "integrity": "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.1.tgz", + "integrity": "sha512-NzdQ/Xwu6vPSf/GkdmRNsOfIeSGnh7muundsWItmBsVpMoNPVpM61qNzAVY3pZ1glzzAxLR40UyYM23eaDDbYQ==", "cpu": [ "ppc64" ], @@ -15629,9 +14715,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-riscv64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.2.tgz", - "integrity": "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.1.tgz", + "integrity": "sha512-7zlw8p3IApcsN7mFw0O1Z1PyEk6PlKMu18roImfl3iQHTnr/yAfYv6s4hXPidbDoI2Q0pW+5xeoM4eTCC0UdrQ==", "cpu": [ "riscv64" ], @@ -15646,9 +14732,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-s390x": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.2.tgz", - "integrity": "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.1.tgz", + "integrity": "sha512-cGj5wli+G+nkVQdZo3+7FDKC25Uh4ZVwOAK6A06Hsvgr8WqBBuOy/1s+PUEd/6Je+vjfm6stX0kmib5b/O2Ykw==", "cpu": [ "s390x" ], @@ -15663,9 +14749,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.2.tgz", - "integrity": "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.1.tgz", + "integrity": 
"sha512-z3H/HYI9MM0HTv3hQZ81f+AKb+yEoCRlUby1F80vbQ5XdzEMyY/9iNlAmhqiBKw4MJXwfgsh7ERGEOhrM1niMA==", "cpu": [ "x64" ], @@ -15680,9 +14766,9 @@ } }, "node_modules/vite/node_modules/@esbuild/netbsd-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.2.tgz", - "integrity": "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.1.tgz", + "integrity": "sha512-wzC24DxAvk8Em01YmVXyjl96Mr+ecTPyOuADAvjGg+fyBpGmxmcr2E5ttf7Im8D0sXZihpxzO1isus8MdjMCXQ==", "cpu": [ "arm64" ], @@ -15697,9 +14783,9 @@ } }, "node_modules/vite/node_modules/@esbuild/netbsd-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.2.tgz", - "integrity": "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.1.tgz", + "integrity": "sha512-1YQ8ybGi2yIXswu6eNzJsrYIGFpnlzEWRl6iR5gMgmsrR0FcNoV1m9k9sc3PuP5rUBLshOZylc9nqSgymI+TYg==", "cpu": [ "x64" ], @@ -15714,9 +14800,9 @@ } }, "node_modules/vite/node_modules/@esbuild/openbsd-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.2.tgz", - "integrity": "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.1.tgz", + "integrity": "sha512-5Z+DzLCrq5wmU7RDaMDe2DVXMRm2tTDvX2KU14JJVBN2CT/qov7XVix85QoJqHltpvAOZUAc3ndU56HSMWrv8g==", "cpu": [ "arm64" ], @@ -15731,9 +14817,9 @@ } }, "node_modules/vite/node_modules/@esbuild/openbsd-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.2.tgz", - "integrity": "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.1.tgz", + "integrity": "sha512-Q73ENzIdPF5jap4wqLtsfh8YbYSZ8Q0wnxplOlZUOyZy7B4ZKW8DXGWgTCZmF8VWD7Tciwv5F4NsRf6vYlZtqg==", "cpu": [ "x64" ], @@ -15748,9 +14834,9 @@ } }, "node_modules/vite/node_modules/@esbuild/openharmony-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.2.tgz", - "integrity": "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.1.tgz", + "integrity": "sha512-ajbHrGM/XiK+sXM0JzEbJAen+0E+JMQZ2l4RR4VFwvV9JEERx+oxtgkpoKv1SevhjavK2z2ReHk32pjzktWbGg==", "cpu": [ "arm64" ], @@ -15765,9 +14851,9 @@ } }, "node_modules/vite/node_modules/@esbuild/sunos-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.2.tgz", - "integrity": "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.1.tgz", + "integrity": "sha512-IPUW+y4VIjuDVn+OMzHc5FV4GubIwPnsz6ubkvN8cuhEqH81NovB53IUlrlBkPMEPxvNnf79MGBoz8rZ2iW8HA==", "cpu": [ "x64" ], @@ -15782,9 +14868,9 @@ } }, 
"node_modules/vite/node_modules/@esbuild/win32-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.2.tgz", - "integrity": "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.1.tgz", + "integrity": "sha512-RIVRWiljWA6CdVu8zkWcRmGP7iRRIIwvhDKem8UMBjPql2TXM5PkDVvvrzMtj1V+WFPB4K7zkIGM7VzRtFkjdg==", "cpu": [ "arm64" ], @@ -15799,9 +14885,9 @@ } }, "node_modules/vite/node_modules/@esbuild/win32-ia32": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.2.tgz", - "integrity": "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.1.tgz", + "integrity": "sha512-2BR5M8CPbptC1AK5JbJT1fWrHLvejwZidKx3UMSF0ecHMa+smhi16drIrCEggkgviBwLYd5nwrFLSl5Kho96RQ==", "cpu": [ "ia32" ], @@ -15816,9 +14902,9 @@ } }, "node_modules/vite/node_modules/@esbuild/win32-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.2.tgz", - "integrity": "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.1.tgz", + "integrity": "sha512-d5X6RMYv6taIymSk8JBP+nxv8DQAMY6A51GPgusqLdK9wBz5wWIXy1KjTck6HnjE9hqJzJRdk+1p/t5soSbCtw==", "cpu": [ "x64" ], @@ -15833,9 +14919,9 @@ } }, "node_modules/vite/node_modules/esbuild": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.2.tgz", - "integrity": "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.1.tgz", + "integrity": "sha512-yY35KZckJJuVVPXpvjgxiCuVEJT67F6zDeVTv4rizyPrfGBUpZQsvmxnN+C371c2esD/hNMjj4tpBhuueLN7aA==", "dev": true, "hasInstallScript": true, "license": "MIT", @@ -15846,50 +14932,32 @@ "node": ">=18" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.27.2", - "@esbuild/android-arm": "0.27.2", - "@esbuild/android-arm64": "0.27.2", - "@esbuild/android-x64": "0.27.2", - "@esbuild/darwin-arm64": "0.27.2", - "@esbuild/darwin-x64": "0.27.2", - "@esbuild/freebsd-arm64": "0.27.2", - "@esbuild/freebsd-x64": "0.27.2", - "@esbuild/linux-arm": "0.27.2", - "@esbuild/linux-arm64": "0.27.2", - "@esbuild/linux-ia32": "0.27.2", - "@esbuild/linux-loong64": "0.27.2", - "@esbuild/linux-mips64el": "0.27.2", - "@esbuild/linux-ppc64": "0.27.2", - "@esbuild/linux-riscv64": "0.27.2", - "@esbuild/linux-s390x": "0.27.2", - "@esbuild/linux-x64": "0.27.2", - "@esbuild/netbsd-arm64": "0.27.2", - "@esbuild/netbsd-x64": "0.27.2", - "@esbuild/openbsd-arm64": "0.27.2", - "@esbuild/openbsd-x64": "0.27.2", - "@esbuild/openharmony-arm64": "0.27.2", - "@esbuild/sunos-x64": "0.27.2", - "@esbuild/win32-arm64": "0.27.2", - "@esbuild/win32-ia32": "0.27.2", - "@esbuild/win32-x64": "0.27.2" - } - }, - "node_modules/vite/node_modules/fdir": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", - "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - 
"picomatch": "^3 || ^4" - }, - "peerDependenciesMeta": { - "picomatch": { - "optional": true - } + "@esbuild/aix-ppc64": "0.27.1", + "@esbuild/android-arm": "0.27.1", + "@esbuild/android-arm64": "0.27.1", + "@esbuild/android-x64": "0.27.1", + "@esbuild/darwin-arm64": "0.27.1", + "@esbuild/darwin-x64": "0.27.1", + "@esbuild/freebsd-arm64": "0.27.1", + "@esbuild/freebsd-x64": "0.27.1", + "@esbuild/linux-arm": "0.27.1", + "@esbuild/linux-arm64": "0.27.1", + "@esbuild/linux-ia32": "0.27.1", + "@esbuild/linux-loong64": "0.27.1", + "@esbuild/linux-mips64el": "0.27.1", + "@esbuild/linux-ppc64": "0.27.1", + "@esbuild/linux-riscv64": "0.27.1", + "@esbuild/linux-s390x": "0.27.1", + "@esbuild/linux-x64": "0.27.1", + "@esbuild/netbsd-arm64": "0.27.1", + "@esbuild/netbsd-x64": "0.27.1", + "@esbuild/openbsd-arm64": "0.27.1", + "@esbuild/openbsd-x64": "0.27.1", + "@esbuild/openharmony-arm64": "0.27.1", + "@esbuild/sunos-x64": "0.27.1", + "@esbuild/win32-arm64": "0.27.1", + "@esbuild/win32-ia32": "0.27.1", + "@esbuild/win32-x64": "0.27.1" } }, "node_modules/vite/node_modules/fsevents": { @@ -15907,33 +14975,20 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, - "node_modules/vite/node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, "node_modules/vitest": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.16.tgz", - "integrity": "sha512-E4t7DJ9pESL6E3I8nFjPa4xGUd3PmiWDLsDztS2qXSJWfHtbQnwAWylaBvSNY48I3vr8PTqIZlyK8TE3V3CA4Q==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.15.tgz", + "integrity": "sha512-n1RxDp8UJm6N0IbJLQo+yzLZ2sQCDyl1o0LeugbPWf8+8Fttp29GghsQBjYJVmWq3gBFfe9Hs1spR44vovn2wA==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/expect": "4.0.16", - "@vitest/mocker": "4.0.16", - "@vitest/pretty-format": "4.0.16", - "@vitest/runner": "4.0.16", - "@vitest/snapshot": "4.0.16", - "@vitest/spy": "4.0.16", - "@vitest/utils": "4.0.16", + "@vitest/expect": "4.0.15", + "@vitest/mocker": "4.0.15", + "@vitest/pretty-format": "4.0.15", + "@vitest/runner": "4.0.15", + "@vitest/snapshot": "4.0.15", + "@vitest/spy": "4.0.15", + "@vitest/utils": "4.0.15", "es-module-lexer": "^1.7.0", "expect-type": "^1.2.2", "magic-string": "^0.30.21", @@ -15961,10 +15016,10 @@ "@edge-runtime/vm": "*", "@opentelemetry/api": "^1.9.0", "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", - "@vitest/browser-playwright": "4.0.16", - "@vitest/browser-preview": "4.0.16", - "@vitest/browser-webdriverio": "4.0.16", - "@vitest/ui": "4.0.16", + "@vitest/browser-playwright": "4.0.15", + "@vitest/browser-preview": "4.0.15", + "@vitest/browser-webdriverio": "4.0.15", + "@vitest/ui": "4.0.15", "happy-dom": "*", "jsdom": "*" }, @@ -15998,19 +15053,6 @@ } } }, - "node_modules/vitest/node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, "node_modules/void-elements": { "version": "3.1.0", "resolved": 
"https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz", @@ -16044,13 +15086,26 @@ } }, "node_modules/webidl-conversions": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-8.0.0.tgz", - "integrity": "sha512-n4W4YFyz5JzOfQeA8oN7dUYpR+MBP3PIUsn2jLjWXwK5ASUzt0Jc/A5sAUZoCYFJRGF0FBKJ+1JjN43rNdsQzA==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", + "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", "dev": true, "license": "BSD-2-Clause", "engines": { - "node": ">=20" + "node": ">=12" + } + }, + "node_modules/whatwg-encoding": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz", + "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=18" } }, "node_modules/whatwg-mimetype": { @@ -16064,17 +15119,17 @@ } }, "node_modules/whatwg-url": { - "version": "15.1.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-15.1.0.tgz", - "integrity": "sha512-2ytDk0kiEj/yu90JOAp44PVPUkO9+jVhyf+SybKlRHSDlvOOZhdPIrr7xTH64l4WixO2cP+wQIcgujkGBPPz6g==", + "version": "14.2.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.2.0.tgz", + "integrity": "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==", "dev": true, "license": "MIT", "dependencies": { - "tr46": "^6.0.0", - "webidl-conversions": "^8.0.0" + "tr46": "^5.1.0", + "webidl-conversions": "^7.0.0" }, "engines": { - "node": ">=20" + "node": ">=18" } }, "node_modules/which": { @@ -16210,18 +15265,18 @@ } }, "node_modules/wrap-ansi": { - "version": "9.0.2", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", - "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "dev": true, "license": "MIT", "dependencies": { - "ansi-styles": "^6.2.1", - "string-width": "^7.0.0", - "strip-ansi": "^7.1.0" + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" }, "engines": { - "node": ">=18" + "node": ">=10" }, "funding": { "url": "https://github.com/chalk/wrap-ansi?sponsor=1" @@ -16246,57 +15301,6 @@ "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, - "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", - "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - 
"node_modules/wrap-ansi/node_modules/emoji-regex": { - "version": "10.6.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", - "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", - "dev": true, - "license": "MIT" - }, - "node_modules/wrap-ansi/node_modules/string-width": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", - "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^10.3.0", - "get-east-asian-width": "^1.0.0", - "strip-ansi": "^7.1.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", @@ -16440,9 +15444,10 @@ } }, "node_modules/zod": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.4.tgz", - "integrity": "sha512-Zw/uYiiyF6pUT1qmKbZziChgNPRu+ZRneAsMUDU6IwmXdWt5JwcUfy2bvLOCUtz5UniaN/Zx5aFttZYbYc7O/A==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.2.0.tgz", + "integrity": "sha512-Bd5fw9wlIhtqCCxotZgdTOMwGm1a0u75wARVEY9HMs1X17trvA/lMi4+MGK5EUfYkXVTbX8UDiDKW4OgzHVUZw==", + "dev": true, "license": "MIT", "funding": { "url": "https://github.com/sponsors/colinhacks" diff --git a/apps/frontend/package.json b/apps/frontend/package.json index 1561b64046..3b9e8bda37 100644 --- a/apps/frontend/package.json +++ b/apps/frontend/package.json @@ -48,6 +48,7 @@ "typecheck": "tsc --noEmit" }, "dependencies": { + "@anthropic-ai/sdk": "^0.71.2", "@dnd-kit/core": "^6.3.1", "@dnd-kit/sortable": "^10.0.0", "@dnd-kit/utilities": "^3.2.2", @@ -68,6 +69,7 @@ "@radix-ui/react-tabs": "^1.1.13", "@radix-ui/react-toast": "^1.2.15", "@radix-ui/react-tooltip": "^1.2.8", + "@sentry/electron": "^7.5.0", "@tailwindcss/typography": "^0.5.19", "@tanstack/react-virtual": "^3.13.13", "@xterm/addon-fit": "^0.11.0", @@ -78,11 +80,14 @@ "chokidar": "^5.0.0", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", + "dotenv": "^16.6.1", "electron-log": "^5.4.3", "electron-updater": "^6.6.2", "i18next": "^25.7.3", "lucide-react": "^0.562.0", + "minimatch": "^10.1.1", "motion": "^12.23.26", + "proper-lockfile": "^4.1.2", "react": "^19.2.3", "react-dom": "^19.2.3", "react-i18next": "^16.5.0", @@ -102,7 +107,9 @@ "@eslint/js": "^9.39.1", "@playwright/test": "^1.52.0", "@tailwindcss/postcss": "^4.1.17", + "@testing-library/jest-dom": "^6.9.1", "@testing-library/react": "^16.1.0", + "@types/minimatch": "^5.1.2", "@types/node": "^25.0.0", "@types/react": "^19.2.7", "@types/react-dom": "^19.2.3", @@ -111,7 +118,7 @@ "@vitejs/plugin-react": "^5.1.2", "autoprefixer": "^10.4.22", "cross-env": "^10.1.0", - "electron": "^39.2.7", + "electron": "39.2.7", "electron-builder": "^26.0.12", "electron-vite": "^5.0.0", "eslint": "^9.39.1", @@ -207,7 +214,7 @@ ] }, "linux": { - "icon": "resources/icon.png", + "icon": "resources/icons", "target": [ "AppImage", "deb", diff --git a/apps/frontend/resources/icons/128x128.png b/apps/frontend/resources/icons/128x128.png new file mode 100644 index 0000000000..7e694b434c Binary files /dev/null and b/apps/frontend/resources/icons/128x128.png differ diff --git a/apps/frontend/resources/icons/16x16.png b/apps/frontend/resources/icons/16x16.png new file mode 100644 index 0000000000..bc533838b6 
Binary files /dev/null and b/apps/frontend/resources/icons/16x16.png differ diff --git a/apps/frontend/resources/icons/256x256.png b/apps/frontend/resources/icons/256x256.png new file mode 100644 index 0000000000..555230d363 Binary files /dev/null and b/apps/frontend/resources/icons/256x256.png differ diff --git a/apps/frontend/resources/icons/32x32.png b/apps/frontend/resources/icons/32x32.png new file mode 100644 index 0000000000..227e6db694 Binary files /dev/null and b/apps/frontend/resources/icons/32x32.png differ diff --git a/apps/frontend/resources/icons/48x48.png b/apps/frontend/resources/icons/48x48.png new file mode 100644 index 0000000000..29e6b3bc03 Binary files /dev/null and b/apps/frontend/resources/icons/48x48.png differ diff --git a/apps/frontend/resources/icons/512x512.png b/apps/frontend/resources/icons/512x512.png new file mode 100644 index 0000000000..22d476ffc1 Binary files /dev/null and b/apps/frontend/resources/icons/512x512.png differ diff --git a/apps/frontend/resources/icons/64x64.png b/apps/frontend/resources/icons/64x64.png new file mode 100644 index 0000000000..0068c05929 Binary files /dev/null and b/apps/frontend/resources/icons/64x64.png differ diff --git a/apps/frontend/scripts/download-python.cjs b/apps/frontend/scripts/download-python.cjs index 215af7db3c..6c48dc8981 100644 --- a/apps/frontend/scripts/download-python.cjs +++ b/apps/frontend/scripts/download-python.cjs @@ -609,12 +609,12 @@ function installPackages(pythonBin, requirementsPath, targetSitePackages) { // Install packages directly to target directory // --no-compile: Don't create .pyc files (saves space, Python will work without them) - // --no-cache-dir: Don't use pip cache // --target: Install to specific directory + // Note: We intentionally DO use pip's cache to preserve built wheels for packages + // like real_ladybug that must be compiled from source on Intel Mac (no PyPI wheel) const pipArgs = [ '-m', 'pip', 'install', '--no-compile', - '--no-cache-dir', '--target', targetSitePackages, '-r', requirementsPath, ]; diff --git a/apps/frontend/scripts/postinstall.cjs b/apps/frontend/scripts/postinstall.cjs index 41a8ebe645..e4c02e6dee 100644 --- a/apps/frontend/scripts/postinstall.cjs +++ b/apps/frontend/scripts/postinstall.cjs @@ -42,13 +42,36 @@ To install: ================================================================================ `; +/** + * Get electron version from package.json + */ +function getElectronVersion() { + const pkgPath = path.join(__dirname, '..', 'package.json'); + const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8')); + const electronVersion = pkg.devDependencies?.electron || pkg.dependencies?.electron; + if (!electronVersion) { + return null; + } + // Strip leading ^ or ~ from version + return electronVersion.replace(/^[\^~]/, ''); +} + /** * Run electron-rebuild */ function runElectronRebuild() { return new Promise((resolve, reject) => { const npx = isWindows ? 
'npx.cmd' : 'npx'; - const child = spawn(npx, ['electron-rebuild'], { + const electronVersion = getElectronVersion(); + const args = ['electron-rebuild']; + + // Explicitly pass electron version if detected + if (electronVersion) { + args.push('-v', electronVersion); + console.log(`[postinstall] Using Electron version: ${electronVersion}`); + } + + const child = spawn(npx, args, { stdio: 'inherit', shell: isWindows, cwd: path.join(__dirname, '..'), @@ -70,12 +93,40 @@ function runElectronRebuild() { * Check if node-pty is already built */ function isNodePtyBuilt() { - const buildDir = path.join(__dirname, '..', 'node_modules', 'node-pty', 'build', 'Release'); - if (!fs.existsSync(buildDir)) return false; + // Check traditional node-pty build location (local node_modules) + const localBuildDir = path.join(__dirname, '..', 'node_modules', 'node-pty', 'build', 'Release'); + if (fs.existsSync(localBuildDir)) { + const files = fs.readdirSync(localBuildDir); + if (files.some((f) => f.endsWith('.node'))) return true; + } + + // Check root node_modules (for npm workspaces) + const rootBuildDir = path.join(__dirname, '..', '..', '..', 'node_modules', 'node-pty', 'build', 'Release'); + if (fs.existsSync(rootBuildDir)) { + const files = fs.readdirSync(rootBuildDir); + if (files.some((f) => f.endsWith('.node'))) return true; + } + + // Check for @lydell/node-pty with platform-specific prebuilts + const arch = os.arch(); + const platform = os.platform(); + const platformPkg = `@lydell/node-pty-${platform}-${arch}`; + + // Check local node_modules + const localLydellDir = path.join(__dirname, '..', 'node_modules', platformPkg); + if (fs.existsSync(localLydellDir)) { + const files = fs.readdirSync(localLydellDir); + if (files.some((f) => f.endsWith('.node'))) return true; + } + + // Check root node_modules (for npm workspaces) + const rootLydellDir = path.join(__dirname, '..', '..', '..', 'node_modules', platformPkg); + if (fs.existsSync(rootLydellDir)) { + const files = fs.readdirSync(rootLydellDir); + if (files.some((f) => f.endsWith('.node'))) return true; + } - // Check for the main .node file - const files = fs.readdirSync(buildDir); - return files.some((f) => f.endsWith('.node')); + return false; } /** diff --git a/apps/frontend/src/__mocks__/electron.ts b/apps/frontend/src/__mocks__/electron.ts index 39f45801de..e5569f6893 100644 --- a/apps/frontend/src/__mocks__/electron.ts +++ b/apps/frontend/src/__mocks__/electron.ts @@ -56,7 +56,8 @@ export const ipcRenderer = { on: vi.fn(), once: vi.fn(), removeListener: vi.fn(), - removeAllListeners: vi.fn() + removeAllListeners: vi.fn(), + setMaxListeners: vi.fn() }; // Mock BrowserWindow @@ -125,6 +126,13 @@ export const nativeTheme = { on: vi.fn() }; +// Mock screen +export const screen = { + getPrimaryDisplay: vi.fn(() => ({ + workAreaSize: { width: 1920, height: 1080 } + })) +}; + export default { app, ipcMain, @@ -133,5 +141,6 @@ export default { dialog, contextBridge, shell, - nativeTheme + nativeTheme, + screen }; diff --git a/apps/frontend/src/__mocks__/sentry-electron-main.ts b/apps/frontend/src/__mocks__/sentry-electron-main.ts new file mode 100644 index 0000000000..697d392257 --- /dev/null +++ b/apps/frontend/src/__mocks__/sentry-electron-main.ts @@ -0,0 +1 @@ +export * from './sentry-electron-shared'; diff --git a/apps/frontend/src/__mocks__/sentry-electron-renderer.ts b/apps/frontend/src/__mocks__/sentry-electron-renderer.ts new file mode 100644 index 0000000000..697d392257 --- /dev/null +++ 
b/apps/frontend/src/__mocks__/sentry-electron-renderer.ts @@ -0,0 +1 @@ +export * from './sentry-electron-shared'; diff --git a/apps/frontend/src/__mocks__/sentry-electron-shared.ts b/apps/frontend/src/__mocks__/sentry-electron-shared.ts new file mode 100644 index 0000000000..e2c97e98fe --- /dev/null +++ b/apps/frontend/src/__mocks__/sentry-electron-shared.ts @@ -0,0 +1,26 @@ +export type SentryErrorEvent = Record<string, unknown>; + +export type SentryScope = { + setContext: (key: string, value: Record<string, unknown>) => void; +}; + +export type SentryInitOptions = { + beforeSend?: (event: SentryErrorEvent) => SentryErrorEvent | null; + tracesSampleRate?: number; + profilesSampleRate?: number; + dsn?: string; + environment?: string; + release?: string; + debug?: boolean; + enabled?: boolean; +}; + +export function init(_options: SentryInitOptions): void {} + +export function captureException(_error: Error): void {} + +export function withScope(callback: (scope: SentryScope) => void): void { + callback({ + setContext: () => {} + }); +} diff --git a/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts b/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts index 641f8e968b..432c5f361d 100644 --- a/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts +++ b/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts @@ -11,7 +11,8 @@ const mockIpcRenderer = { on: vi.fn(), once: vi.fn(), removeListener: vi.fn(), - removeAllListeners: vi.fn() + removeAllListeners: vi.fn(), + setMaxListeners: vi.fn() }; // Mock contextBridge diff --git a/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts b/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts index 1ef0da9ded..1d9e0540e1 100644 --- a/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts +++ b/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts @@ -30,9 +30,13 @@ const mockProcess = Object.assign(new EventEmitter(), { }) }); -vi.mock('child_process', () => ({ - spawn: vi.fn(() => mockProcess) -})); +vi.mock('child_process', async (importOriginal) => { + const actual = await importOriginal<typeof import('child_process')>(); + return { + ...actual, + spawn: vi.fn(() => mockProcess) + }; +}); // Mock claude-profile-manager to bypass auth checks in tests vi.mock('../../main/claude-profile-manager', () => ({ @@ -107,7 +111,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test task description'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test task description'); expect(spawn).toHaveBeenCalledWith( EXPECTED_PYTHON_COMMAND, @@ -132,7 +136,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001'); + await manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001'); expect(spawn).toHaveBeenCalledWith( EXPECTED_PYTHON_COMMAND, @@ -154,7 +158,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startQAProcess('task-1', TEST_PROJECT_PATH, 'spec-001'); + await manager.startQAProcess('task-1', TEST_PROJECT_PATH, 'spec-001'); expect(spawn).toHaveBeenCalledWith( EXPECTED_PYTHON_COMMAND, @@ -178,7 +182,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); -
manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001', { + await manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001', { parallel: true, workers: 4 }); @@ -204,7 +208,7 @@ describe('Subprocess Spawn Integration', () => { const logHandler = vi.fn(); manager.on('log', logHandler); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); // Simulate stdout data (must include newline for buffered output processing) mockStdout.emit('data', Buffer.from('Test log output\n')); @@ -220,7 +224,7 @@ describe('Subprocess Spawn Integration', () => { const logHandler = vi.fn(); manager.on('log', logHandler); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); // Simulate stderr data (must include newline for buffered output processing) mockStderr.emit('data', Buffer.from('Progress: 50%\n')); @@ -236,7 +240,7 @@ describe('Subprocess Spawn Integration', () => { const exitHandler = vi.fn(); manager.on('exit', exitHandler); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); // Simulate process exit mockProcess.emit('exit', 0); @@ -253,7 +257,7 @@ describe('Subprocess Spawn Integration', () => { const errorHandler = vi.fn(); manager.on('error', errorHandler); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); // Simulate process error mockProcess.emit('error', new Error('Spawn failed')); @@ -266,7 +270,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); expect(manager.isRunning('task-1')).toBe(true); @@ -293,10 +297,10 @@ describe('Subprocess Spawn Integration', () => { manager.configure(undefined, AUTO_CLAUDE_SOURCE); expect(manager.getRunningTasks()).toHaveLength(0); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); expect(manager.getRunningTasks()).toContain('task-1'); - manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001'); + await manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001'); expect(manager.getRunningTasks()).toHaveLength(2); }); @@ -307,7 +311,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure('/custom/python3', AUTO_CLAUDE_SOURCE); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); expect(spawn).toHaveBeenCalledWith( '/custom/python3', @@ -321,8 +325,8 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); - manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); + await manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001'); await manager.killAll(); @@ -334,10 +338,10 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); 
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); // Start another process for same task - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 2'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 2'); // Should have killed the first one expect(mockProcess.kill).toHaveBeenCalled(); diff --git a/apps/frontend/src/__tests__/setup.ts b/apps/frontend/src/__tests__/setup.ts index 34f7a6465f..dc2c99dd91 100644 --- a/apps/frontend/src/__tests__/setup.ts +++ b/apps/frontend/src/__tests__/setup.ts @@ -28,6 +28,14 @@ Object.defineProperty(global, 'localStorage', { value: localStorageMock }); +// Mock scrollIntoView for Radix Select in jsdom +if (typeof HTMLElement !== 'undefined' && !HTMLElement.prototype.scrollIntoView) { + Object.defineProperty(HTMLElement.prototype, 'scrollIntoView', { + value: vi.fn(), + writable: true + }); +} + // Test data directory for isolated file operations export const TEST_DATA_DIR = '/tmp/auto-claude-ui-tests'; @@ -88,7 +96,14 @@ if (typeof window !== 'undefined') { success: true, data: { openProjectIds: [], activeProjectId: null, tabOrder: [] } }), - saveTabState: vi.fn().mockResolvedValue({ success: true }) + saveTabState: vi.fn().mockResolvedValue({ success: true }), + // Profile-related API methods (API Profile feature) + getAPIProfiles: vi.fn(), + saveAPIProfile: vi.fn(), + updateAPIProfile: vi.fn(), + deleteAPIProfile: vi.fn(), + setActiveAPIProfile: vi.fn(), + testConnection: vi.fn() }; } diff --git a/apps/frontend/src/main/__tests__/claude-cli-utils.test.ts b/apps/frontend/src/main/__tests__/claude-cli-utils.test.ts new file mode 100644 index 0000000000..42bd919b3b --- /dev/null +++ b/apps/frontend/src/main/__tests__/claude-cli-utils.test.ts @@ -0,0 +1,126 @@ +import path from 'path'; +import { beforeEach, describe, expect, it, vi } from 'vitest'; + +const mockGetToolPath = vi.fn<() => string>(); +const mockGetAugmentedEnv = vi.fn<() => Record<string, string>>(); + +vi.mock('../cli-tool-manager', () => ({ + getToolPath: mockGetToolPath, +})); + +vi.mock('../env-utils', () => ({ + getAugmentedEnv: mockGetAugmentedEnv, +})); + +describe('claude-cli-utils', () => { + beforeEach(() => { + mockGetToolPath.mockReset(); + mockGetAugmentedEnv.mockReset(); + vi.resetModules(); + }); + + it('prepends the CLI directory to PATH when the command is absolute', async () => { + const command = process.platform === 'win32' + ? 'C:\\Tools\\claude\\claude.exe' + : '/opt/claude/bin/claude'; + const env = { + PATH: process.platform === 'win32' + ? 'C:\\Windows\\System32' + : '/usr/bin', + HOME: '/tmp', + }; + mockGetToolPath.mockReturnValue(command); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + const separator = process.platform === 'win32' ? ';' : ':'; + expect(result.command).toBe(command); + expect(result.env.PATH.split(separator)[0]).toBe(path.dirname(command)); + expect(result.env.HOME).toBe(env.HOME); + }); + + it('sets PATH to the command directory when PATH is empty', async () => { + const command = process.platform === 'win32' + ?
'C:\\Tools\\claude\\claude.exe' + : '/opt/claude/bin/claude'; + const env = { PATH: '' }; + mockGetToolPath.mockReturnValue(command); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + expect(result.env.PATH).toBe(path.dirname(command)); + }); + + it('sets PATH to the command directory when PATH is missing', async () => { + const command = process.platform === 'win32' + ? 'C:\\Tools\\claude\\claude.exe' + : '/opt/claude/bin/claude'; + const env = {}; + mockGetToolPath.mockReturnValue(command); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + expect(result.env.PATH).toBe(path.dirname(command)); + }); + + it('keeps PATH unchanged when the command is not absolute', async () => { + const env = { + PATH: process.platform === 'win32' + ? 'C:\\Windows;C:\\Windows\\System32' + : '/usr/bin:/bin', + }; + mockGetToolPath.mockReturnValue('claude'); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + expect(result.command).toBe('claude'); + expect(result.env.PATH).toBe(env.PATH); + }); + + it('does not duplicate the command directory in PATH', async () => { + const command = process.platform === 'win32' + ? 'C:\\Tools\\claude\\claude.exe' + : '/opt/claude/bin/claude'; + const commandDir = path.dirname(command); + const separator = process.platform === 'win32' ? ';' : ':'; + const env = { PATH: `${commandDir}${separator}/usr/bin` }; + + mockGetToolPath.mockReturnValue(command); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + expect(result.env.PATH).toBe(env.PATH); + }); + + it('treats PATH entries case-insensitively on Windows', async () => { + const originalPlatform = Object.getOwnPropertyDescriptor(process, 'platform'); + Object.defineProperty(process, 'platform', { value: 'win32' }); + + try { + const command = 'C:\\Tools\\claude\\claude.exe'; + const env = { PATH: 'c:\\tools\\claude;C:\\Windows' }; + + mockGetToolPath.mockReturnValue(command); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + expect(result.env.PATH).toBe(env.PATH); + } finally { + if (originalPlatform) { + Object.defineProperty(process, 'platform', originalPlatform); + } + } + }); +}); diff --git a/apps/frontend/src/main/__tests__/cli-tool-manager.test.ts b/apps/frontend/src/main/__tests__/cli-tool-manager.test.ts new file mode 100644 index 0000000000..b39c588a6d --- /dev/null +++ b/apps/frontend/src/main/__tests__/cli-tool-manager.test.ts @@ -0,0 +1,469 @@ +/** + * Unit tests for cli-tool-manager + * Tests CLI tool detection with focus on NVM path detection + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { existsSync, readdirSync } from 'fs'; +import os from 'os'; +import { execFileSync } from 'child_process'; +import { app } from 'electron'; +import { + getToolInfo, + clearToolCache, + getClaudeDetectionPaths, + sortNvmVersionDirs, + buildClaudeDetectionResult +} from '../cli-tool-manager'; + +// Mock Electron app +vi.mock('electron', () => ({ + app: { + isPackaged: false, + getPath: vi.fn() + } +})); + +// Mock os 
module +vi.mock('os', () => ({ + default: { + homedir: vi.fn(() => '/mock/home') + } +})); + +// Mock fs module - need to mock both sync and promises +vi.mock('fs', () => { + const mockDirent = ( + name: string, + isDir: boolean + ): { name: string; isDirectory: () => boolean } => ({ + name, + isDirectory: () => isDir + }); + + return { + existsSync: vi.fn(), + readdirSync: vi.fn(), + promises: {} + }; +}); + +// Mock child_process for execFileSync and execFile (used in validation) +vi.mock('child_process', () => ({ + execFileSync: vi.fn(), + execFile: vi.fn() +})); + +// Mock env-utils to avoid PATH augmentation complexity +vi.mock('../env-utils', () => ({ + findExecutable: vi.fn(() => null), // Return null to force platform-specific path checking + getAugmentedEnv: vi.fn(() => ({ PATH: '' })) +})); + +// Mock homebrew-python utility +vi.mock('../utils/homebrew-python', () => ({ + findHomebrewPython: vi.fn(() => null) +})); + +describe('cli-tool-manager - Claude CLI NVM detection', () => { + beforeEach(() => { + vi.clearAllMocks(); + // Set default platform to Linux + Object.defineProperty(process, 'platform', { + value: 'linux', + writable: true + }); + }); + + afterEach(() => { + clearToolCache(); + }); + + const mockHomeDir = '/mock/home'; + + describe('NVM path detection on Unix/Linux/macOS', () => { + it('should detect Claude CLI in NVM directory when multiple Node versions exist', () => { + // Mock home directory + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + // Mock NVM directory exists + vi.mocked(existsSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + // NVM versions directory exists + if (pathStr.includes('.nvm/versions/node')) { + return true; + } + // Claude CLI exists in v22.17.0 + if (pathStr.includes('v22.17.0/bin/claude')) { + return true; + } + return false; + }); + + // Mock readdirSync to return Node version directories + vi.mocked(readdirSync).mockImplementation((filePath, options) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return [ + { name: 'v20.11.0', isDirectory: () => true }, + { name: 'v22.17.0', isDirectory: () => true } + ] as any; + } + return [] as any; + }); + + // Mock execFileSync to return version for validation + vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n'); + + const result = getToolInfo('claude'); + + expect(result.found).toBe(true); + expect(result.path).toContain('v22.17.0'); + expect(result.path).toContain('bin/claude'); + expect(result.source).toBe('nvm'); + }); + + it('should try multiple NVM Node versions until finding Claude CLI', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + vi.mocked(existsSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return true; + } + // Only v24.12.0 has Claude CLI + if (pathStr.includes('v24.12.0/bin/claude')) { + return true; + } + return false; + }); + + vi.mocked(readdirSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return [ + { name: 'v18.20.0', isDirectory: () => true }, + { name: 'v20.11.0', isDirectory: () => true }, + { name: 'v24.12.0', isDirectory: () => true } + ] as any; + } + return [] as any; + }); + + vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n'); + + const result = getToolInfo('claude'); + + expect(result.found).toBe(true); + expect(result.path).toContain('v24.12.0'); + 
expect(result.source).toBe('nvm'); + }); + + it('should skip non-version directories in NVM (e.g., does not start with "v")', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + vi.mocked(existsSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return true; + } + // Only the correctly named version has Claude + if (pathStr.includes('v22.17.0/bin/claude')) { + return true; + } + return false; + }); + + vi.mocked(readdirSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return [ + { name: 'current', isDirectory: () => true }, // Should be skipped + { name: 'system', isDirectory: () => true }, // Should be skipped + { name: 'v22.17.0', isDirectory: () => true } // Should be checked + ] as any; + } + return [] as any; + }); + + vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n'); + + const result = getToolInfo('claude'); + + expect(result.found).toBe(true); + expect(result.path).toContain('v22.17.0'); + }); + + it('should not check NVM paths on Windows', () => { + Object.defineProperty(process, 'platform', { + value: 'win32', + writable: true + }); + + vi.mocked(os.homedir).mockReturnValue('C:\\Users\\test'); + + // Even if NVM directory exists on Windows, should not check it + vi.mocked(existsSync).mockReturnValue(false); + vi.mocked(readdirSync).mockReturnValue([]); + + const result = getToolInfo('claude'); + + // Should not be found from NVM on Windows + expect(result.source).not.toBe('nvm'); + }); + + it('should handle missing NVM directory gracefully', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + // NVM directory does not exist + vi.mocked(existsSync).mockReturnValue(false); + + const result = getToolInfo('claude'); + + // Should not find via NVM + expect(result.source).not.toBe('nvm'); + expect(result.found).toBe(false); + }); + + it('should handle readdirSync errors gracefully', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + vi.mocked(existsSync).mockReturnValue(true); + vi.mocked(readdirSync).mockImplementation(() => { + throw new Error('Permission denied'); + }); + + const result = getToolInfo('claude'); + + // Should not crash, should fall back to other detection methods + expect(result.source).not.toBe('nvm'); + }); + + it('should validate Claude CLI before returning NVM path', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + vi.mocked(existsSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return true; + } + if (pathStr.includes('v22.17.0/bin/claude')) { + return true; + } + return false; + }); + + vi.mocked(readdirSync).mockImplementation(() => { + return [{ name: 'v22.17.0', isDirectory: () => true }] as any; + }); + + // Mock validation failure (execFileSync throws) + vi.mocked(execFileSync).mockImplementation(() => { + throw new Error('Command failed'); + }); + + const result = getToolInfo('claude'); + + // Should not return unvalidated path + expect(result.found).toBe(false); + expect(result.source).not.toBe('nvm'); + }); + + it('should handle NVM directory with no version subdirectories', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + vi.mocked(existsSync).mockImplementation((filePath) => { + return String(filePath).includes('.nvm/versions/node'); + }); + + // Empty NVM directory + vi.mocked(readdirSync).mockReturnValue([]); + + const result = 
getToolInfo('claude'); + + expect(result.source).not.toBe('nvm'); + }); + }); + + describe('NVM on macOS', () => { + it('should detect Claude CLI via NVM on macOS', () => { + Object.defineProperty(process, 'platform', { + value: 'darwin', + writable: true + }); + + vi.mocked(os.homedir).mockReturnValue('/Users/test'); + + vi.mocked(existsSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return true; + } + if (pathStr.includes('v22.17.0/bin/claude')) { + return true; + } + return false; + }); + + vi.mocked(readdirSync).mockImplementation(() => { + return [{ name: 'v22.17.0', isDirectory: () => true }] as any; + }); + + vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n'); + + const result = getToolInfo('claude'); + + expect(result.found).toBe(true); + expect(result.source).toBe('nvm'); + expect(result.path).toContain('v22.17.0'); + }); + }); +}); + +/** + * Unit tests for helper functions + */ +describe('cli-tool-manager - Helper Functions', () => { + describe('getClaudeDetectionPaths', () => { + it('should return homebrew paths on macOS', () => { + Object.defineProperty(process, 'platform', { + value: 'darwin', + writable: true + }); + + const paths = getClaudeDetectionPaths('/Users/test'); + + expect(paths.homebrewPaths).toContain('/opt/homebrew/bin/claude'); + expect(paths.homebrewPaths).toContain('/usr/local/bin/claude'); + }); + + it('should return Windows paths on win32', () => { + Object.defineProperty(process, 'platform', { + value: 'win32', + writable: true + }); + + const paths = getClaudeDetectionPaths('C:\\Users\\test'); + + // Windows paths should include AppData and Program Files + expect(paths.platformPaths.some(p => p.includes('AppData'))).toBe(true); + expect(paths.platformPaths.some(p => p.includes('Program Files'))).toBe(true); + }); + + it('should return Unix paths on Linux', () => { + Object.defineProperty(process, 'platform', { + value: 'linux', + writable: true + }); + + const paths = getClaudeDetectionPaths('/home/test'); + + expect(paths.platformPaths.some(p => p.includes('.local/bin/claude'))).toBe(true); + expect(paths.platformPaths.some(p => p.includes('bin/claude'))).toBe(true); + }); + + it('should return correct NVM versions directory', () => { + const paths = getClaudeDetectionPaths('/home/test'); + + expect(paths.nvmVersionsDir).toBe('/home/test/.nvm/versions/node'); + }); + }); + + describe('sortNvmVersionDirs', () => { + it('should sort versions in descending order (newest first)', () => { + const entries = [ + { name: 'v18.20.0', isDirectory: () => true }, + { name: 'v22.17.0', isDirectory: () => true }, + { name: 'v20.11.0', isDirectory: () => true } + ]; + + const sorted = sortNvmVersionDirs(entries); + + expect(sorted).toEqual(['v22.17.0', 'v20.11.0', 'v18.20.0']); + }); + + it('should filter out non-version directories', () => { + const entries = [ + { name: 'v20.11.0', isDirectory: () => true }, + { name: '.DS_Store', isDirectory: () => false }, + { name: 'node_modules', isDirectory: () => true }, + { name: 'current', isDirectory: () => true }, + { name: 'v22.17.0', isDirectory: () => true } + ]; + + const sorted = sortNvmVersionDirs(entries); + + expect(sorted).toEqual(['v22.17.0', 'v20.11.0']); + expect(sorted).not.toContain('.DS_Store'); + expect(sorted).not.toContain('node_modules'); + expect(sorted).not.toContain('current'); + }); + + it('should return empty array when no valid versions', () => { + const entries = [ + { name: 'current', isDirectory: () 
=> true }, + { name: 'system', isDirectory: () => true } + ]; + + const sorted = sortNvmVersionDirs(entries); + + expect(sorted).toEqual([]); + }); + + it('should handle single entry', () => { + const entries = [{ name: 'v20.11.0', isDirectory: () => true }]; + + const sorted = sortNvmVersionDirs(entries); + + expect(sorted).toEqual(['v20.11.0']); + }); + + it('should handle empty array', () => { + const sorted = sortNvmVersionDirs([]); + + expect(sorted).toEqual([]); + }); + }); + + describe('buildClaudeDetectionResult', () => { + it('should return null when validation fails', () => { + const result = buildClaudeDetectionResult( + '/path/to/claude', + { valid: false, message: 'Invalid CLI' }, + 'nvm', + 'Found via NVM' + ); + + expect(result).toBeNull(); + }); + + it('should return proper result when validation succeeds', () => { + const result = buildClaudeDetectionResult( + '/path/to/claude', + { valid: true, version: '1.0.0', message: 'Valid' }, + 'nvm', + 'Found via NVM' + ); + + expect(result).not.toBeNull(); + expect(result?.found).toBe(true); + expect(result?.path).toBe('/path/to/claude'); + expect(result?.version).toBe('1.0.0'); + expect(result?.source).toBe('nvm'); + expect(result?.message).toContain('Found via NVM'); + expect(result?.message).toContain('/path/to/claude'); + }); + + it('should include path in message', () => { + const result = buildClaudeDetectionResult( + '/home/user/.nvm/versions/node/v22.17.0/bin/claude', + { valid: true, version: '2.0.0', message: 'OK' }, + 'nvm', + 'Detected Claude CLI' + ); + + expect(result?.message).toContain('Detected Claude CLI'); + expect(result?.message).toContain('/home/user/.nvm/versions/node/v22.17.0/bin/claude'); + }); + }); +}); diff --git a/apps/frontend/src/main/__tests__/env-handlers-claude-cli.test.ts b/apps/frontend/src/main/__tests__/env-handlers-claude-cli.test.ts new file mode 100644 index 0000000000..bbcbdc354a --- /dev/null +++ b/apps/frontend/src/main/__tests__/env-handlers-claude-cli.test.ts @@ -0,0 +1,232 @@ +import { EventEmitter } from 'events'; +import path from 'path'; +import { beforeEach, describe, expect, it, vi } from 'vitest'; +import { IPC_CHANNELS } from '../../shared/constants'; +const { + mockGetClaudeCliInvocation, + mockGetClaudeCliInvocationAsync, + mockGetProject, + spawnMock, + mockIpcMain, +} = vi.hoisted(() => { + const ipcMain = new (class { + handlers = new Map(); + + handle(channel: string, handler: Function): void { + this.handlers.set(channel, handler); + } + + getHandler(channel: string): Function | undefined { + return this.handlers.get(channel); + } + })(); + + return { + mockGetClaudeCliInvocation: vi.fn(), + mockGetClaudeCliInvocationAsync: vi.fn(), + mockGetProject: vi.fn(), + spawnMock: vi.fn(), + mockIpcMain: ipcMain, + }; +}); + +vi.mock('../claude-cli-utils', () => ({ + getClaudeCliInvocation: mockGetClaudeCliInvocation, + getClaudeCliInvocationAsync: mockGetClaudeCliInvocationAsync, +})); + +vi.mock('../project-store', () => ({ + projectStore: { + getProject: mockGetProject, + }, +})); + +vi.mock('child_process', () => ({ + spawn: spawnMock, +})); + +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') return path.join('/tmp', 'userData'); + return '/tmp'; + }), + }, + ipcMain: mockIpcMain, +})); + +import { registerEnvHandlers } from '../ipc-handlers/env-handlers'; + +function createProc(): EventEmitter & { stdout?: EventEmitter; stderr?: EventEmitter } { + const proc = new EventEmitter() as EventEmitter & { + stdout?: EventEmitter; 
+ stderr?: EventEmitter; + }; + proc.stdout = new EventEmitter(); + proc.stderr = new EventEmitter(); + return proc; +} + +// Helper to flush all pending promises (needed for async mock resolution) +function flushPromises(): Promise<void> { + return new Promise(resolve => setTimeout(resolve, 0)); +} + +describe('env-handlers Claude CLI usage', () => { + beforeEach(() => { + mockGetClaudeCliInvocation.mockReset(); + mockGetClaudeCliInvocationAsync.mockReset(); + mockGetProject.mockReset(); + spawnMock.mockReset(); + }); + + it('uses resolved Claude CLI path/env for auth checks', async () => { + const claudeEnv = { PATH: '/opt/claude/bin:/usr/bin' }; + const command = '/opt/claude/bin/claude'; + mockGetClaudeCliInvocationAsync.mockResolvedValue({ + command, + env: claudeEnv, + }); + mockGetProject.mockReturnValue({ id: 'p1', path: '/tmp/project' }); + + const procs: ReturnType<typeof createProc>[] = []; + spawnMock.mockImplementation(() => { + const proc = createProc(); + procs.push(proc); + return proc; + }); + + registerEnvHandlers(() => null); + const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH); + if (!handler) { + throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered'); + } + + const resultPromise = handler({}, 'p1'); + // Wait for async CLI resolution before checking spawn + await flushPromises(); + expect(spawnMock).toHaveBeenCalledTimes(1); + expect(spawnMock).toHaveBeenCalledWith( + command, + ['--version'], + expect.objectContaining({ cwd: '/tmp/project', env: claudeEnv, shell: false }) + ); + + procs[0].emit('close', 0); + await Promise.resolve(); + + expect(spawnMock).toHaveBeenCalledTimes(2); + expect(spawnMock).toHaveBeenCalledWith( + command, + ['api', '--help'], + expect.objectContaining({ cwd: '/tmp/project', env: claudeEnv, shell: false }) ); + + procs[1].emit('close', 0); + + const result = await resultPromise; + expect(result).toEqual({ success: true, data: { success: true, authenticated: true } }); + }); + + it('uses resolved Claude CLI path/env for setup-token', async () => { + const claudeEnv = { PATH: '/opt/claude/bin:/usr/bin' }; + const command = '/opt/claude/bin/claude'; + mockGetClaudeCliInvocationAsync.mockResolvedValue({ + command, + env: claudeEnv, + }); + mockGetProject.mockReturnValue({ id: 'p2', path: '/tmp/project' }); + + const proc = createProc(); + spawnMock.mockReturnValue(proc); + + registerEnvHandlers(() => null); + const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_INVOKE_CLAUDE_SETUP); + if (!handler) { + throw new Error('ENV_INVOKE_CLAUDE_SETUP handler not registered'); + } + + const resultPromise = handler({}, 'p2'); + // Wait for async CLI resolution before checking spawn + await flushPromises(); + expect(spawnMock).toHaveBeenCalledWith( + command, + ['setup-token'], + expect.objectContaining({ + cwd: '/tmp/project', + env: claudeEnv, + shell: false, + stdio: 'inherit' + }) + ); + + proc.emit('close', 0); + const result = await resultPromise; + expect(result).toEqual({ success: true, data: { success: true, authenticated: true } }); + }); + + it('returns an error when Claude CLI resolution throws', async () => { + mockGetClaudeCliInvocationAsync.mockRejectedValue(new Error('Claude CLI exploded')); + mockGetProject.mockReturnValue({ id: 'p3', path: '/tmp/project' }); + + registerEnvHandlers(() => null); + const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH); + if (!handler) { + throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered'); + } + + const result = await handler({}, 'p3');
expect(result.success).toBe(false); + expect(result.error).toContain('Claude CLI exploded'); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it('returns an error when Claude CLI command is missing', async () => { + mockGetClaudeCliInvocationAsync.mockResolvedValue({ command: '', env: {} }); + mockGetProject.mockReturnValue({ id: 'p4', path: '/tmp/project' }); + + registerEnvHandlers(() => null); + const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH); + if (!handler) { + throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered'); + } + + const result = await handler({}, 'p4'); + expect(result.success).toBe(false); + expect(result.error).toContain('Claude CLI path not resolved'); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it('returns an error when Claude CLI exits with a non-zero code', async () => { + const claudeEnv = { PATH: '/opt/claude/bin:/usr/bin' }; + const command = '/opt/claude/bin/claude'; + mockGetClaudeCliInvocationAsync.mockResolvedValue({ + command, + env: claudeEnv, + }); + mockGetProject.mockReturnValue({ id: 'p5', path: '/tmp/project' }); + + const proc = createProc(); + spawnMock.mockReturnValue(proc); + + registerEnvHandlers(() => null); + const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH); + if (!handler) { + throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered'); + } + + const resultPromise = handler({}, 'p5'); + // Wait for async CLI resolution before checking spawn + await flushPromises(); + expect(spawnMock).toHaveBeenCalledWith( + command, + ['--version'], + expect.objectContaining({ cwd: '/tmp/project', env: claudeEnv, shell: false }) + ); + proc.emit('close', 1); + + const result = await resultPromise; + expect(result.success).toBe(false); + expect(result.error).toContain('Claude CLI not found'); + }); +}); diff --git a/apps/frontend/src/main/__tests__/insights-config.test.ts b/apps/frontend/src/main/__tests__/insights-config.test.ts new file mode 100644 index 0000000000..5775d65ab0 --- /dev/null +++ b/apps/frontend/src/main/__tests__/insights-config.test.ts @@ -0,0 +1,99 @@ +/** + * @vitest-environment node + */ +import path from 'path'; +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { InsightsConfig } from '../insights/config'; + +vi.mock('electron', () => ({ + app: { + getAppPath: () => '/app', + getPath: () => '/tmp', + isPackaged: false + } +})); + +vi.mock('../rate-limit-detector', () => ({ + getProfileEnv: () => ({ CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token' }) +})); + +const mockGetApiProfileEnv = vi.fn(); +vi.mock('../services/profile', () => ({ + getAPIProfileEnv: (...args: unknown[]) => mockGetApiProfileEnv(...args) +})); + +const mockGetPythonEnv = vi.fn(); +vi.mock('../python-env-manager', () => ({ + pythonEnvManager: { + getPythonEnv: () => mockGetPythonEnv() + } +})); + +describe('InsightsConfig', () => { + const originalEnv = { ...process.env }; + + beforeEach(() => { + process.env = { ...originalEnv, TEST_ENV: 'ok' }; + mockGetApiProfileEnv.mockResolvedValue({ + ANTHROPIC_BASE_URL: 'https://api.z.ai', + ANTHROPIC_AUTH_TOKEN: 'key' + }); + mockGetPythonEnv.mockReturnValue({ PYTHONPATH: '/site-packages' }); + }); + + afterEach(() => { + process.env = { ...originalEnv }; + vi.clearAllMocks(); + vi.restoreAllMocks(); + }); + + it('should build process env with python and profile settings', async () => { + const config = new InsightsConfig(); + vi.spyOn(config, 'loadAutoBuildEnv').mockReturnValue({ CUSTOM_ENV: '1' }); + vi.spyOn(config, 
'getAutoBuildSourcePath').mockReturnValue('/backend'); + + const env = await config.getProcessEnv(); + + expect(env.TEST_ENV).toBe('ok'); + expect(env.CUSTOM_ENV).toBe('1'); + expect(env.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token'); + expect(env.ANTHROPIC_BASE_URL).toBe('https://api.z.ai'); + expect(env.ANTHROPIC_AUTH_TOKEN).toBe('key'); + expect(env.PYTHONPATH).toBe(['/site-packages', '/backend'].join(path.delimiter)); + }); + + it('should clear ANTHROPIC env vars in OAuth mode when no API profile is set', async () => { + const config = new InsightsConfig(); + mockGetApiProfileEnv.mockResolvedValue({}); + process.env = { + ...originalEnv, + ANTHROPIC_AUTH_TOKEN: 'stale-token', + ANTHROPIC_BASE_URL: 'https://stale.example' + }; + + const env = await config.getProcessEnv(); + + expect(env.ANTHROPIC_AUTH_TOKEN).toBe(''); + expect(env.ANTHROPIC_BASE_URL).toBe(''); + }); + + it('should set PYTHONPATH only to auto-build path when python env has none', async () => { + const config = new InsightsConfig(); + mockGetPythonEnv.mockReturnValue({}); + vi.spyOn(config, 'getAutoBuildSourcePath').mockReturnValue('/backend'); + + const env = await config.getProcessEnv(); + + expect(env.PYTHONPATH).toBe('/backend'); + }); + + it('should keep PYTHONPATH from python env when auto-build path is missing', async () => { + const config = new InsightsConfig(); + mockGetPythonEnv.mockReturnValue({ PYTHONPATH: '/site-packages' }); + vi.spyOn(config, 'getAutoBuildSourcePath').mockReturnValue(null); + + const env = await config.getProcessEnv(); + + expect(env.PYTHONPATH).toBe('/site-packages'); + }); +}); diff --git a/apps/frontend/src/main/__tests__/ipc-handlers.test.ts b/apps/frontend/src/main/__tests__/ipc-handlers.test.ts index 86699e5c7c..af33364513 100644 --- a/apps/frontend/src/main/__tests__/ipc-handlers.test.ts +++ b/apps/frontend/src/main/__tests__/ipc-handlers.test.ts @@ -139,7 +139,8 @@ function cleanupTestDirs(): void { } } -describe('IPC Handlers', () => { +// Increase timeout for all tests in this file due to dynamic imports and setup overhead +describe('IPC Handlers', { timeout: 15000 }, () => { let ipcMain: EventEmitter & { handlers: Map<string, Function>; invokeHandler: (channel: string, event: unknown, ...args: unknown[]) => Promise<unknown>; diff --git a/apps/frontend/src/main/agent/agent-manager.ts b/apps/frontend/src/main/agent/agent-manager.ts index a0d65d1fae..0f387d1865 100644 --- a/apps/frontend/src/main/agent/agent-manager.ts +++ b/apps/frontend/src/main/agent/agent-manager.ts @@ -87,14 +87,14 @@ export class AgentManager extends EventEmitter { /** * Start spec creation process */ - startSpecCreation( + async startSpecCreation( taskId: string, projectPath: string, taskDescription: string, specDir?: string, metadata?: SpecCreationMetadata, baseBranch?: string - ): void { + ): Promise<void> { // Pre-flight auth check: Verify active profile has valid authentication const profileManager = getClaudeProfileManager(); if (!profileManager.hasValidAuth()) { @@ -156,18 +156,18 @@ export class AgentManager extends EventEmitter { this.storeTaskContext(taskId, projectPath, '', {}, true, taskDescription, specDir, metadata, baseBranch); // Note: This is spec-creation but it chains to task-execution via run.py - this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution'); + await this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution'); } /** * Start task execution (run.py) */ - startTaskExecution( + async startTaskExecution( taskId: string, projectPath: string,
specId: string, options: TaskExecutionOptions = {} - ): void { + ): Promise<void> { // Pre-flight auth check: Verify active profile has valid authentication const profileManager = getClaudeProfileManager(); if (!profileManager.hasValidAuth()) { @@ -213,17 +213,17 @@ export class AgentManager extends EventEmitter { // Store context for potential restart this.storeTaskContext(taskId, projectPath, specId, options, false); - this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution'); + await this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution'); } /** * Start QA process */ - startQAProcess( + async startQAProcess( taskId: string, projectPath: string, specId: string - ): void { + ): Promise<void> { const autoBuildSource = this.processManager.getAutoBuildSourcePath(); if (!autoBuildSource) { @@ -243,7 +243,7 @@ export class AgentManager extends EventEmitter { const args = [runPath, '--spec', specId, '--project-dir', projectPath, '--qa']; - this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'qa-process'); + await this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'qa-process'); } /** diff --git a/apps/frontend/src/main/agent/agent-process.test.ts b/apps/frontend/src/main/agent/agent-process.test.ts new file mode 100644 index 0000000000..db992bb598 --- /dev/null +++ b/apps/frontend/src/main/agent/agent-process.test.ts @@ -0,0 +1,494 @@ +/** + * Integration tests for AgentProcessManager + * Tests API profile environment variable injection into spawnProcess + * + * Story 2.3: Env Var Injection - AC1, AC2, AC3, AC4 + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { EventEmitter } from 'events'; + +// Create a mock process object that will be returned by spawn +function createMockProcess() { + return { + stdout: { on: vi.fn() }, + stderr: { on: vi.fn() }, + on: vi.fn((event: string, callback: any) => { + if (event === 'exit') { + // Simulate immediate exit with code 0 + setTimeout(() => callback(0), 10); + } + }), + kill: vi.fn() + }; +} + +// Mock child_process - must be BEFORE imports of modules that use it +const spawnCalls: Array<{ command: string; args: string[]; options: { env: Record<string, string>; cwd?: string; [key: string]: unknown } }> = []; + +vi.mock('child_process', async (importOriginal) => { + const actual = await importOriginal<typeof import('child_process')>(); + const mockSpawn = vi.fn((command: string, args: string[], options: { env: Record<string, string>; cwd?: string; [key: string]: unknown }) => { + // Record the call for test assertions + spawnCalls.push({ command, args, options }); + return createMockProcess(); + }); + + return { + ...actual, + spawn: mockSpawn, + execSync: vi.fn((command: string) => { + if (command.includes('git')) { + return '/fake/path'; + } + return ''; + }) + }; +}); + +// Mock project-initializer to avoid child_process.execSync issues +vi.mock('../project-initializer', () => ({ + getAutoBuildPath: vi.fn(() => '/fake/auto-build'), + isInitialized: vi.fn(() => true), + initializeProject: vi.fn(), + getProjectStorePath: vi.fn(() => '/fake/store/path') +})); + +// Mock project-store BEFORE agent-process imports it +vi.mock('../project-store', () => ({ + projectStore: { + getProject: vi.fn(), + listProjects: vi.fn(), + createProject: vi.fn(), + updateProject: vi.fn(), + deleteProject: vi.fn(), + getProjectSettings: vi.fn(), + updateProjectSettings: vi.fn() + } +})); + +// Mock claude-profile-manager +vi.mock('../claude-profile-manager', () => ({ +
getClaudeProfileManager: vi.fn(() => ({ + getProfilePath: vi.fn(() => '/fake/profile/path'), + ensureProfileDir: vi.fn(), + readProfile: vi.fn(), + writeProfile: vi.fn(), + deleteProfile: vi.fn() + })) +})); + +// Mock dependencies +vi.mock('../services/profile', () => ({ + getAPIProfileEnv: vi.fn() +})); + +vi.mock('../rate-limit-detector', () => ({ + getProfileEnv: vi.fn(() => ({})), + detectRateLimit: vi.fn(() => ({ isRateLimited: false })), + createSDKRateLimitInfo: vi.fn(), + detectAuthFailure: vi.fn(() => ({ isAuthFailure: false })) +})); + +vi.mock('../python-detector', () => ({ + findPythonCommand: vi.fn(() => 'python'), + parsePythonCommand: vi.fn(() => ['python', []]) +})); + +vi.mock('electron', () => ({ + app: { + getAppPath: vi.fn(() => '/fake/app/path') + } +})); + +// Import AFTER all mocks are set up +import { AgentProcessManager } from './agent-process'; +import { AgentState } from './agent-state'; +import { AgentEvents } from './agent-events'; +import * as profileService from '../services/profile'; +import * as rateLimitDetector from '../rate-limit-detector'; + +describe('AgentProcessManager - API Profile Env Injection (Story 2.3)', () => { + let processManager: AgentProcessManager; + let state: AgentState; + let events: AgentEvents; + let emitter: EventEmitter; + + beforeEach(() => { + // Reset all mocks and spawn calls + vi.clearAllMocks(); + spawnCalls.length = 0; + + // Clear environment variables that could interfere with tests + delete process.env.ANTHROPIC_AUTH_TOKEN; + delete process.env.ANTHROPIC_BASE_URL; + delete process.env.CLAUDE_CODE_OAUTH_TOKEN; + + // Initialize components + state = new AgentState(); + events = new AgentEvents(); + emitter = new EventEmitter(); + processManager = new AgentProcessManager(state, events, emitter); + }); + + afterEach(() => { + processManager.killAllProcesses(); + }); + + describe('AC1: API Profile Env Var Injection', () => { + it('should inject ANTHROPIC_BASE_URL when active profile has baseUrl', async () => { + const mockApiProfileEnv = { + ANTHROPIC_BASE_URL: 'https://custom.api.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + expect(spawnCalls).toHaveLength(1); + expect(spawnCalls[0].command).toBe('python'); + expect(spawnCalls[0].args).toContain('run.py'); + expect(spawnCalls[0].options.env).toMatchObject({ + ANTHROPIC_BASE_URL: 'https://custom.api.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key' + }); + }); + + it('should inject ANTHROPIC_AUTH_TOKEN when active profile has apiKey', async () => { + const mockApiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-custom-key-12345678' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + expect(spawnCalls).toHaveLength(1); + expect(spawnCalls[0].options.env.ANTHROPIC_AUTH_TOKEN).toBe('sk-custom-key-12345678'); + }); + + it('should inject model env vars when active profile has models configured', async () => { + const mockApiProfileEnv = { + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_HAIKU_MODEL: 'claude-3-5-haiku-20241022', + ANTHROPIC_DEFAULT_SONNET_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_OPUS_MODEL: 'claude-3-5-opus-20241022' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + await 
processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + expect(spawnCalls).toHaveLength(1); + expect(spawnCalls[0].options.env).toMatchObject({ + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_HAIKU_MODEL: 'claude-3-5-haiku-20241022', + ANTHROPIC_DEFAULT_SONNET_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_OPUS_MODEL: 'claude-3-5-opus-20241022' + }); + }); + + it('should give API profile env vars highest precedence over extraEnv', async () => { + const extraEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-extra-token', + ANTHROPIC_BASE_URL: 'https://extra.com' + }; + + const mockApiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-profile-token', + ANTHROPIC_BASE_URL: 'https://profile.com' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], extraEnv, 'task-execution'); + + expect(spawnCalls).toHaveLength(1); + // API profile should override extraEnv + expect(spawnCalls[0].options.env.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-token'); + expect(spawnCalls[0].options.env.ANTHROPIC_BASE_URL).toBe('https://profile.com'); + }); + }); + + describe('AC2: OAuth Mode (No Active Profile)', () => { + let originalEnv: NodeJS.ProcessEnv; + + beforeEach(() => { + // Save original environment before each test + originalEnv = { ...process.env }; + }); + + afterEach(() => { + // Restore original environment after each test + process.env = originalEnv; + }); + + it('should NOT set ANTHROPIC_AUTH_TOKEN when no active profile (OAuth mode)', async () => { + // Return empty object = OAuth mode + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({}); + + // Set OAuth token via getProfileEnv (existing flow) + vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue({ + CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-123' + }); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + expect(spawnCalls).toHaveLength(1); + const envArg = spawnCalls[0].options.env as Record; + expect(envArg.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token-123'); + // OAuth mode clears ANTHROPIC_AUTH_TOKEN with empty string (not undefined) + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe(''); + }); + + it('should return empty object from getAPIProfileEnv when activeProfileId is null', async () => { + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({}); + + const result = await profileService.getAPIProfileEnv(); + expect(result).toEqual({}); + }); + + it('should clear stale ANTHROPIC_AUTH_TOKEN from process.env when switching to OAuth mode', async () => { + // Simulate process.env having stale ANTHROPIC_* vars from previous session + process.env = { + ...originalEnv, + ANTHROPIC_AUTH_TOKEN: 'stale-token-from-env', + ANTHROPIC_BASE_URL: 'https://stale.example.com' + }; + + // OAuth mode - no active API profile + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({}); + + // Set OAuth token + vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue({ + CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-456' + }); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const envArg = spawnCalls[0].options.env as Record; + + // OAuth token should be present + expect(envArg.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token-456'); + + // Stale ANTHROPIC_* vars should be cleared (empty string overrides process.env) + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe(''); + expect(envArg.ANTHROPIC_BASE_URL).toBe(''); + 
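// A minimal sketch (not from the codebase) of the precedence rule the assertions above
// verify: object spreads apply left to right, so whichever source is spread last wins
// for a given key. With apiProfileEnv spread after extraEnv:
const extra = { ANTHROPIC_AUTH_TOKEN: 'sk-extra-token' };
const apiProfile = { ANTHROPIC_AUTH_TOKEN: 'sk-profile-token' };
const mergedEnv = { ...extra, ...apiProfile };
console.log(mergedEnv.ANTHROPIC_AUTH_TOKEN); // 'sk-profile-token' -- the API profile wins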
}); + + it('should clear stale ANTHROPIC_BASE_URL when switching to OAuth mode', async () => { + process.env = { + ...originalEnv, + ANTHROPIC_BASE_URL: 'https://old-custom-endpoint.com' + }; + + // OAuth mode + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({}); + vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue({ + CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-789' + }); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const envArg = spawnCalls[0].options.env as Record; + + // Should clear the base URL (so Python uses default api.anthropic.com) + expect(envArg.ANTHROPIC_BASE_URL).toBe(''); + expect(envArg.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token-789'); + }); + + it('should NOT clear ANTHROPIC_* vars when API Profile is active', async () => { + process.env = { + ...originalEnv, + ANTHROPIC_AUTH_TOKEN: 'old-token-in-env' + }; + + // API Profile mode - active profile + const mockApiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-profile-active', + ANTHROPIC_BASE_URL: 'https://active-profile.com' + }; + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const envArg = spawnCalls[0].options.env as Record; + + // Should use API profile vars, NOT clear them + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-active'); + expect(envArg.ANTHROPIC_BASE_URL).toBe('https://active-profile.com'); + }); + }); + + describe('AC4: No API Key Logging', () => { + it('should never log full API keys in spawn env vars', async () => { + const mockApiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-sensitive-api-key-12345678', + ANTHROPIC_BASE_URL: 'https://api.example.com' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + // Mock ALL console methods to capture any debug/error output + const consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + const consoleWarnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {}); + const consoleDebugSpy = vi.spyOn(console, 'debug').mockImplementation(() => {}); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + // Get the env object passed to spawn + const envArg = spawnCalls[0].options.env as Record; + + // Verify the full API key is in the env (for Python subprocess) + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('sk-sensitive-api-key-12345678'); + + // Collect ALL console output from all methods + const allLogCalls = [ + ...consoleLogSpy.mock.calls, + ...consoleErrorSpy.mock.calls, + ...consoleWarnSpy.mock.calls, + ...consoleDebugSpy.mock.calls + ].flatMap(call => call.map(String)); + const logString = JSON.stringify(allLogCalls); + + // The full API key should NOT appear in any logs (AC4 compliance) + expect(logString).not.toContain('sk-sensitive-api-key-12345678'); + + // Restore all spies + consoleLogSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleDebugSpy.mockRestore(); + }); + + it('should not log API key even in error scenarios', async () => { + const mockApiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-secret-key-for-error-test', + ANTHROPIC_BASE_URL: 'https://api.example.com' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + // Mock console methods + const consoleErrorSpy = vi.spyOn(console, 
'error').mockImplementation(() => {}); + const consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + // Collect all error and log output + const allOutput = [ + ...consoleErrorSpy.mock.calls, + ...consoleLogSpy.mock.calls + ].flatMap(call => call.map(arg => typeof arg === 'object' ? JSON.stringify(arg) : String(arg))); + const outputString = allOutput.join(' '); + + // Verify API key is never exposed in logs + expect(outputString).not.toContain('sk-secret-key-for-error-test'); + + consoleErrorSpy.mockRestore(); + consoleLogSpy.mockRestore(); + }); + }); + + describe('AC3: Profile Switching Between Builds', () => { + it('should allow different profiles for different spawn calls', async () => { + // First spawn with Profile A + const profileAEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-profile-a', + ANTHROPIC_BASE_URL: 'https://api-a.com' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValueOnce(profileAEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const firstEnv = spawnCalls[0].options.env as Record; + expect(firstEnv.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-a'); + + // Second spawn with Profile B (user switched active profile) + const profileBEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-profile-b', + ANTHROPIC_BASE_URL: 'https://api-b.com' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValueOnce(profileBEnv); + + await processManager.spawnProcess('task-2', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const secondEnv = spawnCalls[1].options.env as Record; + expect(secondEnv.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-b'); + + // Verify first spawn's env is NOT affected by second spawn + expect(firstEnv.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-a'); + }); + }); + + describe('Integration: Combined env precedence', () => { + it('should merge env vars in correct precedence order', async () => { + const extraEnv = { + CUSTOM_VAR: 'from-extra' + }; + + const profileEnv = { + CLAUDE_CONFIG_DIR: '/custom/config' + }; + + const apiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-api-profile', + ANTHROPIC_BASE_URL: 'https://api-profile.com' + }; + + vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue(profileEnv); + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(apiProfileEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], extraEnv, 'task-execution'); + + const envArg = spawnCalls[0].options.env as Record; + + // Verify all sources are included + expect(envArg.CUSTOM_VAR).toBe('from-extra'); // From extraEnv + expect(envArg.CLAUDE_CONFIG_DIR).toBe('/custom/config'); // From profileEnv + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('sk-api-profile'); // From apiProfileEnv (highest for ANTHROPIC_*) + + // Verify standard Python env vars + expect(envArg.PYTHONUNBUFFERED).toBe('1'); + expect(envArg.PYTHONIOENCODING).toBe('utf-8'); + expect(envArg.PYTHONUTF8).toBe('1'); + }); + + it('should call getOAuthModeClearVars and apply clearing when in OAuth mode', async () => { + // OAuth mode - empty API profile + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({}); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const envArg = spawnCalls[0].options.env as Record; + + // Verify clearing vars are applied (empty strings for ANTHROPIC_* vars) + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe(''); + 
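// A minimal sketch of the logging discipline AC4 enforces above (values illustrative):
// log only whether a credential exists, never any portion of its value.
const spawnedEnv = { ANTHROPIC_AUTH_TOKEN: 'sk-sensitive-api-key-12345678' };
console.log('auth status:', { hasAuthToken: Boolean(spawnedEnv.ANTHROPIC_AUTH_TOKEN) });
// -> auth status: { hasAuthToken: true } -- safe to ship to debug logs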
expect(envArg.ANTHROPIC_BASE_URL).toBe(''); + expect(envArg.ANTHROPIC_MODEL).toBe(''); + expect(envArg.ANTHROPIC_DEFAULT_HAIKU_MODEL).toBe(''); + expect(envArg.ANTHROPIC_DEFAULT_SONNET_MODEL).toBe(''); + expect(envArg.ANTHROPIC_DEFAULT_OPUS_MODEL).toBe(''); + }); + + it('should handle getAPIProfileEnv errors gracefully', async () => { + // Simulate service error + vi.mocked(profileService.getAPIProfileEnv).mockRejectedValue(new Error('Service unavailable')); + + // Should not throw - should fall back to OAuth mode + await expect( + processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution') + ).resolves.not.toThrow(); + + const envArg = spawnCalls[0].options.env as Record; + + // Should have clearing vars (falls back to OAuth mode on error) + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe(''); + expect(envArg.ANTHROPIC_BASE_URL).toBe(''); + }); + }); +}); diff --git a/apps/frontend/src/main/agent/agent-process.ts b/apps/frontend/src/main/agent/agent-process.ts index ef045555c0..03010bf959 100644 --- a/apps/frontend/src/main/agent/agent-process.ts +++ b/apps/frontend/src/main/agent/agent-process.ts @@ -7,6 +7,7 @@ import { AgentState } from './agent-state'; import { AgentEvents } from './agent-events'; import { ProcessType, ExecutionProgressData } from './types'; import { detectRateLimit, createSDKRateLimitInfo, getProfileEnv, detectAuthFailure } from '../rate-limit-detector'; +import { getAPIProfileEnv } from '../services/profile'; import { projectStore } from '../project-store'; import { getClaudeProfileManager } from '../claude-profile-manager'; import { parsePythonCommand, validatePythonPath } from '../python-detector'; @@ -14,6 +15,64 @@ import { pythonEnvManager, getConfiguredPythonPath } from '../python-env-manager import { buildMemoryEnvVars } from '../memory-env-builder'; import { readSettingsFile } from '../settings-utils'; import type { AppSettings } from '../../shared/types/settings'; +import { getOAuthModeClearVars } from './env-utils'; +import { getAugmentedEnv } from '../env-utils'; +import { getToolInfo } from '../cli-tool-manager'; + + +function deriveGitBashPath(gitExePath: string): string | null { + if (process.platform !== 'win32') { + return null; + } + + try { + const gitDir = path.dirname(gitExePath); // e.g., D:\...\Git\mingw64\bin + const gitDirName = path.basename(gitDir).toLowerCase(); + + // Find Git installation root + let gitRoot: string; + + if (gitDirName === 'cmd') { + // .../Git/cmd/git.exe -> .../Git + gitRoot = path.dirname(gitDir); + } else if (gitDirName === 'bin') { + // Could be .../Git/bin/git.exe OR .../Git/mingw64/bin/git.exe + const parent = path.dirname(gitDir); + const parentName = path.basename(parent).toLowerCase(); + if (parentName === 'mingw64' || parentName === 'mingw32') { + // .../Git/mingw64/bin/git.exe -> .../Git + gitRoot = path.dirname(parent); + } else { + // .../Git/bin/git.exe -> .../Git + gitRoot = parent; + } + } else { + // Unknown structure - try to find 'bin' sibling + gitRoot = path.dirname(gitDir); + } + + // Bash.exe is in Git/bin/bash.exe + const bashPath = path.join(gitRoot, 'bin', 'bash.exe'); + + if (existsSync(bashPath)) { + console.log('[AgentProcess] Derived git-bash path:', bashPath); + return bashPath; + } + + // Fallback: check one level up if gitRoot didn't work + const altBashPath = path.join(path.dirname(gitRoot), 'bin', 'bash.exe'); + if (existsSync(altBashPath)) { + console.log('[AgentProcess] Found git-bash at alternate path:', altBashPath); + return altBashPath; + } + + 
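// A usage sketch for the derivation above (paths hypothetical; the result also depends
// on bash.exe actually existing at the derived location):
//   deriveGitBashPath('C:\\Git\\cmd\\git.exe')          -> 'C:\\Git\\bin\\bash.exe'
//   deriveGitBashPath('C:\\Git\\mingw64\\bin\\git.exe') -> 'C:\\Git\\bin\\bash.exe'
//   deriveGitBashPath('/usr/bin/git') on macOS/Linux    -> null (Windows-only helper)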
console.warn('[AgentProcess] Could not find bash.exe from git path:', gitExePath); + return null; + } catch (error) { + console.error('[AgentProcess] Error deriving git-bash path:', error); + return null; + } +} /** * Process spawning and lifecycle management @@ -53,8 +112,31 @@ export class AgentProcessManager { extraEnv: Record ): NodeJS.ProcessEnv { const profileEnv = getProfileEnv(); + // Use getAugmentedEnv() to ensure common tool paths (dotnet, homebrew, etc.) + // are available even when app is launched from Finder/Dock + const augmentedEnv = getAugmentedEnv(); + + // On Windows, detect and pass git-bash path for Claude Code CLI + // Electron can detect git via where.exe, but Python subprocess may not have the same PATH + const gitBashEnv: Record = {}; + if (process.platform === 'win32' && !process.env.CLAUDE_CODE_GIT_BASH_PATH) { + try { + const gitInfo = getToolInfo('git'); + if (gitInfo.found && gitInfo.path) { + const bashPath = deriveGitBashPath(gitInfo.path); + if (bashPath) { + gitBashEnv['CLAUDE_CODE_GIT_BASH_PATH'] = bashPath; + console.log('[AgentProcess] Setting CLAUDE_CODE_GIT_BASH_PATH:', bashPath); + } + } + } catch (error) { + console.warn('[AgentProcess] Failed to detect git-bash path:', error); + } + } + return { - ...process.env, + ...augmentedEnv, + ...gitBashEnv, ...extraEnv, ...profileEnv, PYTHONUNBUFFERED: '1', @@ -195,6 +277,8 @@ export class AgentProcessManager { // Auto-detect from app location (configured path was invalid or not set) const possiblePaths = [ + // Packaged app: backend is in extraResources (process.resourcesPath/backend) + ...(app.isPackaged ? [path.join(process.resourcesPath, 'backend')] : []), // Dev mode: from dist/main -> ../../backend (apps/frontend/out/main -> apps/backend) path.resolve(__dirname, '..', '..', '..', 'backend'), // Alternative: from app root -> apps/backend @@ -238,19 +322,10 @@ export class AgentProcessManager { } /** - * Load environment variables from project's .auto-claude/.env file - * This contains frontend-configured settings like memory/Graphiti configuration + * Parse environment variables from a .env file content. + * Filters out empty values to prevent overriding valid tokens from profiles. 
*/ - private loadProjectEnv(projectPath: string): Record<string, string> { - // Find project by path to get autoBuildPath - const projects = projectStore.getProjects(); - const project = projects.find((p) => p.path === projectPath); - - if (!project?.autoBuildPath) { - return {}; - } - - const envPath = path.join(projectPath, project.autoBuildPath, '.env'); + private parseEnvFile(envPath: string): Record<string, string> { if (!existsSync(envPath)) { return {}; } @@ -274,11 +349,14 @@ // Remove quotes if present if ((value.startsWith('"') && value.endsWith('"')) || - (value.startsWith("'") && value.endsWith("'"))) { + (value.startsWith("'") && value.endsWith("'"))) { value = value.slice(1, -1); } - envVars[key] = value; + // Skip empty values to prevent overriding valid values from other sources + if (value) { + envVars[key] = value; + } } } @@ -288,6 +366,23 @@ } } + /** + * Load environment variables from project's .auto-claude/.env file + * This contains frontend-configured settings like memory/Graphiti configuration + */ + private loadProjectEnv(projectPath: string): Record<string, string> { + // Find project by path to get autoBuildPath + const projects = projectStore.getProjects(); + const project = projects.find((p) => p.path === projectPath); + + if (!project?.autoBuildPath) { + return {}; + } + + const envPath = path.join(projectPath, project.autoBuildPath, '.env'); + return this.parseEnvFile(envPath); + } + /** * Load environment variables from auto-claude .env file */ @@ -298,50 +393,19 @@ } const envPath = path.join(autoBuildSource, '.env'); - if (!existsSync(envPath)) { - return {}; - } - - try { - const envContent = readFileSync(envPath, 'utf-8'); - const envVars: Record<string, string> = {}; - - // Handle both Unix (\n) and Windows (\r\n) line endings - for (const line of envContent.split(/\r?\n/)) { - const trimmed = line.trim(); - // Skip comments and empty lines - if (!trimmed || trimmed.startsWith('#')) { - continue; - } - - const eqIndex = trimmed.indexOf('='); - if (eqIndex > 0) { - const key = trimmed.substring(0, eqIndex).trim(); - let value = trimmed.substring(eqIndex + 1).trim(); - - // Remove quotes if present - if ((value.startsWith('"') && value.endsWith('"')) || - (value.startsWith("'") && value.endsWith("'"))) { - value = value.slice(1, -1); - } - - envVars[key] = value; - } - } - - return envVars; - } catch { - return {}; - } + return this.parseEnvFile(envPath); } - spawnProcess( + /** + * Spawn a Python process for task execution + */ + async spawnProcess( taskId: string, cwd: string, args: string[], extraEnv: Record<string, string> = {}, processType: ProcessType = 'task-execution' - ): void { + ): Promise<void> { const isSpecRunner = processType === 'spec-creation'; this.killProcess(taskId); @@ -351,13 +415,27 @@ // Get Python environment (PYTHONPATH for bundled packages, etc.)
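// A minimal sketch of what the parseEnvFile refactor above accepts (input illustrative):
//   given lines:  # comment | TOKEN="abc" | EMPTY= | URL='https://example.dev'
//   result:       { TOKEN: 'abc', URL: 'https://example.dev' }
// Comments and blank lines are skipped, wrapping quotes are stripped, and EMPTY is
// dropped entirely, so a blank .env entry can never clobber a token from a profile.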
const pythonEnv = pythonEnvManager.getPythonEnv(); - // Parse Python command to handle space-separated commands like "py -3" + // Get active API profile environment variables + let apiProfileEnv: Record<string, string> = {}; + try { + apiProfileEnv = await getAPIProfileEnv(); + } catch (error) { + console.error('[Agent Process] Failed to get API profile env:', error); + // Continue with empty profile env (falls back to OAuth mode) + } + + // Get OAuth mode clearing vars (clears stale ANTHROPIC_* vars when in OAuth mode) + const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv); + + // Parse Python command to handle space-separated commands like "py -3" const [pythonCommand, pythonBaseArgs] = parsePythonCommand(this.getPythonPath()); const childProcess = spawn(pythonCommand, [...pythonBaseArgs, ...args], { cwd, env: { ...env, // Already includes process.env, extraEnv, profileEnv, PYTHONUNBUFFERED, PYTHONUTF8 - ...pythonEnv // Include Python environment (PYTHONPATH for bundled packages) + ...pythonEnv, // Include Python environment (PYTHONPATH for bundled packages) + ...oauthModeClearVars, // Clear stale ANTHROPIC_* vars when in OAuth mode + ...apiProfileEnv // Include active API profile config (highest priority for ANTHROPIC_* vars) } }); diff --git a/apps/frontend/src/main/agent/agent-queue.ts b/apps/frontend/src/main/agent/agent-queue.ts index 913290b35c..1d18be761b 100644 --- a/apps/frontend/src/main/agent/agent-queue.ts +++ b/apps/frontend/src/main/agent/agent-queue.ts @@ -7,8 +7,9 @@ import { AgentEvents } from './agent-events'; import { AgentProcessManager } from './agent-process'; import { RoadmapConfig } from './types'; import type { IdeationConfig, Idea } from '../../shared/types'; -import { MODEL_ID_MAP } from '../../shared/constants'; import { detectRateLimit, createSDKRateLimitInfo, getProfileEnv } from '../rate-limit-detector'; +import { getAPIProfileEnv } from '../services/profile'; +import { getOAuthModeClearVars } from './env-utils'; import { debugLog, debugError } from '../../shared/utils/debug-logger'; import { parsePythonCommand } from '../python-detector'; import { pythonEnvManager } from '../python-env-manager'; @@ -37,6 +38,40 @@ this.emitter = emitter; } + /** + * Ensure Python environment is ready before spawning processes. + * Prevents the race condition where generation starts before dependencies are installed, + * which would cause it to fall back to system Python and fail with ModuleNotFoundError.
+ * + * @param projectId - The project ID for error event emission + * @param eventType - The error event type to emit on failure + * @returns true if environment is ready, false if initialization failed (error already emitted) + */ + private async ensurePythonEnvReady( + projectId: string, + eventType: 'ideation-error' | 'roadmap-error' + ): Promise { + const autoBuildSource = this.processManager.getAutoBuildSourcePath(); + + if (!pythonEnvManager.isEnvReady()) { + debugLog('[Agent Queue] Python environment not ready, waiting for initialization...'); + if (autoBuildSource) { + const status = await pythonEnvManager.initialize(autoBuildSource); + if (!status.ready) { + debugError('[Agent Queue] Python environment initialization failed:', status.error); + this.emitter.emit(eventType, projectId, `Python environment not ready: ${status.error || 'initialization failed'}`); + return false; + } + debugLog('[Agent Queue] Python environment now ready'); + } else { + debugError('[Agent Queue] Cannot initialize Python - auto-build source not found'); + this.emitter.emit(eventType, projectId, 'Python environment not ready: auto-build source not found'); + return false; + } + } + return true; + } + /** * Start roadmap generation process * @@ -44,14 +79,14 @@ export class AgentQueueManager { * This allows refreshing competitor data independently of the general roadmap refresh. * Use when user explicitly wants new competitor research. */ - startRoadmapGeneration( + async startRoadmapGeneration( projectId: string, projectPath: string, refresh: boolean = false, enableCompetitorAnalysis: boolean = false, refreshCompetitorAnalysis: boolean = false, config?: RoadmapConfig - ): void { + ): Promise { debugLog('[Agent Queue] Starting roadmap generation:', { projectId, projectPath, @@ -94,9 +129,9 @@ export class AgentQueueManager { } // Add model and thinking level from config + // Pass shorthand (opus/sonnet/haiku) - backend resolves using API profile env vars if (config?.model) { - const modelId = MODEL_ID_MAP[config.model] || MODEL_ID_MAP['opus']; - args.push('--model', modelId); + args.push('--model', config.model); } if (config?.thinkingLevel) { args.push('--thinking-level', config.thinkingLevel); @@ -105,18 +140,18 @@ export class AgentQueueManager { debugLog('[Agent Queue] Spawning roadmap process with args:', args); // Use projectId as taskId for roadmap operations - this.spawnRoadmapProcess(projectId, projectPath, args); + await this.spawnRoadmapProcess(projectId, projectPath, args); } /** * Start ideation generation process */ - startIdeationGeneration( + async startIdeationGeneration( projectId: string, projectPath: string, config: IdeationConfig, refresh: boolean = false - ): void { + ): Promise { debugLog('[Agent Queue] Starting ideation generation:', { projectId, projectPath, @@ -170,9 +205,9 @@ export class AgentQueueManager { } // Add model and thinking level from config + // Pass shorthand (opus/sonnet/haiku) - backend resolves using API profile env vars if (config.model) { - const modelId = MODEL_ID_MAP[config.model] || MODEL_ID_MAP['opus']; - args.push('--model', modelId); + args.push('--model', config.model); } if (config.thinkingLevel) { args.push('--thinking-level', config.thinkingLevel); @@ -181,19 +216,28 @@ export class AgentQueueManager { debugLog('[Agent Queue] Spawning ideation process with args:', args); // Use projectId as taskId for ideation operations - this.spawnIdeationProcess(projectId, projectPath, args); + await this.spawnIdeationProcess(projectId, projectPath, args); } /** * 
Spawn a Python process for ideation generation */ - private spawnIdeationProcess( + private async spawnIdeationProcess( projectId: string, projectPath: string, args: string[] - ): void { + ): Promise { debugLog('[Agent Queue] Spawning ideation process:', { projectId, projectPath }); + // Run from auto-claude source directory so imports work correctly + const autoBuildSource = this.processManager.getAutoBuildSourcePath(); + const cwd = autoBuildSource || process.cwd(); + + // Ensure Python environment is ready before spawning + if (!await this.ensurePythonEnvReady(projectId, 'ideation-error')) { + return; + } + // Kill existing process for this project if any const wasKilled = this.processManager.killProcess(projectId); if (wasKilled) { @@ -204,9 +248,6 @@ export class AgentQueueManager { const spawnId = this.state.generateSpawnId(); debugLog('[Agent Queue] Generated spawn ID:', spawnId); - // Run from auto-claude source directory so imports work correctly - const autoBuildSource = this.processManager.getAutoBuildSourcePath(); - const cwd = autoBuildSource || process.cwd(); // Get combined environment variables const combinedEnv = this.processManager.getCombinedEnv(projectPath); @@ -214,6 +255,12 @@ export class AgentQueueManager { // Get active Claude profile environment (CLAUDE_CODE_OAUTH_TOKEN if not default) const profileEnv = getProfileEnv(); + // Get active API profile environment variables + const apiProfileEnv = await getAPIProfileEnv(); + + // Get OAuth mode clearing vars (clears stale ANTHROPIC_* vars when in OAuth mode) + const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv); + // Get Python path from process manager (uses venv if configured) const pythonPath = this.processManager.getPythonPath(); @@ -234,28 +281,30 @@ export class AgentQueueManager { // 1. process.env (system) // 2. pythonEnv (bundled packages environment) // 3. combinedEnv (auto-claude/.env for CLI usage) - // 4. profileEnv (Electron app OAuth token - highest priority) - // 5. Our specific overrides + // 4. oauthModeClearVars (clear stale ANTHROPIC_* vars when in OAuth mode) + // 5. profileEnv (Electron app OAuth token) + // 6. apiProfileEnv (Active API profile config - highest priority for ANTHROPIC_* vars) + // 7. Our specific overrides const finalEnv = { ...process.env, ...pythonEnv, ...combinedEnv, + ...oauthModeClearVars, ...profileEnv, + ...apiProfileEnv, PYTHONPATH: combinedPythonPath, PYTHONUNBUFFERED: '1', PYTHONUTF8: '1' }; - // Debug: Show OAuth token source + // Debug: Show OAuth token source (token values intentionally omitted for security - AC4) const tokenSource = profileEnv['CLAUDE_CODE_OAUTH_TOKEN'] ? 'Electron app profile' : (combinedEnv['CLAUDE_CODE_OAUTH_TOKEN'] ? 'auto-claude/.env' : 'not found'); - const oauthToken = (finalEnv as Record)['CLAUDE_CODE_OAUTH_TOKEN']; - const hasToken = !!oauthToken; + const hasToken = !!(finalEnv as Record)['CLAUDE_CODE_OAUTH_TOKEN']; debugLog('[Agent Queue] OAuth token status:', { source: tokenSource, - hasToken, - tokenPreview: hasToken ? oauthToken?.substring(0, 20) + '...' 
: 'none' + hasToken }); // Parse Python command to handle space-separated commands like "py -3" @@ -500,13 +549,22 @@ export class AgentQueueManager { /** * Spawn a Python process for roadmap generation */ - private spawnRoadmapProcess( + private async spawnRoadmapProcess( projectId: string, projectPath: string, args: string[] - ): void { + ): Promise { debugLog('[Agent Queue] Spawning roadmap process:', { projectId, projectPath }); + // Run from auto-claude source directory so imports work correctly + const autoBuildSource = this.processManager.getAutoBuildSourcePath(); + const cwd = autoBuildSource || process.cwd(); + + // Ensure Python environment is ready before spawning + if (!await this.ensurePythonEnvReady(projectId, 'roadmap-error')) { + return; + } + // Kill existing process for this project if any const wasKilled = this.processManager.killProcess(projectId); if (wasKilled) { @@ -517,9 +575,6 @@ export class AgentQueueManager { const spawnId = this.state.generateSpawnId(); debugLog('[Agent Queue] Generated roadmap spawn ID:', spawnId); - // Run from auto-claude source directory so imports work correctly - const autoBuildSource = this.processManager.getAutoBuildSourcePath(); - const cwd = autoBuildSource || process.cwd(); // Get combined environment variables const combinedEnv = this.processManager.getCombinedEnv(projectPath); @@ -527,6 +582,12 @@ export class AgentQueueManager { // Get active Claude profile environment (CLAUDE_CODE_OAUTH_TOKEN if not default) const profileEnv = getProfileEnv(); + // Get active API profile environment variables + const apiProfileEnv = await getAPIProfileEnv(); + + // Get OAuth mode clearing vars (clears stale ANTHROPIC_* vars when in OAuth mode) + const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv); + // Get Python path from process manager (uses venv if configured) const pythonPath = this.processManager.getPythonPath(); @@ -547,28 +608,30 @@ export class AgentQueueManager { // 1. process.env (system) // 2. pythonEnv (bundled packages environment) // 3. combinedEnv (auto-claude/.env for CLI usage) - // 4. profileEnv (Electron app OAuth token - highest priority) - // 5. Our specific overrides + // 4. oauthModeClearVars (clear stale ANTHROPIC_* vars when in OAuth mode) + // 5. profileEnv (Electron app OAuth token) + // 6. apiProfileEnv (Active API profile config - highest priority for ANTHROPIC_* vars) + // 7. Our specific overrides const finalEnv = { ...process.env, ...pythonEnv, ...combinedEnv, + ...oauthModeClearVars, ...profileEnv, + ...apiProfileEnv, PYTHONPATH: combinedPythonPath, PYTHONUNBUFFERED: '1', PYTHONUTF8: '1' }; - // Debug: Show OAuth token source + // Debug: Show OAuth token source (token values intentionally omitted for security - AC4) const tokenSource = profileEnv['CLAUDE_CODE_OAUTH_TOKEN'] ? 'Electron app profile' : (combinedEnv['CLAUDE_CODE_OAUTH_TOKEN'] ? 'auto-claude/.env' : 'not found'); - const oauthToken = (finalEnv as Record)['CLAUDE_CODE_OAUTH_TOKEN']; - const hasToken = !!oauthToken; + const hasToken = !!(finalEnv as Record)['CLAUDE_CODE_OAUTH_TOKEN']; debugLog('[Agent Queue] OAuth token status:', { source: tokenSource, - hasToken, - tokenPreview: hasToken ? oauthToken?.substring(0, 20) + '...' 
: 'none' + hasToken }); // Parse Python command to handle space-separated commands like "py -3" diff --git a/apps/frontend/src/main/agent/env-utils.test.ts b/apps/frontend/src/main/agent/env-utils.test.ts new file mode 100644 index 0000000000..6a5d42c54e --- /dev/null +++ b/apps/frontend/src/main/agent/env-utils.test.ts @@ -0,0 +1,134 @@ +/** + * Unit tests for env-utils + * Tests OAuth mode environment variable clearing functionality + */ + +import { describe, it, expect } from 'vitest'; +import { getOAuthModeClearVars } from './env-utils'; + +describe('getOAuthModeClearVars', () => { + describe('OAuth mode (no active API profile)', () => { + it('should return clearing vars when apiProfileEnv is empty', () => { + const result = getOAuthModeClearVars({}); + + expect(result).toEqual({ + ANTHROPIC_API_KEY: '', + ANTHROPIC_AUTH_TOKEN: '', + ANTHROPIC_BASE_URL: '', + ANTHROPIC_MODEL: '', + ANTHROPIC_DEFAULT_HAIKU_MODEL: '', + ANTHROPIC_DEFAULT_SONNET_MODEL: '', + ANTHROPIC_DEFAULT_OPUS_MODEL: '' + }); + }); + + it('should clear all ANTHROPIC_* environment variables', () => { + const result = getOAuthModeClearVars({}); + + // Verify all known ANTHROPIC_* vars are cleared + expect(result.ANTHROPIC_API_KEY).toBe(''); + expect(result.ANTHROPIC_AUTH_TOKEN).toBe(''); + expect(result.ANTHROPIC_BASE_URL).toBe(''); + expect(result.ANTHROPIC_MODEL).toBe(''); + expect(result.ANTHROPIC_DEFAULT_HAIKU_MODEL).toBe(''); + expect(result.ANTHROPIC_DEFAULT_SONNET_MODEL).toBe(''); + expect(result.ANTHROPIC_DEFAULT_OPUS_MODEL).toBe(''); + }); + }); + + describe('API Profile mode (active profile)', () => { + it('should return empty object when apiProfileEnv has values', () => { + const apiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-active-profile', + ANTHROPIC_BASE_URL: 'https://custom.api.com' + }; + + const result = getOAuthModeClearVars(apiProfileEnv); + + expect(result).toEqual({}); + }); + + it('should NOT clear vars when API profile is active', () => { + const apiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-test', + ANTHROPIC_BASE_URL: 'https://test.com', + ANTHROPIC_MODEL: 'claude-3-opus' + }; + + const result = getOAuthModeClearVars(apiProfileEnv); + + // Should not return any clearing vars + expect(Object.keys(result)).toHaveLength(0); + }); + + it('should detect non-empty profile even with single property', () => { + const apiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-minimal' + }; + + const result = getOAuthModeClearVars(apiProfileEnv); + + expect(result).toEqual({}); + }); + }); + + describe('Edge cases', () => { + it('should handle undefined gracefully (treat as empty)', () => { + // TypeScript should prevent this, but runtime safety + const result = getOAuthModeClearVars(undefined as any); + + // Should treat undefined as empty object -> OAuth mode + expect(result).toBeDefined(); + }); + + it('should handle null gracefully (treat as empty)', () => { + // Runtime safety for null values + const result = getOAuthModeClearVars(null as any); + + // Should treat null as OAuth mode and return clearing vars + expect(result).toEqual({ + ANTHROPIC_API_KEY: '', + ANTHROPIC_AUTH_TOKEN: '', + ANTHROPIC_BASE_URL: '', + ANTHROPIC_MODEL: '', + ANTHROPIC_DEFAULT_HAIKU_MODEL: '', + ANTHROPIC_DEFAULT_SONNET_MODEL: '', + ANTHROPIC_DEFAULT_OPUS_MODEL: '' + }); + }); + + it('should return consistent object shape for OAuth mode', () => { + const result1 = getOAuthModeClearVars({}); + const result2 = getOAuthModeClearVars({}); + + expect(result1).toEqual(result2); + // Use specific expected keys instead of magic number + const 
expectedKeys = [ + 'ANTHROPIC_API_KEY', + 'ANTHROPIC_AUTH_TOKEN', + 'ANTHROPIC_BASE_URL', + 'ANTHROPIC_MODEL', + 'ANTHROPIC_DEFAULT_HAIKU_MODEL', + 'ANTHROPIC_DEFAULT_SONNET_MODEL', + 'ANTHROPIC_DEFAULT_OPUS_MODEL' + ]; + expect(Object.keys(result1).sort()).toEqual(expectedKeys.sort()); + }); + + it('should NOT clear if apiProfileEnv has non-ANTHROPIC keys only', () => { + // Edge case: service returns metadata but no ANTHROPIC_* vars + const result = getOAuthModeClearVars({ SOME_OTHER_VAR: 'value' }); + + // Should treat as OAuth mode since no ANTHROPIC_* keys present + expect(result).toEqual({ + ANTHROPIC_API_KEY: '', + ANTHROPIC_AUTH_TOKEN: '', + ANTHROPIC_BASE_URL: '', + ANTHROPIC_MODEL: '', + ANTHROPIC_DEFAULT_HAIKU_MODEL: '', + ANTHROPIC_DEFAULT_SONNET_MODEL: '', + ANTHROPIC_DEFAULT_OPUS_MODEL: '' + }); + }); + }); +}); diff --git a/apps/frontend/src/main/agent/env-utils.ts b/apps/frontend/src/main/agent/env-utils.ts new file mode 100644 index 0000000000..ba384dfa01 --- /dev/null +++ b/apps/frontend/src/main/agent/env-utils.ts @@ -0,0 +1,44 @@ +/** + * Utility functions for managing environment variables in agent spawning + */ + +/** + * Get environment variables to clear ANTHROPIC_* vars when in OAuth mode + * + * When switching from API Profile mode to OAuth mode, residual ANTHROPIC_* + * environment variables from process.env can cause authentication failures. + * This function returns an object with empty strings for these vars when + * no API profile is active, ensuring OAuth tokens are used correctly. + * + * **Why empty strings?** Setting environment variables to empty strings (rather than + * undefined) ensures they override any stale values from process.env. Python's SDK + * treats empty strings as falsy in conditional checks like `if token:`, so empty + * strings effectively disable these authentication parameters without leaving + * undefined values that might be ignored during object spreading. + * + * @param apiProfileEnv - Environment variables from getAPIProfileEnv() + * @returns Object with empty ANTHROPIC_* vars if in OAuth mode, empty object otherwise + */ +export function getOAuthModeClearVars(apiProfileEnv: Record): Record { + // If API profile is active (has ANTHROPIC_* vars), don't clear anything + if (apiProfileEnv && Object.keys(apiProfileEnv).some(key => key.startsWith('ANTHROPIC_'))) { + return {}; + } + + // In OAuth mode (no API profile), clear all ANTHROPIC_* vars + // Setting to empty string ensures they override any values from process.env + // Python's `if token:` checks treat empty strings as falsy + // + // IMPORTANT: ANTHROPIC_API_KEY is included to prevent Claude Code from using + // API keys that may be present in the shell environment instead of OAuth tokens. + // Without clearing this, Claude Code would show "Claude API" instead of "Claude Max". 
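// A minimal demonstration of the override semantics documented above (values illustrative):
//   { ...{ ANTHROPIC_AUTH_TOKEN: 'stale' }, ...getOAuthModeClearVars({}) }
//   -> ANTHROPIC_AUTH_TOKEN === '' (stale value replaced; Python's `if token:` sees falsy)
// Setting undefined instead would still create an own property after a spread, but
// downstream consumers may skip undefined entries, so '' is the unambiguous way to clear.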
+ return { + ANTHROPIC_API_KEY: '', + ANTHROPIC_AUTH_TOKEN: '', + ANTHROPIC_BASE_URL: '', + ANTHROPIC_MODEL: '', + ANTHROPIC_DEFAULT_HAIKU_MODEL: '', + ANTHROPIC_DEFAULT_SONNET_MODEL: '', + ANTHROPIC_DEFAULT_OPUS_MODEL: '' + }; +} diff --git a/apps/frontend/src/main/app-updater.ts b/apps/frontend/src/main/app-updater.ts index a76444dd3b..98f1f824bf 100644 --- a/apps/frontend/src/main/app-updater.ts +++ b/apps/frontend/src/main/app-updater.ts @@ -18,12 +18,16 @@ */ import { autoUpdater } from 'electron-updater'; -import { app } from 'electron'; +import { app, net } from 'electron'; import type { BrowserWindow } from 'electron'; import { IPC_CHANNELS } from '../shared/constants'; import type { AppUpdateInfo } from '../shared/types'; import { compareVersions } from './updater/version-manager'; +// GitHub repo info for API calls +const GITHUB_OWNER = 'AndyMik90'; +const GITHUB_REPO = 'Auto-Claude'; + // Debug mode - DEBUG_UPDATER=true or development mode const DEBUG_UPDATER = process.env.DEBUG_UPDATER === 'true' || process.env.NODE_ENV === 'development'; @@ -251,3 +255,214 @@ export function quitAndInstall(): void { export function getCurrentVersion(): string { return autoUpdater.currentVersion.version; } + +/** + * Check if a version string represents a prerelease (beta, alpha, rc, etc.) + */ +export function isPrerelease(version: string): boolean { + return /-(alpha|beta|rc|dev|canary)\.\d+$/i.test(version) || version.includes('-'); +} + +// Timeout for GitHub API requests (10 seconds) +const GITHUB_API_TIMEOUT = 10000; + +/** + * Fetch the latest stable release from GitHub API + * Returns the latest non-prerelease version + */ +async function fetchLatestStableRelease(): Promise { + const fetchPromise = new Promise((resolve) => { + const url = `https://api.github.com/repos/${GITHUB_OWNER}/${GITHUB_REPO}/releases`; + console.warn('[app-updater] Fetching releases from:', url); + + const request = net.request({ + url, + method: 'GET' + }); + + request.setHeader('Accept', 'application/vnd.github.v3+json'); + request.setHeader('User-Agent', `Auto-Claude/${getCurrentVersion()}`); + + let data = ''; + + request.on('response', (response) => { + // Validate HTTP status code + const statusCode = response.statusCode; + if (statusCode !== 200) { + // Sanitize statusCode to prevent log injection + // Convert to number and validate range to ensure it's a valid HTTP status code + const numericCode = Number(statusCode); + const safeStatusCode = (Number.isInteger(numericCode) && numericCode >= 100 && numericCode < 600) + ? 
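// (Expected behavior of isPrerelease above, per the regex plus the '-' check:
//  isPrerelease('2.1.0-beta.3') === true, isPrerelease('2.1.0-rc.1') === true,
//  isPrerelease('2.1.0') === false. Any hyphenated tag is treated as a prerelease,
//  so the explicit alpha/beta/rc alternation is effectively a documented subset.)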
String(numericCode) + : 'unknown'; + console.error(`[app-updater] GitHub API error: HTTP ${safeStatusCode}`); + if (statusCode === 403) { + console.error('[app-updater] Rate limit may have been exceeded'); + } else if (statusCode === 404) { + console.error('[app-updater] Repository or releases not found'); + } + resolve(null); + return; + } + + response.on('data', (chunk) => { + data += chunk.toString(); + }); + + response.on('end', () => { + try { + const parsed = JSON.parse(data); + + // Validate response is an array + if (!Array.isArray(parsed)) { + console.error('[app-updater] Unexpected response format - expected array, got:', typeof parsed); + resolve(null); + return; + } + + const releases = parsed as Array<{ + tag_name: string; + prerelease: boolean; + draft: boolean; + body?: string; + published_at?: string; + html_url?: string; + }>; + + // Find the first non-prerelease, non-draft release + const latestStable = releases.find(r => !r.prerelease && !r.draft); + + if (!latestStable) { + console.warn('[app-updater] No stable release found'); + resolve(null); + return; + } + + const version = latestStable.tag_name.replace(/^v/, ''); + // Sanitize version string for logging (remove control characters and limit length) + // eslint-disable-next-line no-control-regex + const safeVersion = String(version).replace(/[\x00-\x1f\x7f]/g, '').slice(0, 50); + console.warn('[app-updater] Found latest stable release:', safeVersion); + + resolve({ + version, + releaseNotes: latestStable.body, + releaseDate: latestStable.published_at + }); + } catch (e) { + // Sanitize error message for logging (prevent log injection from malformed JSON) + const safeError = e instanceof Error ? e.message : 'Unknown parse error'; + console.error('[app-updater] Failed to parse releases JSON:', safeError); + resolve(null); + } + }); + }); + + request.on('error', (error) => { + // Sanitize error message for logging (use only the message property) + const safeErrorMessage = error instanceof Error ? error.message : 'Unknown error'; + console.error('[app-updater] Failed to fetch releases:', safeErrorMessage); + resolve(null); + }); + + request.end(); + }); + + // Add timeout to prevent hanging indefinitely + const timeoutPromise = new Promise((resolve) => { + setTimeout(() => { + console.error(`[app-updater] GitHub API request timed out after ${GITHUB_API_TIMEOUT}ms`); + resolve(null); + }, GITHUB_API_TIMEOUT); + }); + + return Promise.race([fetchPromise, timeoutPromise]); +} + +/** + * Check if we should offer a downgrade to stable + * Called when user disables beta updates while on a prerelease version + * + * Returns the latest stable version if: + * 1. Current version is a prerelease + * 2. 
A stable version exists + */ +export async function checkForStableDowngrade(): Promise { + const currentVersion = getCurrentVersion(); + + // Only check for downgrade if currently on a prerelease + if (!isPrerelease(currentVersion)) { + console.warn('[app-updater] Current version is not a prerelease, no downgrade needed'); + return null; + } + + console.warn('[app-updater] Current version is prerelease:', currentVersion); + console.warn('[app-updater] Checking for stable version to downgrade to...'); + + const latestStable = await fetchLatestStableRelease(); + + if (!latestStable) { + console.warn('[app-updater] No stable release available for downgrade'); + return null; + } + + console.warn('[app-updater] Stable downgrade available:', latestStable.version); + return latestStable; +} + +/** + * Set update channel with optional downgrade check + * When switching from beta to stable, checks if user should be offered a downgrade + * + * @param channel - The update channel to switch to + * @param triggerDowngradeCheck - Whether to check for stable downgrade (when disabling beta) + */ +export async function setUpdateChannelWithDowngradeCheck( + channel: UpdateChannel, + triggerDowngradeCheck = false +): Promise { + autoUpdater.channel = channel; + console.warn(`[app-updater] Update channel set to: ${channel}`); + + // If switching to stable and downgrade check requested, look for stable version + if (channel === 'latest' && triggerDowngradeCheck) { + const stableVersion = await checkForStableDowngrade(); + + if (stableVersion && mainWindow) { + // Notify the renderer about the available stable downgrade + mainWindow.webContents.send(IPC_CHANNELS.APP_UPDATE_STABLE_DOWNGRADE, stableVersion); + } + + return stableVersion; + } + + return null; +} + +/** + * Download a specific version (for downgrade) + * Uses electron-updater with allowDowngrade enabled to download older stable versions + */ +export async function downloadStableVersion(): Promise { + // Switch to stable channel + autoUpdater.channel = 'latest'; + // Enable downgrade to allow downloading older versions (e.g., stable when on beta) + autoUpdater.allowDowngrade = true; + console.warn('[app-updater] Downloading stable version (allowDowngrade=true)...'); + + try { + // Force a fresh check on the stable channel, then download + const result = await autoUpdater.checkForUpdates(); + if (result) { + await autoUpdater.downloadUpdate(); + } else { + throw new Error('No stable version available for download'); + } + } catch (error) { + console.error('[app-updater] Failed to download stable version:', error); + throw error; + } finally { + // Reset allowDowngrade to prevent unintended downgrades in normal update checks + autoUpdater.allowDowngrade = false; + } +} diff --git a/apps/frontend/src/main/auto-claude-updater.ts b/apps/frontend/src/main/auto-claude-updater.ts deleted file mode 100644 index b19e19855e..0000000000 --- a/apps/frontend/src/main/auto-claude-updater.ts +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Auto Claude Source Updater - * - * Checks GitHub Releases for updates and downloads them. - * GitHub Releases are the single source of truth for versioning. - * - * Update flow: - * 1. Check GitHub Releases API for the latest release - * 2. Compare release tag with current app version - * 3. If update available, download release tarball and apply - * 4. 
Existing project update system handles pushing to individual projects - * - * Versioning: - * - Single source of truth: GitHub Releases - * - Current version: app.getVersion() (from package.json at build time) - * - Latest version: Fetched from GitHub Releases API - * - To release: Create a GitHub release with tag (e.g., v1.2.0) - */ - -// Export types -export type { - GitHubRelease, - AutoBuildUpdateCheck, - AutoBuildUpdateResult, - UpdateProgressCallback, - UpdateMetadata -} from './updater/types'; - -// Export version management -export { getBundledVersion, getEffectiveVersion } from './updater/version-manager'; - -// Export path resolution -export { - getBundledSourcePath, - getEffectiveSourcePath -} from './updater/path-resolver'; - -// Export update checking -export { checkForUpdates } from './updater/update-checker'; - -// Export update installation -export { downloadAndApplyUpdate } from './updater/update-installer'; - -// Export update status -export { - hasPendingSourceUpdate, - getUpdateMetadata -} from './updater/update-status'; diff --git a/apps/frontend/src/main/changelog/generator.ts b/apps/frontend/src/main/changelog/generator.ts index c71af9c3d4..6fa75c06fb 100644 --- a/apps/frontend/src/main/changelog/generator.ts +++ b/apps/frontend/src/main/changelog/generator.ts @@ -13,6 +13,7 @@ import { extractChangelog } from './parser'; import { getCommits, getBranchDiffCommits } from './git-integration'; import { detectRateLimit, createSDKRateLimitInfo, getProfileEnv } from '../rate-limit-detector'; import { parsePythonCommand } from '../python-detector'; +import { getAugmentedEnv } from '../env-utils'; /** * Core changelog generation logic @@ -246,21 +247,9 @@ export class ChangelogGenerator extends EventEmitter { const homeDir = os.homedir(); const isWindows = process.platform === 'win32'; - // Build PATH with platform-appropriate separator and locations - const pathAdditions = isWindows - ? [ - path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude'), - path.join(homeDir, 'AppData', 'Roaming', 'npm'), - path.join(homeDir, '.local', 'bin'), - 'C:\\Program Files\\Claude', - 'C:\\Program Files (x86)\\Claude' - ] - : [ - '/usr/local/bin', - '/opt/homebrew/bin', - path.join(homeDir, '.local', 'bin'), - path.join(homeDir, 'bin') - ]; + // Use getAugmentedEnv() to ensure common tool paths are available + // even when app is launched from Finder/Dock + const augmentedEnv = getAugmentedEnv(); // Get active Claude profile environment (OAuth token preferred, falls back to CLAUDE_CONFIG_DIR) const profileEnv = getProfileEnv(); @@ -271,15 +260,13 @@ export class ChangelogGenerator extends EventEmitter { }); const spawnEnv: Record = { - ...process.env as Record, + ...augmentedEnv, ...this.autoBuildEnv, ...profileEnv, // Include active Claude profile config // Ensure critical env vars are set for claude CLI // Use USERPROFILE on Windows, HOME on Unix ...(isWindows ? 
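// (Assumption about getAugmentedEnv(), which is not shown in this diff: it returns
//  process.env merged with well-known tool directories prepended to PATH, conceptually
//  { ...process.env, PATH: [commonDirs, process.env.PATH].filter(Boolean).join(path.delimiter) },
//  so CLI tools resolve even when the app is launched from Finder/Dock without a login shell.)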
{ USERPROFILE: homeDir } : { HOME: homeDir }), USER: process.env.USER || process.env.USERNAME || 'user', - // Add common binary locations to PATH for claude CLI - PATH: [process.env.PATH || '', ...pathAdditions].filter(Boolean).join(path.delimiter), PYTHONUNBUFFERED: '1', PYTHONIOENCODING: 'utf-8', PYTHONUTF8: '1' diff --git a/apps/frontend/src/main/changelog/version-suggester.ts b/apps/frontend/src/main/changelog/version-suggester.ts index 4869fe41ef..6d4a9b9126 100644 --- a/apps/frontend/src/main/changelog/version-suggester.ts +++ b/apps/frontend/src/main/changelog/version-suggester.ts @@ -1,9 +1,9 @@ import { spawn } from 'child_process'; -import * as path from 'path'; import * as os from 'os'; import type { GitCommit } from '../../shared/types'; import { getProfileEnv } from '../rate-limit-detector'; import { parsePythonCommand } from '../python-detector'; +import { getAugmentedEnv } from '../env-utils'; interface VersionSuggestion { version: string; @@ -215,31 +215,19 @@ except Exception as e: const homeDir = os.homedir(); const isWindows = process.platform === 'win32'; - // Build PATH with platform-appropriate separator and locations - const pathAdditions = isWindows - ? [ - path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude'), - path.join(homeDir, 'AppData', 'Roaming', 'npm'), - path.join(homeDir, '.local', 'bin'), - 'C:\\Program Files\\Claude', - 'C:\\Program Files (x86)\\Claude' - ] - : [ - '/usr/local/bin', - '/opt/homebrew/bin', - path.join(homeDir, '.local', 'bin'), - path.join(homeDir, 'bin') - ]; + // Use getAugmentedEnv() to ensure common tool paths are available + // even when app is launched from Finder/Dock + const augmentedEnv = getAugmentedEnv(); // Get active Claude profile environment const profileEnv = getProfileEnv(); const spawnEnv: Record = { - ...process.env as Record, + ...augmentedEnv, ...profileEnv, + // Ensure critical env vars are set for claude CLI ...(isWindows ? { USERPROFILE: homeDir } : { HOME: homeDir }), USER: process.env.USER || process.env.USERNAME || 'user', - PATH: [process.env.PATH || '', ...pathAdditions].filter(Boolean).join(path.delimiter), PYTHONUNBUFFERED: '1', PYTHONIOENCODING: 'utf-8', PYTHONUTF8: '1' diff --git a/apps/frontend/src/main/claude-cli-utils.ts b/apps/frontend/src/main/claude-cli-utils.ts new file mode 100644 index 0000000000..49a0c49c71 --- /dev/null +++ b/apps/frontend/src/main/claude-cli-utils.ts @@ -0,0 +1,77 @@ +import path from 'path'; +import { getAugmentedEnv, getAugmentedEnvAsync } from './env-utils'; +import { getToolPath, getToolPathAsync } from './cli-tool-manager'; + +export type ClaudeCliInvocation = { + command: string; + env: Record; +}; + +function ensureCommandDirInPath(command: string, env: Record): Record { + if (!path.isAbsolute(command)) { + return env; + } + + const pathSeparator = process.platform === 'win32' ? ';' : ':'; + const commandDir = path.dirname(command); + const currentPath = env.PATH || ''; + const pathEntries = currentPath.split(pathSeparator); + const normalizedCommandDir = path.normalize(commandDir); + const hasCommandDir = process.platform === 'win32' + ? 
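// (At this exact comparison: Windows resolves PATH entries case-insensitively --
//  'C:\\Tools' and 'c:\\tools' name the same directory -- so entries are lowercased
//  before matching, while POSIX systems are case-sensitive and entries are compared
//  as-is after path.normalize.)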
pathEntries + .map((entry) => path.normalize(entry).toLowerCase()) + .includes(normalizedCommandDir.toLowerCase()) + : pathEntries + .map((entry) => path.normalize(entry)) + .includes(normalizedCommandDir); + + if (hasCommandDir) { + return env; + } + + return { + ...env, + PATH: [commandDir, currentPath].filter(Boolean).join(pathSeparator), + }; +} + +/** + * Returns the Claude CLI command path and an environment with PATH updated to include the CLI directory. + * + * WARNING: This function uses synchronous subprocess calls that block the main process. + * For use in Electron main process, prefer getClaudeCliInvocationAsync() instead. + */ +export function getClaudeCliInvocation(): ClaudeCliInvocation { + const command = getToolPath('claude'); + const env = getAugmentedEnv(); + + return { + command, + env: ensureCommandDirInPath(command, env), + }; +} + +/** + * Returns the Claude CLI command path and environment asynchronously (non-blocking). + * + * Safe to call from Electron main process without blocking the event loop. + * Uses cached values if available for instant response. + * + * @example + * ```typescript + * const { command, env } = await getClaudeCliInvocationAsync(); + * spawn(command, ['--version'], { env }); + * ``` + */ +export async function getClaudeCliInvocationAsync(): Promise { + // Run both detections in parallel for efficiency + const [command, env] = await Promise.all([ + getToolPathAsync('claude'), + getAugmentedEnvAsync(), + ]); + + return { + command, + env: ensureCommandDirInPath(command, env), + }; +} diff --git a/apps/frontend/src/main/claude-profile-manager.ts b/apps/frontend/src/main/claude-profile-manager.ts index 0f9c88f6d6..f64ef42d81 100644 --- a/apps/frontend/src/main/claude-profile-manager.ts +++ b/apps/frontend/src/main/claude-profile-manager.ts @@ -13,7 +13,7 @@ import { app } from 'electron'; import { join } from 'path'; -import { existsSync, mkdirSync } from 'fs'; +import { mkdir } from 'fs/promises'; import type { ClaudeProfile, ClaudeProfileSettings, @@ -32,6 +32,7 @@ import { } from './claude-profile/rate-limit-manager'; import { loadProfileStore, + loadProfileStoreAsync, saveProfileStore, ProfileStoreData, DEFAULT_AUTO_SWITCH_SETTINGS @@ -57,19 +58,45 @@ import { */ export class ClaudeProfileManager { private storePath: string; + private configDir: string; private data: ProfileStoreData; + private initialized: boolean = false; constructor() { - const configDir = join(app.getPath('userData'), 'config'); - this.storePath = join(configDir, 'claude-profiles.json'); + this.configDir = join(app.getPath('userData'), 'config'); + this.storePath = join(this.configDir, 'claude-profiles.json'); - // Ensure directory exists - if (!existsSync(configDir)) { - mkdirSync(configDir, { recursive: true }); + // DON'T do file I/O here - defer to async initialize() + // Start with default data until initialized + this.data = this.createDefaultData(); + } + + /** + * Initialize the profile manager asynchronously (non-blocking) + * This should be called at app startup via initializeClaudeProfileManager() + */ + async initialize(): Promise { + if (this.initialized) return; + + // Ensure directory exists (async) - mkdir with recursive:true is idempotent + await mkdir(this.configDir, { recursive: true }); + + // Load existing data asynchronously + const loadedData = await loadProfileStoreAsync(this.storePath); + if (loadedData) { + this.data = loadedData; } + // else: keep the default data from constructor + + this.initialized = true; + 
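// A startup usage sketch (call site illustrative): prefer the promise-cached entry
// point defined below,
//   const manager = await initializeClaudeProfileManager();
// so concurrent callers share a single initialize() run and none observes the default
// in-memory data that exists before the store has been loaded from disk.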
console.warn('[ClaudeProfileManager] Initialized asynchronously'); + } - // Load existing data or initialize with default profile - this.data = this.load(); + /** + * Check if the profile manager has been initialized + */ + isInitialized(): boolean { + return this.initialized; } /** @@ -522,11 +549,13 @@ export class ClaudeProfileManager { } } -// Singleton instance +// Singleton instance and initialization promise let profileManager: ClaudeProfileManager | null = null; +let initPromise: Promise | null = null; /** * Get the singleton Claude profile manager instance + * Note: For async contexts, prefer initializeClaudeProfileManager() to ensure initialization */ export function getClaudeProfileManager(): ClaudeProfileManager { if (!profileManager) { @@ -534,3 +563,28 @@ export function getClaudeProfileManager(): ClaudeProfileManager { } return profileManager; } + +/** + * Initialize and get the singleton Claude profile manager instance (async) + * This ensures the profile manager is fully initialized before use. + * Uses promise caching to prevent concurrent initialization. + */ +export async function initializeClaudeProfileManager(): Promise { + if (!profileManager) { + profileManager = new ClaudeProfileManager(); + } + + // If already initialized, return immediately + if (profileManager.isInitialized()) { + return profileManager; + } + + // If initialization is in progress, wait for it (promise caching) + if (!initPromise) { + initPromise = profileManager.initialize().then(() => { + return profileManager!; + }); + } + + return initPromise; +} diff --git a/apps/frontend/src/main/claude-profile/profile-storage.ts b/apps/frontend/src/main/claude-profile/profile-storage.ts index bd5b89c372..a4c825e2f2 100644 --- a/apps/frontend/src/main/claude-profile/profile-storage.ts +++ b/apps/frontend/src/main/claude-profile/profile-storage.ts @@ -4,6 +4,7 @@ */ import { existsSync, readFileSync, writeFileSync } from 'fs'; +import { readFile } from 'fs/promises'; import type { ClaudeProfile, ClaudeAutoSwitchSettings } from '../../shared/types'; export const STORE_VERSION = 3; // Bumped for encrypted token storage @@ -30,6 +31,42 @@ export interface ProfileStoreData { autoSwitch?: ClaudeAutoSwitchSettings; } +/** + * Parse and migrate profile data from JSON. + * Handles version migration and date parsing. + * Shared helper used by both sync and async loaders. + */ +function parseAndMigrateProfileData(data: Record): ProfileStoreData | null { + // Handle version migration + if (data.version === 1) { + // Migrate v1 to v2: add usage and rateLimitEvents fields + data.version = STORE_VERSION; + data.autoSwitch = DEFAULT_AUTO_SWITCH_SETTINGS; + } + + if (data.version === STORE_VERSION) { + // Parse dates + const profiles = data.profiles as ClaudeProfile[]; + data.profiles = profiles.map((p: ClaudeProfile) => ({ + ...p, + createdAt: new Date(p.createdAt), + lastUsedAt: p.lastUsedAt ? new Date(p.lastUsedAt) : undefined, + usage: p.usage ? 
{ + ...p.usage, + lastUpdated: new Date(p.usage.lastUpdated) + } : undefined, + rateLimitEvents: p.rateLimitEvents?.map(e => ({ + ...e, + hitAt: new Date(e.hitAt), + resetAt: new Date(e.resetAt) + })) + })); + return data as unknown as ProfileStoreData; + } + + return null; +} + /** * Load profiles from disk */ @@ -38,32 +75,7 @@ export function loadProfileStore(storePath: string): ProfileStoreData | null { if (existsSync(storePath)) { const content = readFileSync(storePath, 'utf-8'); const data = JSON.parse(content); - - // Handle version migration - if (data.version === 1) { - // Migrate v1 to v2: add usage and rateLimitEvents fields - data.version = STORE_VERSION; - data.autoSwitch = DEFAULT_AUTO_SWITCH_SETTINGS; - } - - if (data.version === STORE_VERSION) { - // Parse dates - data.profiles = data.profiles.map((p: ClaudeProfile) => ({ - ...p, - createdAt: new Date(p.createdAt), - lastUsedAt: p.lastUsedAt ? new Date(p.lastUsedAt) : undefined, - usage: p.usage ? { - ...p.usage, - lastUpdated: new Date(p.usage.lastUpdated) - } : undefined, - rateLimitEvents: p.rateLimitEvents?.map(e => ({ - ...e, - hitAt: new Date(e.hitAt), - resetAt: new Date(e.resetAt) - })) - })); - return data; - } + return parseAndMigrateProfileData(data); } } catch (error) { console.error('[ProfileStorage] Error loading profiles:', error); @@ -72,6 +84,27 @@ return null; } +/** + * Load profiles from disk (async, non-blocking) + * Use this version for initialization to avoid blocking the main process. + */ +export async function loadProfileStoreAsync(storePath: string): Promise<ProfileStoreData | null> { + try { + // Read file directly - avoid TOCTOU race condition by not checking existence first + // If file doesn't exist, readFile will throw ENOENT which we handle below + const content = await readFile(storePath, 'utf-8'); + const data = JSON.parse(content); + return parseAndMigrateProfileData(data); + } catch (error) { + // ENOENT is expected if file doesn't exist yet + if ((error as NodeJS.ErrnoException).code !== 'ENOENT') { + console.error('[ProfileStorage] Error loading profiles:', error); + } + } + + return null; +} + /** * Save profiles to disk */ diff --git a/apps/frontend/src/main/claude-profile/profile-utils.ts b/apps/frontend/src/main/claude-profile/profile-utils.ts index 557d8fae0e..80a3c048cb 100644 --- a/apps/frontend/src/main/claude-profile/profile-utils.ts +++ b/apps/frontend/src/main/claude-profile/profile-utils.ts @@ -56,7 +56,7 @@ export async function createProfileDirectory(profileName: string): Promise<string> { diff --git a/apps/frontend/src/main/cli-tool-manager.ts b/apps/frontend/src/main/cli-tool-manager.ts +async function existsAsync(filePath: string): Promise<boolean> { + try { + await fsPromises.access(filePath); + return true; + } catch { + return false; + } +} import type { ToolDetectionResult } from '../shared/types'; import { findHomebrewPython as findHomebrewPythonUtil } from './utils/homebrew-python'; +import { + getWindowsExecutablePaths, + getWindowsExecutablePathsAsync, + WINDOWS_GIT_PATHS, + findWindowsExecutableViaWhere, + findWindowsExecutableViaWhereAsync, +} from './utils/windows-paths'; /** * Supported CLI tools managed by this system */ @@ -103,6 +130,139 @@ function isWrongPlatformPath(pathStr: string | undefined): boolean { return false; } +// ============================================================================ +// SHARED HELPERS - Used by both sync and async Claude detection +// ============================================================================ + +/** + * Configuration for Claude CLI detection paths */ +interface ClaudeDetectionPaths { + /** Homebrew paths for macOS
(Apple Silicon and Intel) */ + homebrewPaths: string[]; + /** Platform-specific standard installation paths */ + platformPaths: string[]; + /** Path to NVM versions directory for Node.js-installed Claude */ + nvmVersionsDir: string; +} + +/** + * Get all candidate paths for Claude CLI detection. + * + * Returns platform-specific paths where Claude CLI might be installed. + * This pure function consolidates path configuration used by both sync + * and async detection methods. + * + * @param homeDir - User's home directory (from os.homedir()) + * @returns Object containing homebrew, platform, and NVM paths + * + * @example + * const paths = getClaudeDetectionPaths('/Users/john'); + * // On macOS: { homebrewPaths: ['/opt/homebrew/bin/claude', ...], ... } + */ +export function getClaudeDetectionPaths(homeDir: string): ClaudeDetectionPaths { + const homebrewPaths = [ + '/opt/homebrew/bin/claude', // Apple Silicon + '/usr/local/bin/claude', // Intel Mac + ]; + + const platformPaths = process.platform === 'win32' + ? [ + path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude', 'claude.exe'), + path.join(homeDir, 'AppData', 'Roaming', 'npm', 'claude.cmd'), + path.join(homeDir, '.local', 'bin', 'claude.exe'), + 'C:\\Program Files\\Claude\\claude.exe', + 'C:\\Program Files (x86)\\Claude\\claude.exe', + ] + : [ + path.join(homeDir, '.local', 'bin', 'claude'), + path.join(homeDir, 'bin', 'claude'), + ]; + + const nvmVersionsDir = path.join(homeDir, '.nvm', 'versions', 'node'); + + return { homebrewPaths, platformPaths, nvmVersionsDir }; +} + +/** + * Sort NVM version directories by semantic version (newest first). + * + * Filters entries to only include directories starting with 'v' (version directories) + * and sorts them in descending order so the newest Node.js version is checked first. + * + * @param entries - Directory entries from readdir with { name, isDirectory() } + * @returns Array of version directory names sorted newest first + * + * @example + * const entries = [ + * { name: 'v18.0.0', isDirectory: () => true }, + * { name: 'v20.0.0', isDirectory: () => true }, + * { name: '.DS_Store', isDirectory: () => false }, + * ]; + * sortNvmVersionDirs(entries); // ['v20.0.0', 'v18.0.0'] + */ +export function sortNvmVersionDirs( + entries: Array<{ name: string; isDirectory(): boolean }> +): string[] { + // Regex to match valid semver directories: v20.0.0, v18.17.1, etc. + // This prevents NaN from malformed versions (e.g., v20.abc.1) breaking sort + const semverRegex = /^v\d+\.\d+\.\d+$/; + + return entries + .filter((entry) => entry.isDirectory() && semverRegex.test(entry.name)) + .sort((a, b) => { + // Parse version numbers: v20.0.0 -> [20, 0, 0] + const vA = a.name.slice(1).split('.').map(Number); + const vB = b.name.slice(1).split('.').map(Number); + // Compare major, minor, patch in order (descending) + for (let i = 0; i < 3; i++) { + const diff = (vB[i] ?? 0) - (vA[i] ?? 0); + if (diff !== 0) return diff; + } + return 0; + }) + .map((entry) => entry.name); +} + +/** + * Build a ToolDetectionResult from a validation result. + * + * Returns null if validation failed, otherwise constructs the full result object. + * This helper consolidates the result-building logic used throughout detection. 
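 + * Both detectClaude() and detectClaudeAsync() below route every candidate path through this helper, so the sync and async detection paths produce identical result objects.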
+ * + * @param claudePath - The path that was validated + * @param validation - The validation result from validateClaude/validateClaudeAsync + * @param source - The source of detection ('user-config', 'homebrew', 'system-path', 'nvm') + * @param messagePrefix - Prefix for the success message (e.g., 'Using Homebrew Claude CLI') + * @returns ToolDetectionResult if valid, null if validation failed + * + * @example + * const result = buildClaudeDetectionResult( + * '/opt/homebrew/bin/claude', + * { valid: true, version: '1.0.0', message: 'OK' }, + * 'homebrew', + * 'Using Homebrew Claude CLI' + * ); + * // Returns: { found: true, path: '/opt/homebrew/bin/claude', version: '1.0.0', ... } + */ +export function buildClaudeDetectionResult( + claudePath: string, + validation: ToolValidation, + source: ToolDetectionResult['source'], + messagePrefix: string +): ToolDetectionResult | null { + if (!validation.valid) { + return null; + } + return { + found: true, + path: claudePath, + version: validation.version, + source, + message: `${messagePrefix}: ${claudePath}`, + }; +} + /** * Centralized CLI Tool Manager * @@ -392,7 +552,40 @@ class CLIToolManager { } } - // 4. Not found - fallback to 'git' + // 4. Windows-specific detection using 'where' command (most reliable for custom installs) + if (process.platform === 'win32') { + // First try 'where' command - finds git regardless of installation location + const whereGitPath = findWindowsExecutableViaWhere('git', '[Git]'); + if (whereGitPath) { + const validation = this.validateGit(whereGitPath); + if (validation.valid) { + return { + found: true, + path: whereGitPath, + version: validation.version, + source: 'system-path', + message: `Using Windows Git: ${whereGitPath}`, + }; + } + } + + // Fallback to checking common installation paths + const windowsPaths = getWindowsExecutablePaths(WINDOWS_GIT_PATHS, '[Git]'); + for (const winGitPath of windowsPaths) { + const validation = this.validateGit(winGitPath); + if (validation.valid) { + return { + found: true, + path: winGitPath, + version: validation.version, + source: 'system-path', + message: `Using Windows Git: ${winGitPath}`, + }; + } + } + } + + // 5. Not found - fallback to 'git' return { found: false, source: 'fallback', @@ -517,99 +710,75 @@ class CLIToolManager { * @returns Detection result for Claude CLI */ private detectClaude(): ToolDetectionResult { + const homeDir = os.homedir(); + const paths = getClaudeDetectionPaths(homeDir); + // 1. User configuration if (this.userConfig.claudePath) { - // Check if path is from wrong platform (e.g., Windows path on macOS) if (isWrongPlatformPath(this.userConfig.claudePath)) { console.warn( `[Claude CLI] User-configured path is from different platform, ignoring: ${this.userConfig.claudePath}` ); } else { const validation = this.validateClaude(this.userConfig.claudePath); - if (validation.valid) { - return { - found: true, - path: this.userConfig.claudePath, - version: validation.version, - source: 'user-config', - message: `Using user-configured Claude CLI: ${this.userConfig.claudePath}`, - }; - } - console.warn( - `[Claude CLI] User-configured path invalid: ${validation.message}` + const result = buildClaudeDetectionResult( + this.userConfig.claudePath, validation, 'user-config', 'Using user-configured Claude CLI' ); + if (result) return result; + console.warn(`[Claude CLI] User-configured path invalid: ${validation.message}`); } } // 2. 
Homebrew (macOS) if (process.platform === 'darwin') { - const homebrewPaths = [ - '/opt/homebrew/bin/claude', // Apple Silicon - '/usr/local/bin/claude', // Intel Mac - ]; - - for (const claudePath of homebrewPaths) { + for (const claudePath of paths.homebrewPaths) { if (existsSync(claudePath)) { const validation = this.validateClaude(claudePath); - if (validation.valid) { - return { - found: true, - path: claudePath, - version: validation.version, - source: 'homebrew', - message: `Using Homebrew Claude CLI: ${claudePath}`, - }; - } + const result = buildClaudeDetectionResult(claudePath, validation, 'homebrew', 'Using Homebrew Claude CLI'); + if (result) return result; } } } // 3. System PATH (augmented) - const claudePath = findExecutable('claude'); - if (claudePath) { - const validation = this.validateClaude(claudePath); - if (validation.valid) { - return { - found: true, - path: claudePath, - version: validation.version, - source: 'system-path', - message: `Using system Claude CLI: ${claudePath}`, - }; + const systemClaudePath = findExecutable('claude'); + if (systemClaudePath) { + const validation = this.validateClaude(systemClaudePath); + const result = buildClaudeDetectionResult(systemClaudePath, validation, 'system-path', 'Using system Claude CLI'); + if (result) return result; + } + + // 4. NVM paths (Unix only) - check before platform paths for better Node.js integration + if (process.platform !== 'win32') { + try { + if (existsSync(paths.nvmVersionsDir)) { + const nodeVersions = readdirSync(paths.nvmVersionsDir, { withFileTypes: true }); + const versionNames = sortNvmVersionDirs(nodeVersions); + + for (const versionName of versionNames) { + const nvmClaudePath = path.join(paths.nvmVersionsDir, versionName, 'bin', 'claude'); + if (existsSync(nvmClaudePath)) { + const validation = this.validateClaude(nvmClaudePath); + const result = buildClaudeDetectionResult(nvmClaudePath, validation, 'nvm', 'Using NVM Claude CLI'); + if (result) return result; + } + } + } + } catch (error) { + console.warn(`[Claude CLI] Unable to read NVM directory: ${error}`); } } - // 4. Platform-specific standard locations - const homeDir = os.homedir(); - const platformPaths = process.platform === 'win32' - ? [ - path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude', 'claude.exe'), - path.join(homeDir, 'AppData', 'Roaming', 'npm', 'claude.cmd'), - path.join(homeDir, '.local', 'bin', 'claude.exe'), - 'C:\\Program Files\\Claude\\claude.exe', - 'C:\\Program Files (x86)\\Claude\\claude.exe', - ] - : [ - path.join(homeDir, '.local', 'bin', 'claude'), - path.join(homeDir, 'bin', 'claude'), - ]; - - for (const claudePath of platformPaths) { + // 5. Platform-specific standard locations + for (const claudePath of paths.platformPaths) { if (existsSync(claudePath)) { const validation = this.validateClaude(claudePath); - if (validation.valid) { - return { - found: true, - path: claudePath, - version: validation.version, - source: 'system-path', - message: `Using Claude CLI: ${claudePath}`, - }; - } + const result = buildClaudeDetectionResult(claudePath, validation, 'system-path', 'Using Claude CLI'); + if (result) return result; } } - // 5. Not found + // 6. 
Not found return { found: false, source: 'fallback', @@ -759,6 +928,7 @@ class CLIToolManager { timeout: 5000, windowsHide: true, shell: needsShell, + env: getAugmentedEnv(), }).trim(); // Claude CLI version output format: "claude-code version X.Y.Z" or similar @@ -778,116 +948,747 @@ } } + // ============================================================================ + // ASYNC METHODS - Non-blocking alternatives for Electron main process + // ============================================================================ + /** - * Get bundled Python path for packaged apps + * Get the path for a CLI tool asynchronously (non-blocking) * - * Only available in packaged Electron apps where Python is bundled - * in the resources directory. + * Uses cached path if available, otherwise detects asynchronously. + * Safe to call from Electron main process without blocking. * - * @returns Path to bundled Python or null if not found + * @param tool - The CLI tool to get the path for + * @returns Promise resolving to the tool path */ - private getBundledPythonPath(): string | null { - if (!app.isPackaged) { - return null; + async getToolPathAsync(tool: CLITool): Promise<string> { + // Check cache first (instant return if cached) + const cached = this.cache.get(tool); + if (cached) { + console.warn( + `[CLI Tools] Using cached ${tool}: ${cached.path} (${cached.source})` + ); + return cached.path; } - const resourcesPath = process.resourcesPath; - const isWindows = process.platform === 'win32'; - - const pythonPath = isWindows - ? path.join(resourcesPath, 'python', 'python.exe') - : path.join(resourcesPath, 'python', 'bin', 'python3'); + // Detect asynchronously + const result = await this.detectToolPathAsync(tool); + if (result.found && result.path) { + this.cache.set(tool, { + path: result.path, + version: result.version, + source: result.source, + }); + console.warn(`[CLI Tools] Detected ${tool}: ${result.path} (${result.source})`); + return result.path; + } - return existsSync(pythonPath) ? pythonPath : null; + // Fallback to tool name (let system PATH resolve it) + console.warn(`[CLI Tools] ${tool} not found, using fallback: "${tool}"`); + return tool; } /** - * Find Homebrew Python on macOS - * Delegates to shared utility function. + * Detect tool path asynchronously * - * @returns Path to Homebrew Python or null if not found + * All tools now use async detection methods to prevent blocking the main process. + * + * @param tool - The tool to detect + * @returns Promise resolving to detection result */ - private findHomebrewPython(): string | null { - return findHomebrewPythonUtil( - (pythonPath) => this.validatePython(pythonPath), - '[CLI Tools]' - ); + private async detectToolPathAsync(tool: CLITool): Promise<ToolDetectionResult> { + switch (tool) { + case 'claude': + return this.detectClaudeAsync(); + case 'python': + return this.detectPythonAsync(); + case 'git': + return this.detectGitAsync(); + case 'gh': + return this.detectGitHubCLIAsync(); + default: + return { + found: false, + source: 'fallback', + message: `Unknown tool: ${tool}`, + }; + } } /** - * Clear cache manually + * Validate Claude CLI asynchronously (non-blocking) * - * Useful for testing or forcing re-detection. - * Normally not needed as cache is cleared automatically on settings change.
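 + * A minimal usage sketch (hypothetical path; real callers reach this via detectClaudeAsync): + * @example + * ```typescript + * const validation = await this.validateClaudeAsync('/opt/homebrew/bin/claude'); + * if (validation.valid) console.warn(`Claude CLI ${validation.version} is usable`); + * ```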
+ * @param claudeCmd - The Claude CLI command to validate + * @returns Promise resolving to validation result */ - clearCache(): void { - this.cache.clear(); - console.warn('[CLI Tools] Cache cleared'); + private async validateClaudeAsync(claudeCmd: string): Promise<ToolValidation> { + try { + const needsShell = process.platform === 'win32' && + (claudeCmd.endsWith('.cmd') || claudeCmd.endsWith('.bat')); + + const { stdout } = await execFileAsync(claudeCmd, ['--version'], { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + shell: needsShell, + env: await getAugmentedEnvAsync(), + }); + + const version = stdout.trim(); + const match = version.match(/(\d+\.\d+\.\d+)/); + const versionStr = match ? match[1] : version.split('\n')[0]; + + return { + valid: true, + version: versionStr, + message: `Claude CLI ${versionStr} is available`, + }; + } catch (error) { + return { + valid: false, + message: `Failed to validate Claude CLI: ${error instanceof Error ? error.message : String(error)}`, + }; + } } /** - * Get tool detection info for diagnostics - * - * Performs fresh detection without using cache. - * Useful for Settings UI to show current detection status. + * Validate Python version asynchronously (non-blocking) * - * @param tool - The tool to get detection info for - * @returns Detection result with full metadata + * @param pythonCmd - The Python command to validate + * @returns Promise resolving to validation result */ - getToolInfo(tool: CLITool): ToolDetectionResult { - return this.detectToolPath(tool); - } -} + private async validatePythonAsync(pythonCmd: string): Promise<ToolValidation> { + const MINIMUM_VERSION = '3.10.0'; -// Singleton instance -const cliToolManager = new CLIToolManager(); + try { + const parts = pythonCmd.split(' '); + const cmd = parts[0]; + const args = [...parts.slice(1), '--version']; -/** - * Get the path for a CLI tool - * - * Convenience function for accessing the tool manager singleton. - * Uses cached path if available, otherwise auto-detects. - * - * @param tool - The CLI tool to get the path for - * @returns The resolved path to the tool executable - * - * @example - * ```typescript - * import { getToolPath } from './cli-tool-manager'; - * - * const pythonPath = getToolPath('python'); - * const gitPath = getToolPath('git'); - * const ghPath = getToolPath('gh'); - * - * execSync(`${gitPath} status`, { cwd: projectPath }); - * ``` - */ -export function getToolPath(tool: CLITool): string { - return cliToolManager.getToolPath(tool); -} + const { stdout } = await execFileAsync(cmd, args, { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + env: await getAugmentedEnvAsync(), + }); -/** - * Configure CLI tools with user settings - * - * Call this when user updates CLI tool paths in Settings. - * Clears cache to force re-detection with new configuration.
- * - * @param config - User configuration for CLI tool paths - * - * @example - * ```typescript - * import { configureTools } from './cli-tool-manager'; - * - * // When settings are loaded or updated - * configureTools({ - * pythonPath: settings.pythonPath, - * gitPath: settings.gitPath, - * githubCLIPath: settings.githubCLIPath, - * }); - * ``` - */ -export function configureTools(config: ToolConfig): void { - cliToolManager.configure(config); + const version = stdout.trim(); + const match = version.match(/Python (\d+\.\d+\.\d+)/); + if (!match) { + return { + valid: false, + message: 'Unable to detect Python version', + }; + } + + const versionStr = match[1]; + const [major, minor] = versionStr.split('.').map(Number); + const [reqMajor, reqMinor] = MINIMUM_VERSION.split('.').map(Number); + + const meetsRequirement = + major > reqMajor || (major === reqMajor && minor >= reqMinor); + + if (!meetsRequirement) { + return { + valid: false, + version: versionStr, + message: `Python ${versionStr} is too old. Requires ${MINIMUM_VERSION}+`, + }; + } + + return { + valid: true, + version: versionStr, + message: `Python ${versionStr} meets requirements`, + }; + } catch (error) { + return { + valid: false, + message: `Failed to validate Python: ${error}`, + }; + } + } + + /** + * Validate Git asynchronously (non-blocking) + * + * @param gitCmd - The Git command to validate + * @returns Promise resolving to validation result + */ + private async validateGitAsync(gitCmd: string): Promise<ToolValidation> { + try { + const { stdout } = await execFileAsync(gitCmd, ['--version'], { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + env: await getAugmentedEnvAsync(), + }); + + const version = stdout.trim(); + const match = version.match(/git version (\d+\.\d+\.\d+)/); + const versionStr = match ? match[1] : version; + + return { + valid: true, + version: versionStr, + message: `Git ${versionStr} is available`, + }; + } catch (error) { + return { + valid: false, + message: `Failed to validate Git: ${error instanceof Error ? error.message : String(error)}`, + }; + } + } + + /** + * Validate GitHub CLI asynchronously (non-blocking) + * + * @param ghCmd - The GitHub CLI command to validate + * @returns Promise resolving to validation result + */ + private async validateGitHubCLIAsync(ghCmd: string): Promise<ToolValidation> { + try { + const { stdout } = await execFileAsync(ghCmd, ['--version'], { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + env: await getAugmentedEnvAsync(), + }); + + const version = stdout.trim(); + const match = version.match(/gh version (\d+\.\d+\.\d+)/); + const versionStr = match ? match[1] : version.split('\n')[0]; + + return { + valid: true, + version: versionStr, + message: `GitHub CLI ${versionStr} is available`, + }; + } catch (error) { + return { + valid: false, + message: `Failed to validate GitHub CLI: ${error instanceof Error ? error.message : String(error)}`, + }; + } + } + + /** + * Detect Claude CLI asynchronously (non-blocking) + * + * Same detection logic as detectClaude but uses async validation. + * + * @returns Promise resolving to detection result + */ + private async detectClaudeAsync(): Promise<ToolDetectionResult> { + const homeDir = os.homedir(); + const paths = getClaudeDetectionPaths(homeDir); + + // 1. User configuration + if (this.userConfig.claudePath) { + if (isWrongPlatformPath(this.userConfig.claudePath)) { + console.warn( + `[Claude CLI] User-configured path is from different platform, ignoring: ${this.userConfig.claudePath}` + ); + } else { + const validation = await this.validateClaudeAsync(this.userConfig.claudePath); + const result = buildClaudeDetectionResult( + this.userConfig.claudePath, validation, 'user-config', 'Using user-configured Claude CLI' + ); + if (result) return result; + console.warn(`[Claude CLI] User-configured path invalid: ${validation.message}`); + } + } + + // 2. Homebrew (macOS) + if (process.platform === 'darwin') { + for (const claudePath of paths.homebrewPaths) { + if (await existsAsync(claudePath)) { + const validation = await this.validateClaudeAsync(claudePath); + const result = buildClaudeDetectionResult(claudePath, validation, 'homebrew', 'Using Homebrew Claude CLI'); + if (result) return result; + } + } + } + + // 3. System PATH (augmented) - using async findExecutable + const systemClaudePath = await findExecutableAsync('claude'); + if (systemClaudePath) { + const validation = await this.validateClaudeAsync(systemClaudePath); + const result = buildClaudeDetectionResult(systemClaudePath, validation, 'system-path', 'Using system Claude CLI'); + if (result) return result; + } + + // 4. NVM paths (Unix only) - check before platform paths for better Node.js integration + if (process.platform !== 'win32') { + try { + if (await existsAsync(paths.nvmVersionsDir)) { + const nodeVersions = await fsPromises.readdir(paths.nvmVersionsDir, { withFileTypes: true }); + const versionNames = sortNvmVersionDirs(nodeVersions); + + for (const versionName of versionNames) { + const nvmClaudePath = path.join(paths.nvmVersionsDir, versionName, 'bin', 'claude'); + if (await existsAsync(nvmClaudePath)) { + const validation = await this.validateClaudeAsync(nvmClaudePath); + const result = buildClaudeDetectionResult(nvmClaudePath, validation, 'nvm', 'Using NVM Claude CLI'); + if (result) return result; + } + } + } + } catch (error) { + console.warn(`[Claude CLI] Unable to read NVM directory: ${error}`); + } + } + + // 5. Platform-specific standard locations + for (const claudePath of paths.platformPaths) { + if (await existsAsync(claudePath)) { + const validation = await this.validateClaudeAsync(claudePath); + const result = buildClaudeDetectionResult(claudePath, validation, 'system-path', 'Using Claude CLI'); + if (result) return result; + } + } + + // 6. Not found + return { + found: false, + source: 'fallback', + message: 'Claude CLI not found. Install from https://claude.ai/download', + }; + } + + /** + * Detect Python asynchronously (non-blocking) + * + * Same detection logic as detectPython but uses async validation. + * + * @returns Promise resolving to detection result + */ + private async detectPythonAsync(): Promise<ToolDetectionResult> { + const MINIMUM_VERSION = '3.10.0'; + + // 1. User configuration + if (this.userConfig.pythonPath) { + if (isWrongPlatformPath(this.userConfig.pythonPath)) { + console.warn( + `[Python] User-configured path is from different platform, ignoring: ${this.userConfig.pythonPath}` + ); + } else { + const validation = await this.validatePythonAsync(this.userConfig.pythonPath); + if (validation.valid) { + return { + found: true, + path: this.userConfig.pythonPath, + version: validation.version, + source: 'user-config', + message: `Using user-configured Python: ${this.userConfig.pythonPath}`, + }; + } + console.warn(`[Python] User-configured path invalid: ${validation.message}`); + } + } + + // 2. Bundled Python (packaged apps only) + if (app.isPackaged) { + const bundledPath = this.getBundledPythonPath(); + if (bundledPath) { + const validation = await this.validatePythonAsync(bundledPath); + if (validation.valid) { + return { + found: true, + path: bundledPath, + version: validation.version, + source: 'bundled', + message: `Using bundled Python: ${bundledPath}`, + }; + } + } + } + + // 3. Homebrew Python (macOS) - simplified async version + if (process.platform === 'darwin') { + const homebrewPaths = [ + '/opt/homebrew/bin/python3', + '/opt/homebrew/bin/python3.12', + '/opt/homebrew/bin/python3.11', + '/opt/homebrew/bin/python3.10', + '/usr/local/bin/python3', + ]; + for (const pythonPath of homebrewPaths) { + if (await existsAsync(pythonPath)) { + const validation = await this.validatePythonAsync(pythonPath); + if (validation.valid) { + return { + found: true, + path: pythonPath, + version: validation.version, + source: 'homebrew', + message: `Using Homebrew Python: ${pythonPath}`, + }; + } + } + } + } + + // 4. System PATH (augmented) + const candidates = + process.platform === 'win32' + ? ['py -3', 'python', 'python3', 'py'] + : ['python3', 'python']; + + for (const cmd of candidates) { + if (cmd.startsWith('py ')) { + const validation = await this.validatePythonAsync(cmd); + if (validation.valid) { + return { + found: true, + path: cmd, + version: validation.version, + source: 'system-path', + message: `Using system Python: ${cmd}`, + }; + } + } else { + const pythonPath = await findExecutableAsync(cmd); + if (pythonPath) { + const validation = await this.validatePythonAsync(pythonPath); + if (validation.valid) { + return { + found: true, + path: pythonPath, + version: validation.version, + source: 'system-path', + message: `Using system Python: ${pythonPath}`, + }; + } + } + } + } + + // 5. Not found + return { + found: false, + source: 'fallback', + message: + `Python ${MINIMUM_VERSION}+ not found. ` + + 'Please install Python or configure in Settings.', + }; + } + + /** + * Detect Git asynchronously (non-blocking) + * + * Same detection logic as detectGit but uses async validation. + * + * @returns Promise resolving to detection result + */ + private async detectGitAsync(): Promise<ToolDetectionResult> { + // 1. User configuration + if (this.userConfig.gitPath) { + if (isWrongPlatformPath(this.userConfig.gitPath)) { + console.warn( + `[Git] User-configured path is from different platform, ignoring: ${this.userConfig.gitPath}` + ); + } else { + const validation = await this.validateGitAsync(this.userConfig.gitPath); + if (validation.valid) { + return { + found: true, + path: this.userConfig.gitPath, + version: validation.version, + source: 'user-config', + message: `Using user-configured Git: ${this.userConfig.gitPath}`, + }; + } + console.warn(`[Git] User-configured path invalid: ${validation.message}`); + } + } + + // 2. Homebrew (macOS) + if (process.platform === 'darwin') { + const homebrewPaths = [ + '/opt/homebrew/bin/git', + '/usr/local/bin/git', + ]; + + for (const gitPath of homebrewPaths) { + if (await existsAsync(gitPath)) { + const validation = await this.validateGitAsync(gitPath); + if (validation.valid) { + return { + found: true, + path: gitPath, + version: validation.version, + source: 'homebrew', + message: `Using Homebrew Git: ${gitPath}`, + }; + } + } + } + } + + // 3. System PATH (augmented) + const gitPath = await findExecutableAsync('git'); + if (gitPath) { + const validation = await this.validateGitAsync(gitPath); + if (validation.valid) { + return { + found: true, + path: gitPath, + version: validation.version, + source: 'system-path', + message: `Using system Git: ${gitPath}`, + }; + } + } + + // 4. Windows-specific detection (async to avoid blocking main process) + if (process.platform === 'win32') { + const whereGitPath = await findWindowsExecutableViaWhereAsync('git', '[Git]'); + if (whereGitPath) { + const validation = await this.validateGitAsync(whereGitPath); + if (validation.valid) { + return { + found: true, + path: whereGitPath, + version: validation.version, + source: 'system-path', + message: `Using Windows Git: ${whereGitPath}`, + }; + } + } + + const windowsPaths = await getWindowsExecutablePathsAsync(WINDOWS_GIT_PATHS, '[Git]'); + for (const winGitPath of windowsPaths) { + const validation = await this.validateGitAsync(winGitPath); + if (validation.valid) { + return { + found: true, + path: winGitPath, + version: validation.version, + source: 'system-path', + message: `Using Windows Git: ${winGitPath}`, + }; + } + } + } + + // 5. Not found + return { + found: false, + source: 'fallback', + message: 'Git not found in standard locations. Using fallback "git".', + }; + } + + /** + * Detect GitHub CLI asynchronously (non-blocking) + * + * Same detection logic as detectGitHubCLI but uses async validation. + * + * @returns Promise resolving to detection result + */ + private async detectGitHubCLIAsync(): Promise<ToolDetectionResult> { + // 1. User configuration + if (this.userConfig.githubCLIPath) { + if (isWrongPlatformPath(this.userConfig.githubCLIPath)) { + console.warn( + `[GitHub CLI] User-configured path is from different platform, ignoring: ${this.userConfig.githubCLIPath}` + ); + } else { + const validation = await this.validateGitHubCLIAsync(this.userConfig.githubCLIPath); + if (validation.valid) { + return { + found: true, + path: this.userConfig.githubCLIPath, + version: validation.version, + source: 'user-config', + message: `Using user-configured GitHub CLI: ${this.userConfig.githubCLIPath}`, + }; + } + console.warn(`[GitHub CLI] User-configured path invalid: ${validation.message}`); + } + } + + // 2. Homebrew (macOS) + if (process.platform === 'darwin') { + const homebrewPaths = [ + '/opt/homebrew/bin/gh', + '/usr/local/bin/gh', + ]; + + for (const ghPath of homebrewPaths) { + if (await existsAsync(ghPath)) { + const validation = await this.validateGitHubCLIAsync(ghPath); + if (validation.valid) { + return { + found: true, + path: ghPath, + version: validation.version, + source: 'homebrew', + message: `Using Homebrew GitHub CLI: ${ghPath}`, + }; + } + } + } + } + + // 3.
System PATH (augmented) + const ghPath = await findExecutableAsync('gh'); + if (ghPath) { + const validation = await this.validateGitHubCLIAsync(ghPath); + if (validation.valid) { + return { + found: true, + path: ghPath, + version: validation.version, + source: 'system-path', + message: `Using system GitHub CLI: ${ghPath}`, + }; + } + } + + // 4. Windows Program Files + if (process.platform === 'win32') { + const windowsPaths = [ + 'C:\\Program Files\\GitHub CLI\\gh.exe', + 'C:\\Program Files (x86)\\GitHub CLI\\gh.exe', + ]; + + for (const winGhPath of windowsPaths) { + if (await existsAsync(winGhPath)) { + const validation = await this.validateGitHubCLIAsync(winGhPath); + if (validation.valid) { + return { + found: true, + path: winGhPath, + version: validation.version, + source: 'system-path', + message: `Using Windows GitHub CLI: ${winGhPath}`, + }; + } + } + } + } + + // 5. Not found + return { + found: false, + source: 'fallback', + message: 'GitHub CLI (gh) not found. Install from https://cli.github.com', + }; + } + + /** + * Get bundled Python path for packaged apps + * + * Only available in packaged Electron apps where Python is bundled + * in the resources directory. + * + * @returns Path to bundled Python or null if not found + */ + private getBundledPythonPath(): string | null { + if (!app.isPackaged) { + return null; + } + + const resourcesPath = process.resourcesPath; + const isWindows = process.platform === 'win32'; + + const pythonPath = isWindows + ? path.join(resourcesPath, 'python', 'python.exe') + : path.join(resourcesPath, 'python', 'bin', 'python3'); + + return existsSync(pythonPath) ? pythonPath : null; + } + + /** + * Find Homebrew Python on macOS + * Delegates to shared utility function. + * + * @returns Path to Homebrew Python or null if not found + */ + private findHomebrewPython(): string | null { + return findHomebrewPythonUtil( + (pythonPath) => this.validatePython(pythonPath), + '[CLI Tools]' + ); + } + + /** + * Clear cache manually + * + * Useful for testing or forcing re-detection. + * Normally not needed as cache is cleared automatically on settings change. + */ + clearCache(): void { + this.cache.clear(); + console.warn('[CLI Tools] Cache cleared'); + } + + /** + * Get tool detection info for diagnostics + * + * Performs fresh detection without using cache. + * Useful for Settings UI to show current detection status. + * + * @param tool - The tool to get detection info for + * @returns Detection result with full metadata + */ + getToolInfo(tool: CLITool): ToolDetectionResult { + return this.detectToolPath(tool); + } +} + +// Singleton instance +const cliToolManager = new CLIToolManager(); + +/** + * Get the path for a CLI tool + * + * Convenience function for accessing the tool manager singleton. + * Uses cached path if available, otherwise auto-detects. + * + * @param tool - The CLI tool to get the path for + * @returns The resolved path to the tool executable + * + * @example + * ```typescript + * import { getToolPath } from './cli-tool-manager'; + * + * const pythonPath = getToolPath('python'); + * const gitPath = getToolPath('git'); + * const ghPath = getToolPath('gh'); + * + * execSync(`${gitPath} status`, { cwd: projectPath }); + * ``` + */ +export function getToolPath(tool: CLITool): string { + return cliToolManager.getToolPath(tool); +} + +/** + * Configure CLI tools with user settings + * + * Call this when user updates CLI tool paths in Settings. + * Clears cache to force re-detection with new configuration. 
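 + * After reconfiguring, prefer getToolPathAsync() for the next lookup so the forced re-detection runs off the main process (see the async exports below).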
+ * + * @param config - User configuration for CLI tool paths + * + * @example + * ```typescript + * import { configureTools } from './cli-tool-manager'; + * + * // When settings are loaded or updated + * configureTools({ + * pythonPath: settings.pythonPath, + * gitPath: settings.gitPath, + * githubCLIPath: settings.githubCLIPath, + * }); + * ``` + */ +export function configureTools(config: ToolConfig): void { + cliToolManager.configure(config); } /** @@ -951,3 +1752,52 @@ export function clearToolCache(): void { export function isPathFromWrongPlatform(pathStr: string | undefined): boolean { return isWrongPlatformPath(pathStr); } + +// ============================================================================ +// ASYNC EXPORTS - Non-blocking alternatives for Electron main process +// ============================================================================ + +/** + * Get the path for a CLI tool asynchronously (non-blocking) + * + * Safe to call from Electron main process without blocking the event loop. + * Uses cached path if available, otherwise detects asynchronously. + * + * @param tool - The CLI tool to get the path for + * @returns Promise resolving to the tool path + * + * @example + * ```typescript + * import { getToolPathAsync } from './cli-tool-manager'; + * + * const claudePath = await getToolPathAsync('claude'); + * ``` + */ +export async function getToolPathAsync(tool: CLITool): Promise<string> { + return cliToolManager.getToolPathAsync(tool); +} + +/** + * Pre-warm the CLI tool cache asynchronously + * + * Call this during app startup to detect tools in the background. + * Subsequent calls to getToolPath/getToolPathAsync will use cached values. + * + * @param tools - Array of tools to pre-warm (defaults to ['claude']) + * + * @example + * ```typescript + * import { preWarmToolCache } from './cli-tool-manager'; + * + * // In app startup + * app.whenReady().then(() => { + * // ... setup code ... + * preWarmToolCache(['claude', 'git', 'gh']); + * }); + * ``` + */ +export async function preWarmToolCache(tools: CLITool[] = ['claude']): Promise<void> { + console.warn('[CLI Tools] Pre-warming cache for:', tools.join(', ')); + await Promise.all(tools.map(tool => cliToolManager.getToolPathAsync(tool))); + console.warn('[CLI Tools] Cache pre-warming complete'); +} diff --git a/apps/frontend/src/main/env-utils.ts b/apps/frontend/src/main/env-utils.ts index 9a1325ce15..bf863c8f73 100644 --- a/apps/frontend/src/main/env-utils.ts +++ b/apps/frontend/src/main/env-utils.ts @@ -12,7 +12,32 @@ import * as os from 'os'; import * as path from 'path'; import * as fs from 'fs'; -import { execFileSync } from 'child_process'; +import { promises as fsPromises } from 'fs'; +import { execFileSync, execFile } from 'child_process'; +import { promisify } from 'util'; + +const execFileAsync = promisify(execFile); + +/** + * Check if a path exists asynchronously (non-blocking) + * + * Uses fs.promises.access which is non-blocking, unlike fs.existsSync.
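 + * A minimal usage sketch: + * @example + * ```typescript + * if (await existsAsync('/opt/homebrew/bin')) { + *   // path exists - safe to add to PATH + * } + * ```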
+ * + * @param filePath - The path to check + * @returns Promise resolving to true if path exists, false otherwise + */ +async function existsAsync(filePath: string): Promise<boolean> { + try { + await fsPromises.access(filePath); + return true; + } catch { + return false; + } +} + +// Cache for npm global prefix to avoid repeated async calls +let npmGlobalPrefixCache: string | null | undefined = undefined; +let npmGlobalPrefixCachePromise: Promise<string | null> | null = null; /** * Get npm global prefix directory dynamically @@ -30,10 +55,12 @@ function getNpmGlobalPrefix(): string | null { // On Windows, use npm.cmd for proper command resolution const npmCommand = process.platform === 'win32' ? 'npm.cmd' : 'npm'; - const rawPrefix = execFileSync(npmCommand, ['config', 'get', 'prefix'], { + // Use --location=global to bypass workspace context and avoid ENOWORKSPACES error + const rawPrefix = execFileSync(npmCommand, ['config', 'get', 'prefix', '--location=global'], { encoding: 'utf-8', timeout: 3000, windowsHide: true, + cwd: os.homedir(), // Run from home dir to avoid ENOWORKSPACES error in monorepos shell: process.platform === 'win32', // Enable shell on Windows for .cmd resolution }).trim(); @@ -60,19 +87,22 @@ * Common binary directories that should be in PATH * These are locations where commonly used tools are installed */ -const COMMON_BIN_PATHS: Record<string, string[]> = { +export const COMMON_BIN_PATHS: Record<string, string[]> = { darwin: [ '/opt/homebrew/bin', // Apple Silicon Homebrew '/usr/local/bin', // Intel Homebrew / system + '/usr/local/share/dotnet', // .NET SDK '/opt/homebrew/sbin', // Apple Silicon Homebrew sbin '/usr/local/sbin', // Intel Homebrew sbin '~/.local/bin', // User-local binaries (Claude CLI) + '~/.dotnet/tools', // .NET global tools ], linux: [ '/usr/local/bin', '/usr/bin', // System binaries (Python, etc.) '/snap/bin', // Snap packages '~/.local/bin', // User-local binaries + '~/.dotnet/tools', // .NET global tools '/usr/sbin', // System admin binaries ], win32: [ @@ -82,6 +112,71 @@ ], }; +/** + * Get expanded platform paths for PATH augmentation + * + * Shared helper used by both sync and async getAugmentedEnv functions. + * Expands home directory (~) in paths and returns the list of candidate paths. + * + * @param additionalPaths - Optional additional paths to include + * @returns Array of expanded paths (without existence checking) + */ +function getExpandedPlatformPaths(additionalPaths?: string[]): string[] { + const platform = process.platform as 'darwin' | 'linux' | 'win32'; + const homeDir = os.homedir(); + + // Get platform-specific paths and expand home directory + const platformPaths = COMMON_BIN_PATHS[platform] || []; + const expandedPaths = platformPaths.map(p => + p.startsWith('~') ? p.replace('~', homeDir) : p + ); + + // Add user-requested additional paths (expanded) + if (additionalPaths) { + for (const p of additionalPaths) { + const expanded = p.startsWith('~') ? p.replace('~', homeDir) : p; + expandedPaths.push(expanded); + } + } + + return expandedPaths; +} + +/** + * Build augmented PATH by filtering existing paths + * + * Shared helper that takes candidate paths and a set of current PATH entries, + * returning only paths that should be added.
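 + * A worked example with illustrative values (not real detection output): + * @example + * ```typescript + * buildPathsToAdd( + *   ['/opt/homebrew/bin', '/usr/local/bin'], + *   new Set(['/usr/local/bin']),    // already on PATH, so skipped + *   new Set(['/opt/homebrew/bin']), // exists on disk + *   null                            // no npm prefix detected + * ); // => ['/opt/homebrew/bin'] + * ```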
+ * + * @param candidatePaths - Array of paths to consider adding + * @param currentPathSet - Set of paths already in PATH + * @param existingPaths - Set of paths that actually exist on the filesystem + * @param npmPrefix - npm global prefix path (or null if not found) + * @returns Array of paths to prepend to PATH + */ +function buildPathsToAdd( + candidatePaths: string[], + currentPathSet: Set<string>, + existingPaths: Set<string>, + npmPrefix: string | null +): string[] { + const pathsToAdd: string[] = []; + + // Add platform-specific paths that exist + for (const p of candidatePaths) { + if (!currentPathSet.has(p) && existingPaths.has(p)) { + pathsToAdd.push(p); + } + } + + // Add npm global prefix if it exists + if (npmPrefix && !currentPathSet.has(npmPrefix) && existingPaths.has(npmPrefix)) { + pathsToAdd.push(npmPrefix); + } + + return pathsToAdd; +} + /** * Get augmented environment with additional PATH entries * @@ -97,43 +192,24 @@ export function getAugmentedEnv(additionalPaths?: string[]): Record<string, string> { - p.startsWith('~') ? p.replace('~', homeDir) : p - ); + // Get all candidate paths (platform + additional) + const candidatePaths = getExpandedPlatformPaths(additionalPaths); // Collect paths to add (only if they exist and aren't already in PATH) const currentPath = env.PATH || ''; const currentPathSet = new Set(currentPath.split(pathSeparator)); - const pathsToAdd: string[] = []; + // Check existence synchronously and build existing paths set + const existingPaths = new Set(candidatePaths.filter(p => fs.existsSync(p))); - // Add platform-specific paths - for (const p of expandedPaths) { - if (!currentPathSet.has(p) && fs.existsSync(p)) { - pathsToAdd.push(p); - } - } - - // Add npm global prefix dynamically (cross-platform: works with standard npm, nvm, nvm-windows) + // Get npm global prefix dynamically const npmPrefix = getNpmGlobalPrefix(); - if (npmPrefix && !currentPathSet.has(npmPrefix) && fs.existsSync(npmPrefix)) { - pathsToAdd.push(npmPrefix); + if (npmPrefix && fs.existsSync(npmPrefix)) { + existingPaths.add(npmPrefix); } - // Add user-requested additional paths - if (additionalPaths) { - for (const p of additionalPaths) { - const expanded = p.startsWith('~') ? p.replace('~', homeDir) : p; - if (!currentPathSet.has(expanded) && fs.existsSync(expanded)) { - pathsToAdd.push(expanded); - } - } - } + // Build final paths to add using shared helper + const pathsToAdd = buildPathsToAdd(candidatePaths, currentPathSet, existingPaths, npmPrefix); // Prepend new paths to PATH (prepend so they take priority) if (pathsToAdd.length > 0) { @@ -184,3 +260,149 @@ export function findExecutable(command: string): string | null { export function isCommandAvailable(command: string): boolean { return findExecutable(command) !== null; } + +// ============================================================================ +// ASYNC VERSIONS - Non-blocking alternatives for Electron main process +// ============================================================================ + +/** + * Get npm global prefix directory asynchronously (non-blocking) + * + * Uses caching to avoid repeated subprocess calls. Safe to call from + * Electron main process without blocking the event loop.
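 + * The cache has three states: undefined (never fetched), a shared in-flight promise (concurrent callers await the same subprocess), and a resolved string-or-null value.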
+ * + * @returns Promise resolving to npm global binaries directory, or null + */ +async function getNpmGlobalPrefixAsync(): Promise<string | null> { + // Return cached value if available + if (npmGlobalPrefixCache !== undefined) { + return npmGlobalPrefixCache; + } + + // If a fetch is already in progress, wait for it + if (npmGlobalPrefixCachePromise) { + return npmGlobalPrefixCachePromise; + } + + // Start the async fetch + npmGlobalPrefixCachePromise = (async () => { + try { + const npmCommand = process.platform === 'win32' ? 'npm.cmd' : 'npm'; + + const { stdout } = await execFileAsync(npmCommand, ['config', 'get', 'prefix', '--location=global'], { + encoding: 'utf-8', + timeout: 3000, + windowsHide: true, + cwd: os.homedir(), // Run from home dir to avoid ENOWORKSPACES error in monorepos + shell: process.platform === 'win32', + }); + + const rawPrefix = stdout.trim(); + if (!rawPrefix) { + npmGlobalPrefixCache = null; + return null; + } + + const binPath = process.platform === 'win32' + ? rawPrefix + : path.join(rawPrefix, 'bin'); + + const normalizedPath = path.normalize(binPath); + npmGlobalPrefixCache = await existsAsync(normalizedPath) ? normalizedPath : null; + return npmGlobalPrefixCache; + } catch (error) { + console.warn(`[env-utils] Failed to get npm global prefix: ${error}`); + npmGlobalPrefixCache = null; + return null; + } finally { + npmGlobalPrefixCachePromise = null; + } + })(); + + return npmGlobalPrefixCachePromise; +} + +/** + * Get augmented environment asynchronously (non-blocking) + * + * Same as getAugmentedEnv but uses async npm prefix detection. + * Safe to call from Electron main process without blocking. + * + * @param additionalPaths - Optional array of additional paths to include + * @returns Promise resolving to environment object with augmented PATH + */ +export async function getAugmentedEnvAsync(additionalPaths?: string[]): Promise<Record<string, string>> { + const env = { ...process.env } as Record<string, string>; + const platform = process.platform as 'darwin' | 'linux' | 'win32'; + const pathSeparator = platform === 'win32' ? ';' : ':'; + + // Get all candidate paths (platform + additional) + const candidatePaths = getExpandedPlatformPaths(additionalPaths); + + // Collect paths to add (only if they exist and aren't already in PATH) + const currentPath = env.PATH || ''; + const currentPathSet = new Set(currentPath.split(pathSeparator)); + + // Check existence asynchronously in parallel for performance + const pathChecks = await Promise.all( + candidatePaths.map(async (p) => ({ path: p, exists: await existsAsync(p) })) + ); + const existingPaths = new Set( + pathChecks.filter(({ exists }) => exists).map(({ path: p }) => p) + ); + + // Get npm global prefix dynamically (async - non-blocking) + const npmPrefix = await getNpmGlobalPrefixAsync(); + if (npmPrefix && await existsAsync(npmPrefix)) { + existingPaths.add(npmPrefix); + } + + // Build final paths to add using shared helper + const pathsToAdd = buildPathsToAdd(candidatePaths, currentPathSet, existingPaths, npmPrefix); + + // Prepend new paths to PATH (prepend so they take priority) + if (pathsToAdd.length > 0) { + env.PATH = [...pathsToAdd, currentPath].filter(Boolean).join(pathSeparator); + } + + return env; +} + +/** + * Find the full path to an executable asynchronously (non-blocking) + * + * Same as findExecutable but uses async environment augmentation.
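 + * A minimal usage sketch (assumes spawn from 'child_process' is in scope): + * @example + * ```typescript + * const gh = await findExecutableAsync('gh'); + * if (gh) { + *   spawn(gh, ['--version']); + * } + * ```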
+ * + * @param command - The command name to find (e.g., 'gh', 'git') + * @returns Promise resolving to the full path to the executable, or null + */ +export async function findExecutableAsync(command: string): Promise<string | null> { + const env = await getAugmentedEnvAsync(); + const pathSeparator = process.platform === 'win32' ? ';' : ':'; + const pathDirs = (env.PATH || '').split(pathSeparator); + + const extensions = process.platform === 'win32' + ? ['.exe', '.cmd', '.bat', '.ps1', ''] + : ['']; + + for (const dir of pathDirs) { + for (const ext of extensions) { + const fullPath = path.join(dir, command + ext); + if (await existsAsync(fullPath)) { + return fullPath; + } + } + } + + return null; +} + +/** + * Clear the npm global prefix cache + * + * Call this if npm configuration changes and you need fresh detection. + */ +export function clearNpmPrefixCache(): void { + npmGlobalPrefixCache = undefined; + npmGlobalPrefixCachePromise = null; +} diff --git a/apps/frontend/src/main/index.ts b/apps/frontend/src/main/index.ts index 7cd856a0fe..8ee2eaf76c 100644 --- a/apps/frontend/src/main/index.ts +++ b/apps/frontend/src/main/index.ts @@ -1,6 +1,28 @@ -import { app, BrowserWindow, shell, nativeImage } from 'electron'; +// Load .env file FIRST before any other imports that might use process.env +import { config } from 'dotenv'; +import { resolve, dirname } from 'path'; +import { existsSync } from 'fs'; + +// Load .env from apps/frontend directory +// In development: __dirname is out/main (compiled), so go up 2 levels +// In production: app resources directory +const possibleEnvPaths = [ + resolve(__dirname, '../../.env'), // Development: out/main -> apps/frontend/.env + resolve(__dirname, '../../../.env'), // Alternative: might be in different location + resolve(process.cwd(), 'apps/frontend/.env'), // Fallback: from workspace root +]; + +for (const envPath of possibleEnvPaths) { + if (existsSync(envPath)) { + config({ path: envPath }); + console.log(`[dotenv] Loaded environment from: ${envPath}`); + break; + } +} + +import { app, BrowserWindow, shell, nativeImage, session, screen } from 'electron'; import { join } from 'path'; -import { accessSync, readFileSync, writeFileSync } from 'fs'; +import { accessSync, readFileSync, writeFileSync, rmSync } from 'fs'; import { electronApp, optimizer, is } from '@electron-toolkit/utils'; import { setupIpcHandlers } from './ipc-setup'; import { AgentManager } from './agent'; @@ -12,11 +34,34 @@ import { initializeAppUpdater } from './app-updater'; import { DEFAULT_APP_SETTINGS } from '../shared/constants'; import { readSettingsFile } from './settings-utils'; import { setupErrorLogging } from './app-logger'; +import { initSentryMain } from './sentry'; +import { preWarmToolCache } from './cli-tool-manager'; +import { initializeClaudeProfileManager } from './claude-profile-manager'; import type { AppSettings } from '../shared/types'; +// ───────────────────────────────────────────────────────────────────────────── +// Window sizing constants +// ───────────────────────────────────────────────────────────────────────────── +/** Preferred window width on startup */ +const WINDOW_PREFERRED_WIDTH: number = 1400; +/** Preferred window height on startup */ +const WINDOW_PREFERRED_HEIGHT: number = 900; +/** Absolute minimum window width (supports high DPI displays with scaling) */ +const WINDOW_MIN_WIDTH: number = 800; +/** Absolute minimum window height (supports high DPI displays with scaling) */ +const WINDOW_MIN_HEIGHT: number = 500; +/** Margin from screen edges to
avoid edge-to-edge windows */ +const WINDOW_SCREEN_MARGIN: number = 20; +/** Default screen dimensions used as fallback when screen.getPrimaryDisplay() fails */ +const DEFAULT_SCREEN_WIDTH: number = 1920; +const DEFAULT_SCREEN_HEIGHT: number = 1080; + // Setup error logging early (captures uncaught exceptions) setupErrorLogging(); +// Initialize Sentry for error tracking (respects user's sentryEnabled setting) +initSentryMain(); + /** * Load app settings synchronously (for use during startup). * This is a simple merge with defaults - no migrations or auto-detection. @@ -26,6 +71,32 @@ function loadSettingsSync(): AppSettings { return { ...DEFAULT_APP_SETTINGS, ...savedSettings } as AppSettings; } +/** + * Clean up stale update metadata files from the redundant source updater system. + * + * The old "source updater" wrote .update-metadata.json files that could persist + * across app updates and cause version display desync. This cleanup ensures + * we use the actual bundled version from app.getVersion(). + */ +function cleanupStaleUpdateMetadata(): void { + const userData = app.getPath('userData'); + const stalePaths = [ + join(userData, 'auto-claude-source'), + join(userData, 'backend-source'), + ]; + + for (const stalePath of stalePaths) { + if (existsSync(stalePath)) { + try { + rmSync(stalePath, { recursive: true, force: true }); + console.warn(`[main] Cleaned up stale update metadata: ${stalePath}`); + } catch (e) { + console.warn(`[main] Failed to clean up stale metadata at ${stalePath}:`, e); + } + } + } +} + // Get icon path based on platform function getIconPath(): string { // In dev mode, __dirname is out/main, so we go up to project root then into resources @@ -54,12 +125,51 @@ let agentManager: AgentManager | null = null; let terminalManager: TerminalManager | null = null; function createWindow(): void { + // Get the primary display's work area (accounts for taskbar, dock, etc.) 
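 + // Worked example, assuming a hypothetical 1366x768 work area: + //   availableWidth  = 1366 - 20 = 1346 -> width  = min(1400, 1346) = 1346 + //   availableHeight =  768 - 20 =  748 -> height = min(900, 748)   = 748 + //   minWidth = min(800, 1346) = 800; minHeight = min(500, 748) = 500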
+ // Wrapped in try/catch to handle potential failures with fallback to safe defaults + let workAreaSize: { width: number; height: number }; + try { + const display = screen.getPrimaryDisplay(); + // Validate the returned object has expected structure with valid dimensions + if ( + display && + display.workAreaSize && + typeof display.workAreaSize.width === 'number' && + typeof display.workAreaSize.height === 'number' && + display.workAreaSize.width > 0 && + display.workAreaSize.height > 0 + ) { + workAreaSize = display.workAreaSize; + } else { + console.error( + '[main] screen.getPrimaryDisplay() returned unexpected structure:', + JSON.stringify(display) + ); + workAreaSize = { width: DEFAULT_SCREEN_WIDTH, height: DEFAULT_SCREEN_HEIGHT }; + } + } catch (error: unknown) { + console.error('[main] Failed to get primary display, using fallback dimensions:', error); + workAreaSize = { width: DEFAULT_SCREEN_WIDTH, height: DEFAULT_SCREEN_HEIGHT }; + } + + // Calculate available space with a small margin to avoid edge-to-edge windows + const availableWidth: number = workAreaSize.width - WINDOW_SCREEN_MARGIN; + const availableHeight: number = workAreaSize.height - WINDOW_SCREEN_MARGIN; + + // Calculate actual dimensions (preferred, but capped to margin-adjusted available space) + const width: number = Math.min(WINDOW_PREFERRED_WIDTH, availableWidth); + const height: number = Math.min(WINDOW_PREFERRED_HEIGHT, availableHeight); + + // Ensure minimum dimensions don't exceed the actual initial window size + const minWidth: number = Math.min(WINDOW_MIN_WIDTH, width); + const minHeight: number = Math.min(WINDOW_MIN_HEIGHT, height); + // Create the browser window mainWindow = new BrowserWindow({ - width: 1400, - height: 900, - minWidth: 1000, - minHeight: 700, + width, + height, + minWidth, + minHeight, show: false, autoHideMenuBar: true, titleBarStyle: 'hiddenInset', @@ -110,11 +220,29 @@ if (process.platform === 'darwin') { app.name = 'Auto Claude'; } +// Fix Windows GPU cache permission errors (0x5 Access Denied) +if (process.platform === 'win32') { + app.commandLine.appendSwitch('disable-gpu-shader-disk-cache'); + app.commandLine.appendSwitch('disable-gpu-program-cache'); + console.log('[main] Applied Windows GPU cache fixes'); +} + // Initialize the application app.whenReady().then(() => { // Set app user model id for Windows electronApp.setAppUserModelId('com.autoclaude.ui'); + // Clear cache on Windows to prevent permission errors from stale cache + if (process.platform === 'win32') { + session.defaultSession.clearCache() + .then(() => console.log('[main] Cleared cache on startup')) + .catch((err) => console.warn('[main] Failed to clear cache:', err)); + } + + // Clean up stale update metadata from the old source updater system + // This prevents version display desync after electron-updater installs a new version + cleanupStaleUpdateMetadata(); + // Set dock icon on macOS if (process.platform === 'darwin') { const iconPath = getIconPath(); @@ -222,6 +350,23 @@ app.whenReady().then(() => { // Create window createWindow(); + // Pre-warm CLI tool cache in background (non-blocking) + // This ensures CLI detection is done before user needs it + // Include all commonly used tools to prevent sync blocking on first use + setImmediate(() => { + preWarmToolCache(['claude', 'git', 'gh', 'python']).catch((error) => { + console.warn('[main] Failed to pre-warm CLI cache:', error); + }); + }); + + // Pre-initialize Claude profile manager in background (non-blocking) + // This ensures profile data is loaded 
before user clicks "Start Claude Code"
+  setImmediate(() => {
+    initializeClaudeProfileManager().catch((error) => {
+      console.warn('[main] Failed to pre-initialize profile manager:', error);
+    });
+  });
+
   // Initialize usage monitoring after window is created
   if (mainWindow) {
     // Setup event forwarding from usage monitor to renderer
diff --git a/apps/frontend/src/main/insights/config.ts b/apps/frontend/src/main/insights/config.ts
index 0ca1609c13..97e8a9a28d 100644
--- a/apps/frontend/src/main/insights/config.ts
+++ b/apps/frontend/src/main/insights/config.ts
@@ -1,9 +1,12 @@
 import path from 'path';
 import { existsSync, readFileSync } from 'fs';
-import { app } from 'electron';
 import { getProfileEnv } from '../rate-limit-detector';
+import { getAPIProfileEnv } from '../services/profile';
+import { getOAuthModeClearVars } from '../agent/env-utils';
+import { pythonEnvManager, getConfiguredPythonPath } from '../python-env-manager';
 import { getValidatedPythonPath } from '../python-detector';
-import { getConfiguredPythonPath } from '../python-env-manager';
+import { getAugmentedEnv } from '../env-utils';
+import { getEffectiveSourcePath } from '../updater/path-resolver';
 
 /**
  * Configuration manager for insights service
@@ -40,24 +43,23 @@ export class InsightsConfig {
 
   /**
    * Get the auto-claude source path (detects automatically if not configured)
+   * Uses getEffectiveSourcePath() which handles userData override for user-updated backend
    */
   getAutoBuildSourcePath(): string | null {
     if (this.autoBuildSourcePath && existsSync(this.autoBuildSourcePath)) {
       return this.autoBuildSourcePath;
     }
 
-    const possiblePaths = [
-      // Apps structure: from out/main -> apps/backend
-      path.resolve(__dirname, '..', '..', '..', 'backend'),
-      path.resolve(app.getAppPath(), '..', 'backend'),
-      path.resolve(process.cwd(), 'apps', 'backend')
-    ];
-
-    for (const p of possiblePaths) {
-      if (existsSync(p) && existsSync(path.join(p, 'runners', 'spec_runner.py'))) {
-        return p;
-      }
+    // Use shared path resolver which handles:
+    // 1. User settings (autoBuildPath)
+    // 2. userData override (backend-source) for user-updated backend
+    // 3. Bundled backend (process.resourcesPath/backend)
+    // 4. Development paths
+    const effectivePath = getEffectiveSourcePath();
+    if (existsSync(effectivePath) && existsSync(path.join(effectivePath, 'runners', 'spec_runner.py'))) {
+      return effectivePath;
     }
+
     return null;
   }
 
@@ -104,17 +106,51 @@ export class InsightsConfig {
    * Get complete environment for process execution
    * Includes system env, auto-claude env, and active Claude profile
    */
-  getProcessEnv(): Record<string, string> {
+  async getProcessEnv(): Promise<Record<string, string>> {
     const autoBuildEnv = this.loadAutoBuildEnv();
     const profileEnv = getProfileEnv();
+    const apiProfileEnv = await getAPIProfileEnv();
+    const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv);
+    const pythonEnv = pythonEnvManager.getPythonEnv();
+    const autoBuildSource = this.getAutoBuildSourcePath();
+    const pythonPathParts = (pythonEnv.PYTHONPATH ?? '')
+      .split(path.delimiter)
+      .map((entry) => entry.trim())
+      .filter(Boolean)
+      .map((entry) => path.resolve(entry));
+
+    if (autoBuildSource) {
+      const normalizedAutoBuildSource = path.resolve(autoBuildSource);
+      const autoBuildComparator = process.platform === 'win32'
+        ? normalizedAutoBuildSource.toLowerCase()
+        : normalizedAutoBuildSource;
+      const hasAutoBuildSource = pythonPathParts.some((entry) => {
+        const candidate = process.platform === 'win32' ?
entry.toLowerCase() : entry; + return candidate === autoBuildComparator; + }); + + if (!hasAutoBuildSource) { + pythonPathParts.push(normalizedAutoBuildSource); + } + } + + const combinedPythonPath = pythonPathParts.join(path.delimiter); + + // Use getAugmentedEnv() to ensure common tool paths (claude, dotnet, etc.) + // are available even when app is launched from Finder/Dock. + const augmentedEnv = getAugmentedEnv(); return { - ...process.env as Record, + ...augmentedEnv, + ...pythonEnv, // Include PYTHONPATH for bundled site-packages ...autoBuildEnv, + ...oauthModeClearVars, ...profileEnv, + ...apiProfileEnv, PYTHONUNBUFFERED: '1', PYTHONIOENCODING: 'utf-8', - PYTHONUTF8: '1' + PYTHONUTF8: '1', + ...(combinedPythonPath ? { PYTHONPATH: combinedPythonPath } : {}) }; } } diff --git a/apps/frontend/src/main/insights/insights-executor.ts b/apps/frontend/src/main/insights/insights-executor.ts index d5565620fe..0c349b3480 100644 --- a/apps/frontend/src/main/insights/insights-executor.ts +++ b/apps/frontend/src/main/insights/insights-executor.ts @@ -85,7 +85,7 @@ export class InsightsExecutor extends EventEmitter { } as InsightsChatStatus); // Get process environment - const processEnv = this.config.getProcessEnv(); + const processEnv = await this.config.getProcessEnv(); // Write conversation history to temp file to avoid Windows command-line length limit const historyFile = path.join( @@ -130,6 +130,7 @@ export class InsightsExecutor extends EventEmitter { let suggestedTask: InsightsChatMessage['suggestedTask'] | undefined; const toolsUsed: InsightsToolUsage[] = []; let allInsightsOutput = ''; + let stderrOutput = ''; proc.stdout?.on('data', (data: Buffer) => { const text = data.toString(); @@ -159,8 +160,9 @@ export class InsightsExecutor extends EventEmitter { proc.stderr?.on('data', (data: Buffer) => { const text = data.toString(); - // Collect stderr for rate limit detection too + // Collect stderr for rate limit detection and error reporting allInsightsOutput = (allInsightsOutput + text).slice(-10000); + stderrOutput = (stderrOutput + text).slice(-2000); console.error('[Insights]', text); }); @@ -196,7 +198,11 @@ export class InsightsExecutor extends EventEmitter { toolsUsed }); } else { - const error = `Process exited with code ${code}`; + // Include stderr output in error message for debugging + const stderrSummary = stderrOutput.trim() + ? `\n\nError output:\n${stderrOutput.slice(-500)}` + : ''; + const error = `Process exited with code ${code}${stderrSummary}`; this.emit('stream-chunk', projectId, { type: 'error', error diff --git a/apps/frontend/src/main/ipc-handlers/ado/index.ts b/apps/frontend/src/main/ipc-handlers/ado/index.ts new file mode 100644 index 0000000000..25e52037d6 --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/ado/index.ts @@ -0,0 +1,23 @@ +/** + * Azure DevOps integration IPC handlers + * + * Main entry point that registers all ADO-related handlers. 
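+ *
+ * Minimal usage sketch (hypothetical call site; the real wiring may differ):
+ *
+ *   import { registerADOHandlers } from './ipc-handlers/ado';
+ *   app.whenReady().then(() => registerADOHandlers());
+ *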
+ * Handlers are organized into modules by functionality: + * - workitem-handlers: Work item (issue) fetching and management + * - pr-handlers: Pull request operations and reviews + */ + +import { registerWorkItemHandlers } from './workitem-handlers'; +import { registerPRHandlers } from './pr-handlers'; + +/** + * Register all Azure DevOps IPC handlers + */ +export function registerADOHandlers(): void { + registerWorkItemHandlers(); + registerPRHandlers(); +} + +// Re-export utilities for potential external use +export { getADOConfig, adoFetch } from './utils'; +export type { ADOConfig, ADOWorkItem, ADOPullRequest } from './types'; diff --git a/apps/frontend/src/main/ipc-handlers/ado/pr-handlers.ts b/apps/frontend/src/main/ipc-handlers/ado/pr-handlers.ts new file mode 100644 index 0000000000..fbbef26778 --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/ado/pr-handlers.ts @@ -0,0 +1,344 @@ +/** + * Azure DevOps Pull Request IPC handlers + */ + +import { ipcMain } from 'electron'; +import { IPC_CHANNELS } from '../../../shared/constants'; +import type { IPCResult } from '../../../shared/types'; +import { projectStore } from '../../project-store'; +import { + getADOConfig, + adoFetch, + normalizePRState, + extractBranchName, +} from './utils'; +import type { ADOAPIPullRequest, ADOPullRequest } from './types'; + +/** + * Transform ADO API PR to application format + */ +function transformPR(pr: ADOAPIPullRequest, config: { instanceUrl: string; organization: string; project: string; repoName: string }): ADOPullRequest { + return { + id: pr.pullRequestId, + number: pr.pullRequestId, + title: pr.title, + body: pr.description, + state: normalizePRState(pr.status), + author: { + login: pr.createdBy.uniqueName, + displayName: pr.createdBy.displayName, + avatarUrl: pr.createdBy.imageUrl, + }, + sourceBranch: extractBranchName(pr.sourceRefName), + targetBranch: extractBranchName(pr.targetRefName), + isDraft: pr.isDraft || false, + mergeStatus: pr.mergeStatus, + reviewers: (pr.reviewers || []).map(r => ({ + login: r.uniqueName, + displayName: r.displayName, + avatarUrl: r.imageUrl, + vote: r.vote, + })), + labels: (pr.labels || []).map(l => l.name), + createdAt: pr.creationDate, + updatedAt: pr.creationDate, // ADO doesn't have a separate updated date + closedAt: pr.closedDate, + url: pr.url, + htmlUrl: pr._links?.web?.href || `${config.instanceUrl}/${config.organization}/${config.project}/_git/${config.repoName}/pullrequest/${pr.pullRequestId}`, + }; +} + +/** + * List pull requests + */ +export function registerListPRs(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_PR_LIST, + async (_, projectId: string, status: 'active' | 'completed' | 'abandoned' | 'all' = 'active'): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found' }; + } + + try { + const prsResult = await adoFetch( + config, + `/git/repositories/${config.repoName}/pullrequests?searchCriteria.status=${status}&$top=100` + ) as { value: ADOAPIPullRequest[] }; + + const result: ADOPullRequest[] = prsResult.value.map(pr => + transformPR(pr, config) + ); + + return { success: true, data: result }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to fetch pull requests', + }; + } + } + ); +} + +/** + * Get a single PR by ID + */ +export function registerGetPR(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_PR_GET, + async (_, projectId: string, prId: number): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found' }; + } + + try { + const pr = await adoFetch( + config, + `/git/repositories/${config.repoName}/pullrequests/${prId}` + ) as ADOAPIPullRequest; + + const result = transformPR(pr, config); + return { success: true, data: result }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to fetch pull request', + }; + } + } + ); +} + +/** + * Get PR diff/changes + */ +export function registerGetPRDiff(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_PR_GET_DIFF, + async (_, projectId: string, prId: number): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found' }; + } + + try { + // Get PR iterations (versions) + const iterationsResult = await adoFetch( + config, + `/git/repositories/${config.repoName}/pullrequests/${prId}/iterations` + ) as { value: Array<{ id: number }> }; + + if (!iterationsResult.value || iterationsResult.value.length === 0) { + return { success: true, data: '' }; + } + + // Get changes from the latest iteration + const latestIteration = iterationsResult.value[iterationsResult.value.length - 1]; + const changesResult = await adoFetch( + config, + `/git/repositories/${config.repoName}/pullrequests/${prId}/iterations/${latestIteration.id}/changes` + ) as { changeEntries?: Array<{ changeType: string; item?: { path: string } }> }; + + // Build a summary of changes + const diffLines: string[] = []; + for (const change of changesResult.changeEntries || []) { + const changeType = change.changeType || 'edit'; + const path = change.item?.path || ''; + diffLines.push(`--- ${changeType}: ${path}`); + } + + return { success: true, data: diffLines.join('\n') }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to fetch PR diff', + }; + } + } + ); +} + +/** + * Post a review comment on a PR + */ +export function registerPostPRReview(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_PR_POST_REVIEW, + async ( + _, + projectId: string, + prId: number, + comment: string, + vote?: number // -10 = rejected, 0 = none, 5 = approved with suggestions, 10 = approved + ): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found' }; + } + + try { + // Create a comment thread + const threadResult = await adoFetch( + config, + `/git/repositories/${config.repoName}/pullrequests/${prId}/threads`, + { + method: 'POST', + body: JSON.stringify({ + comments: [{ content: comment }], + status: 'active', + }), + } + ) as { id: number }; + + // If vote is provided, update reviewer vote + if (vote !== undefined) { + await adoFetch( + config, + `/git/repositories/${config.repoName}/pullrequests/${prId}/reviewers/me`, + { + method: 'PUT', + body: JSON.stringify({ vote }), + } + ); + } + + return { success: true, data: threadResult.id }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to post review', + }; + } + } + ); +} + +/** + * Merge a PR + */ +export function registerMergePR(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_PR_MERGE, + async ( + _, + projectId: string, + prId: number, + mergeStrategy: 'squash' | 'rebase' | 'noFastForward' = 'squash', + deleteSourceBranch: boolean = true + ): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found' }; + } + + try { + // Get PR to get the last merge source commit + const pr = await adoFetch( + config, + `/git/repositories/${config.repoName}/pullrequests/${prId}` + ) as ADOAPIPullRequest & { lastMergeSourceCommit?: { commitId: string } }; + + // Complete the PR + await adoFetch( + config, + `/git/repositories/${config.repoName}/pullrequests/${prId}`, + { + method: 'PATCH', + body: JSON.stringify({ + status: 'completed', + lastMergeSourceCommit: pr.lastMergeSourceCommit, + completionOptions: { + deleteSourceBranch, + mergeStrategy, + }, + }), + } + ); + + return { success: true, data: true }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to merge pull request', + }; + } + } + ); +} + +/** + * Abandon (close) a PR + */ +export function registerAbandonPR(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_PR_ABANDON, + async (_, projectId: string, prId: number): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found' }; + } + + try { + await adoFetch( + config, + `/git/repositories/${config.repoName}/pullrequests/${prId}`, + { + method: 'PATCH', + body: JSON.stringify({ status: 'abandoned' }), + } + ); + + return { success: true, data: true }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to abandon pull request', + }; + } + } + ); +} + +/** + * Register all PR handlers + */ +export function registerPRHandlers(): void { + registerListPRs(); + registerGetPR(); + registerGetPRDiff(); + registerPostPRReview(); + registerMergePR(); + registerAbandonPR(); +} diff --git a/apps/frontend/src/main/ipc-handlers/ado/types.ts b/apps/frontend/src/main/ipc-handlers/ado/types.ts new file mode 100644 index 0000000000..a5e8d55b2f --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/ado/types.ts @@ -0,0 +1,153 @@ +/** + * Azure DevOps module types and interfaces + */ + +export interface ADOConfig { + organization: string; + project: string; + repoName: string; + pat: string; + instanceUrl: string; +} + +export interface ADOAPIWorkItem { + id: number; + rev: number; + fields: { + 'System.Id': number; + 'System.Title': string; + 'System.Description'?: string; + 'System.State': string; + 'System.WorkItemType': string; + 'System.Tags'?: string; + 'System.AssignedTo'?: { + displayName: string; + uniqueName: string; + imageUrl?: string; + }; + 'System.CreatedBy': { + displayName: string; + uniqueName: string; + imageUrl?: string; + }; + 'System.CreatedDate': string; + 'System.ChangedDate': string; + 'System.IterationPath'?: string; + 'System.AreaPath'?: string; + 'Microsoft.VSTS.Common.Priority'?: number; + }; + url: string; + _links?: { + html?: { href: string }; + }; +} + +export interface ADOAPIRepository { + id: string; + name: string; + defaultBranch: string; + webUrl: string; + project: { + id: string; + name: string; + state: string; + }; +} + +export interface ADOAPIPullRequest { + pullRequestId: number; + title: string; + description?: string; + status: 'active' | 'abandoned' | 'completed' | 'all'; + createdBy: { + displayName: string; + uniqueName: string; + imageUrl?: string; + }; + creationDate: string; + closedDate?: string; + sourceRefName: string; + targetRefName: string; + mergeStatus?: string; + isDraft?: boolean; + reviewers?: Array<{ + displayName: string; + uniqueName: string; + vote: number; + imageUrl?: string; + }>; + labels?: Array<{ name: string }>; + url: string; + _links?: { + web?: { href: string }; + }; +} + +export interface ADOAPIComment { + id: number; + content: string; + author: { + displayName: string; + uniqueName: string; + imageUrl?: string; + }; + publishedDate: string; + lastUpdatedDate: string; +} + +export interface ADOWorkItem { + id: number; + number: number; // Alias for id, for compatibility with GitHub issues + title: string; + body?: string; + state: 'open' | 'closed'; + workItemType: string; + tags: string[]; + assignees: Array<{ + login: string; + displayName: string; + avatarUrl?: string; + }>; + author: { + login: string; + displayName: string; + avatarUrl?: string; + }; + priority?: number; + iteration?: string; + areaPath?: string; + createdAt: string; + updatedAt: string; + closedAt?: string; + url: string; + htmlUrl: string; +} + +export interface ADOPullRequest { + id: number; + number: number; // Alias for pullRequestId + title: string; + body?: string; + state: 'open' | 'closed' | 'merged'; + author: { + login: string; + displayName: string; + avatarUrl?: string; + }; + sourceBranch: string; + targetBranch: string; + isDraft: boolean; + mergeStatus?: string; + reviewers: Array<{ + login: string; + displayName: string; + avatarUrl?: string; + vote: number; // -10 = rejected, 0 = no vote, 5 = approved with suggestions, 10 = approved + }>; + labels: string[]; + createdAt: string; + updatedAt: string; + closedAt?: 
string; + url: string; + htmlUrl: string; +} diff --git a/apps/frontend/src/main/ipc-handlers/ado/utils.ts b/apps/frontend/src/main/ipc-handlers/ado/utils.ts new file mode 100644 index 0000000000..f58fc42013 --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/ado/utils.ts @@ -0,0 +1,156 @@ +/** + * Azure DevOps utility functions + */ + +import { existsSync, readFileSync } from 'fs'; +import path from 'path'; +import type { Project } from '../../../shared/types'; +import { parseEnvFile } from '../utils'; +import type { ADOConfig } from './types'; + +/** + * Get Azure DevOps configuration from project environment file + */ +export function getADOConfig(project: Project): ADOConfig | null { + if (!project.autoBuildPath) return null; + const envPath = path.join(project.path, project.autoBuildPath, '.env'); + if (!existsSync(envPath)) return null; + + try { + const content = readFileSync(envPath, 'utf-8'); + const vars = parseEnvFile(content); + + const organization = vars['ADO_ORGANIZATION']; + const projectName = vars['ADO_PROJECT']; + const pat = vars['ADO_PAT']; + const repoName = vars['ADO_REPO_NAME'] || projectName; + const instanceUrl = vars['ADO_INSTANCE_URL'] || 'https://dev.azure.com'; + + if (!organization || !projectName || !pat) return null; + + return { + organization, + project: projectName, + repoName: repoName || '', + pat, + instanceUrl, + }; + } catch { + return null; + } +} + +/** + * Build the base URL for Azure DevOps API calls + */ +export function buildADOApiUrl(config: ADOConfig, endpoint: string): string { + // ADO API URL format: https://dev.azure.com/{organization}/{project}/_apis/{endpoint} + const baseUrl = `${config.instanceUrl}/${config.organization}/${config.project}/_apis`; + const separator = endpoint.includes('?') ? 
'&' : '?';
+  return `${baseUrl}${endpoint}${separator}api-version=7.1`;
+}
+
+/**
+ * Make a request to the Azure DevOps API
+ */
+export async function adoFetch(
+  config: ADOConfig,
+  endpoint: string,
+  options: RequestInit = {}
+): Promise<unknown> {
+  const url = buildADOApiUrl(config, endpoint);
+
+  // ADO uses Basic auth with PAT (empty username, PAT as password)
+  const auth = Buffer.from(`:${config.pat}`).toString('base64');
+
+  const response = await fetch(url, {
+    ...options,
+    headers: {
+      'Accept': 'application/json',
+      'Authorization': `Basic ${auth}`,
+      'Content-Type': 'application/json',
+      ...options.headers,
+    },
+  });
+
+  if (!response.ok) {
+    const errorBody = await response.text();
+    throw new Error(`Azure DevOps API error: ${response.status} ${response.statusText} - ${errorBody}`);
+  }
+
+  return response.json();
+}
+
+/**
+ * Make a PATCH request to the Azure DevOps API (for work item updates)
+ */
+export async function adoPatch(
+  config: ADOConfig,
+  endpoint: string,
+  operations: Array<{ op: string; path: string; value?: unknown }>
+): Promise<unknown> {
+  const url = buildADOApiUrl(config, endpoint);
+  const auth = Buffer.from(`:${config.pat}`).toString('base64');
+
+  const response = await fetch(url, {
+    method: 'PATCH',
+    headers: {
+      'Accept': 'application/json',
+      'Authorization': `Basic ${auth}`,
+      'Content-Type': 'application/json-patch+json', // Required for work item updates
+    },
+    body: JSON.stringify(operations),
+  });
+
+  if (!response.ok) {
+    const errorBody = await response.text();
+    throw new Error(`Azure DevOps API error: ${response.status} ${response.statusText} - ${errorBody}`);
+  }
+
+  return response.json();
+}
+
+/**
+ * Sanitize a string for use in WIQL queries
+ * Prevents WIQL injection attacks
+ */
+export function sanitizeWiqlString(value: string): string {
+  if (!value) return '';
+  // Escape single quotes (WIQL string delimiter)
+  let sanitized = value.replace(/'/g, "''");
+  // Remove control characters
+  sanitized = sanitized.replace(/[\x00-\x1f\x7f-\x9f]/g, '');
+  // Limit length
+  return sanitized.slice(0, 500);
+}
+
+/**
+ * Parse ADO work item state to normalized open/closed
+ */
+export function normalizeWorkItemState(state: string): 'open' | 'closed' {
+  const closedStates = ['closed', 'resolved', 'done', 'removed'];
+  return closedStates.includes(state.toLowerCase()) ?
'closed' : 'open'; +} + +/** + * Parse ADO PR status to normalized state + */ +export function normalizePRState(status: string): 'open' | 'closed' | 'merged' { + switch (status.toLowerCase()) { + case 'completed': + return 'merged'; + case 'abandoned': + return 'closed'; + case 'active': + default: + return 'open'; + } +} + +/** + * Extract branch name from ADO ref format + * e.g., "refs/heads/main" -> "main" + */ +export function extractBranchName(refName: string): string { + return refName.replace('refs/heads/', ''); +} diff --git a/apps/frontend/src/main/ipc-handlers/ado/workitem-handlers.ts b/apps/frontend/src/main/ipc-handlers/ado/workitem-handlers.ts new file mode 100644 index 0000000000..4586199fb3 --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/ado/workitem-handlers.ts @@ -0,0 +1,390 @@ +/** + * Azure DevOps work item (issue) IPC handlers + */ + +import { ipcMain } from 'electron'; +import { IPC_CHANNELS } from '../../../shared/constants'; +import type { IPCResult } from '../../../shared/types'; +import { projectStore } from '../../project-store'; +import { + getADOConfig, + adoFetch, + adoPatch, + sanitizeWiqlString, + normalizeWorkItemState, +} from './utils'; +import type { ADOAPIWorkItem, ADOAPIComment, ADOWorkItem } from './types'; + +/** + * Transform ADO API work item to application format + */ +function transformWorkItem(wi: ADOAPIWorkItem, config: { instanceUrl: string; organization: string; project: string }): ADOWorkItem { + const fields = wi.fields; + const state = normalizeWorkItemState(fields['System.State']); + + const assignedTo = fields['System.AssignedTo']; + const createdBy = fields['System.CreatedBy']; + + return { + id: wi.id, + number: wi.id, // Alias for compatibility + title: fields['System.Title'], + body: fields['System.Description'], + state, + workItemType: fields['System.WorkItemType'], + tags: (fields['System.Tags'] || '').split(';').map(t => t.trim()).filter(Boolean), + assignees: assignedTo + ? [{ + login: assignedTo.uniqueName, + displayName: assignedTo.displayName, + avatarUrl: assignedTo.imageUrl, + }] + : [], + author: { + login: createdBy.uniqueName, + displayName: createdBy.displayName, + avatarUrl: createdBy.imageUrl, + }, + priority: fields['Microsoft.VSTS.Common.Priority'], + iteration: fields['System.IterationPath'], + areaPath: fields['System.AreaPath'], + createdAt: fields['System.CreatedDate'], + updatedAt: fields['System.ChangedDate'], + closedAt: state === 'closed' ? fields['System.ChangedDate'] : undefined, + url: wi.url, + htmlUrl: wi._links?.html?.href || `${config.instanceUrl}/${config.organization}/${config.project}/_workitems/edit/${wi.id}`, + }; +} + +/** + * Check ADO connection (using saved project config) + */ +export function registerCheckConnection(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_CHECK_CONNECTION, + async (_, projectId: string): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found. Set ADO_ORGANIZATION, ADO_PROJECT, and ADO_PAT in .env' }; + } + + try { + // Test connection by fetching project info + await adoFetch(config, '/projects'); + return { success: true, data: true }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to connect to Azure DevOps', + }; + } + } + ); +} + +/** + * Test ADO connection with provided credentials (before saving) + */ +export function registerTestConnection(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_TEST_CONNECTION, + async ( + _, + credentials: { + organization: string; + project: string; + repoName: string; + pat: string; + instanceUrl: string; + } + ): Promise> => { + if (!credentials.organization || !credentials.project || !credentials.pat) { + return { success: false, error: 'Organization, project, and PAT are required' }; + } + + // Build config from provided credentials + const config = { + organization: credentials.organization, + project: credentials.project, + repoName: credentials.repoName || credentials.project, + pat: credentials.pat, + instanceUrl: credentials.instanceUrl || 'https://dev.azure.com', + }; + + try { + // Test connection by fetching project info + await adoFetch(config, '/projects'); + return { success: true, data: true }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to connect to Azure DevOps', + }; + } + } + ); +} + +/** + * Get list of work items from project + */ +export function registerGetWorkItems(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_GET_WORK_ITEMS, + async (_, projectId: string, state: 'open' | 'closed' | 'all' = 'open'): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found' }; + } + + try { + // Build WIQL query + const projectSafe = sanitizeWiqlString(config.project); + let stateCondition = ''; + if (state === 'open') { + stateCondition = "AND ([System.State] = 'New' OR [System.State] = 'Active')"; + } else if (state === 'closed') { + stateCondition = "AND ([System.State] = 'Closed' OR [System.State] = 'Resolved' OR [System.State] = 'Done')"; + } + + const wiqlQuery = { + query: `SELECT [System.Id] FROM WorkItems WHERE [System.TeamProject] = '${projectSafe}' ${stateCondition} ORDER BY [System.ChangedDate] DESC`, + }; + + const queryResult = await adoFetch(config, '/wit/wiql', { + method: 'POST', + body: JSON.stringify(wiqlQuery), + }) as { workItems?: Array<{ id: number }> }; + + if (!queryResult.workItems || queryResult.workItems.length === 0) { + return { success: true, data: [] }; + } + + // Fetch full work item details (batch, max 200) + const ids = queryResult.workItems.slice(0, 200).map(wi => wi.id); + const workItemsResult = await adoFetch( + config, + `/wit/workitems?ids=${ids.join(',')}&$expand=All` + ) as { value: ADOAPIWorkItem[] }; + + const result: ADOWorkItem[] = workItemsResult.value.map(wi => + transformWorkItem(wi, config) + ); + + return { success: true, data: result }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to fetch work items', + }; + } + } + ); +} + +/** + * Get a single work item by ID + */ +export function registerGetWorkItem(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_GET_WORK_ITEM, + async (_, projectId: string, workItemId: number): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found' }; + } + + try { + const wi = await adoFetch( + config, + `/wit/workitems/${workItemId}?$expand=All` + ) as ADOAPIWorkItem; + + const result = transformWorkItem(wi, config); + return { success: true, data: result }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to fetch work item', + }; + } + } + ); +} + +/** + * Get comments for a work item + */ +export function registerGetWorkItemComments(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_GET_WORK_ITEM_COMMENTS, + async (_, projectId: string, workItemId: number): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found' }; + } + + try { + const commentsResult = await adoFetch( + config, + `/wit/workitems/${workItemId}/comments` + ) as { comments: ADOAPIComment[] }; + + return { success: true, data: commentsResult.comments || [] }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to fetch work item comments', + }; + } + } + ); +} + +/** + * Create a new work item + */ +export function registerCreateWorkItem(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_CREATE_WORK_ITEM, + async ( + _, + projectId: string, + workItemType: string, + title: string, + body?: string, + tags?: string[] + ): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found' }; + } + + try { + const operations = [ + { op: 'add', path: '/fields/System.Title', value: title }, + ]; + + if (body) { + operations.push({ op: 'add', path: '/fields/System.Description', value: body }); + } + + if (tags && tags.length > 0) { + operations.push({ op: 'add', path: '/fields/System.Tags', value: tags.join('; ') }); + } + + const wi = await adoPatch( + config, + `/wit/workitems/$${encodeURIComponent(workItemType)}`, + operations + ) as ADOAPIWorkItem; + + const result = transformWorkItem(wi, config); + return { success: true, data: result }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to create work item', + }; + } + } + ); +} + +/** + * Update a work item + */ +export function registerUpdateWorkItem(): void { + ipcMain.handle( + IPC_CHANNELS.ADO_UPDATE_WORK_ITEM, + async ( + _, + projectId: string, + workItemId: number, + updates: { title?: string; body?: string; state?: string; tags?: string[] } + ): Promise> => { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + const config = getADOConfig(project); + if (!config) { + return { success: false, error: 'No Azure DevOps configuration found' }; + } + + try { + const operations: Array<{ op: string; path: string; value?: unknown }> = []; + + if (updates.title !== undefined) { + operations.push({ op: 'replace', path: '/fields/System.Title', value: updates.title }); + } + if (updates.body !== undefined) { + operations.push({ op: 'replace', path: '/fields/System.Description', value: updates.body }); + } + if (updates.state !== undefined) { + operations.push({ op: 'replace', path: '/fields/System.State', value: updates.state }); + } + if (updates.tags !== undefined) { + operations.push({ op: 'replace', path: '/fields/System.Tags', value: updates.tags.join('; ') }); + } + + if (operations.length === 0) { + return { success: false, error: 'No updates provided' }; + } + + const wi = await adoPatch( + config, + `/wit/workitems/${workItemId}`, + operations + ) as ADOAPIWorkItem; + + const result = transformWorkItem(wi, config); + return { success: true, data: result }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to update work item', + }; + } + } + ); +} + +/** + * Register all work item handlers + */ +export function registerWorkItemHandlers(): void { + registerCheckConnection(); + registerTestConnection(); + registerGetWorkItems(); + registerGetWorkItem(); + registerGetWorkItemComments(); + registerCreateWorkItem(); + registerUpdateWorkItem(); +} diff --git a/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts b/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts index cbe4a67b68..9550b1ef66 100644 --- a/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts @@ -1,6 +1,7 @@ import type { BrowserWindow } from 'electron'; import path from 'path'; -import { IPC_CHANNELS, getSpecsDir, AUTO_BUILD_PATHS } from '../../shared/constants'; +import { existsSync } from 'fs'; +import { IPC_CHANNELS, AUTO_BUILD_PATHS, getSpecsDir } from '../../shared/constants'; import type { SDKRateLimitInfo, Task, @@ -15,6 +16,7 @@ import { fileWatcher } from '../file-watcher'; import { projectStore } from '../project-store'; import { notificationService } from '../notification-service'; import { persistPlanStatusSync, getPlanPath } from './task/plan-file-utils'; +import { findTaskWorktree } from '../worktree-paths'; /** @@ -81,6 +83,12 @@ export function registerAgenteventsHandlers( try { const projects = projectStore.getProjects(); + // IMPORTANT: Invalidate cache for all projects to ensure we get fresh data + // This prevents race conditions where cached task data has stale status + for (const p of projects) { + projectStore.invalidateTasksCache(p.id); + } + for (const p of projects) { const tasks = projectStore.getTasks(p.id); task = tasks.find((t) => t.id === taskId || t.specId === taskId); @@ -92,13 +100,39 @@ export function registerAgenteventsHandlers( if (task && project) { const taskTitle 
= task.title || task.specId; - const planPath = getPlanPath(project, task); + const mainPlanPath = getPlanPath(project, task); + const projectId = project.id; // Capture for closure + + // Capture task values for closure + const taskSpecId = task.specId; + const projectPath = project.path; + const autoBuildPath = project.autoBuildPath; // Use shared utility for persisting status (prevents race conditions) + // Persist to both main project AND worktree (if exists) for consistency const persistStatus = (status: TaskStatus) => { - const persisted = persistPlanStatusSync(planPath, status); - if (persisted) { - console.log(`[Task ${taskId}] Persisted status to plan: ${status}`); + // Persist to main project + const mainPersisted = persistPlanStatusSync(mainPlanPath, status, projectId); + if (mainPersisted) { + console.warn(`[Task ${taskId}] Persisted status to main plan: ${status}`); + } + + // Also persist to worktree if it exists + const worktreePath = findTaskWorktree(projectPath, taskSpecId); + if (worktreePath) { + const specsBaseDir = getSpecsDir(autoBuildPath); + const worktreePlanPath = path.join( + worktreePath, + specsBaseDir, + taskSpecId, + AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN + ); + if (existsSync(worktreePlanPath)) { + const worktreePersisted = persistPlanStatusSync(worktreePlanPath, status, projectId); + if (worktreePersisted) { + console.warn(`[Task ${taskId}] Persisted status to worktree plan: ${status}`); + } + } } }; @@ -113,7 +147,7 @@ export function registerAgenteventsHandlers( task.subtasks.some((s) => s.status !== 'completed'); if (isActiveStatus && !hasIncompleteSubtasks) { - console.log(`[Task ${taskId}] Fallback: Moving to human_review (process exited successfully)`); + console.warn(`[Task ${taskId}] Fallback: Moving to human_review (process exited successfully)`); persistStatus('human_review'); mainWindow.webContents.send( IPC_CHANNELS.TASK_STATUS_CHANGE, @@ -160,18 +194,37 @@ export function registerAgenteventsHandlers( newStatus ); - // CRITICAL: Persist status to plan file to prevent flip-flop on task list refresh + // CRITICAL: Persist status to plan file(s) to prevent flip-flop on task list refresh // When getTasks() is called, it reads status from the plan file. Without persisting, // the status in the file might differ from the UI, causing inconsistent state. // Uses shared utility with locking to prevent race conditions. + // IMPORTANT: We persist to BOTH main project AND worktree (if exists) to ensure + // consistency, since getTasks() prefers the worktree version. 
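+      // Illustration (hypothetical names; the actual paths come from
+      // getSpecsDir() and AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN): for specId
+      // "001-example" the same status is written to
+      //   <project.path>/<specsDir>/001-example/<plan file>   (main checkout)
+      //   <worktreePath>/<specsDir>/001-example/<plan file>   (if a worktree exists)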
try { const projects = projectStore.getProjects(); for (const p of projects) { const tasks = projectStore.getTasks(p.id); const task = tasks.find((t) => t.id === taskId || t.specId === taskId); if (task) { - const planPath = getPlanPath(p, task); - persistPlanStatusSync(planPath, newStatus); + // Persist to main project plan file + const mainPlanPath = getPlanPath(p, task); + persistPlanStatusSync(mainPlanPath, newStatus, p.id); + + // Also persist to worktree plan file if it exists + // This ensures consistency since getTasks() prefers worktree version + const worktreePath = findTaskWorktree(p.path, task.specId); + if (worktreePath) { + const specsBaseDir = getSpecsDir(p.autoBuildPath); + const worktreePlanPath = path.join( + worktreePath, + specsBaseDir, + task.specId, + AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN + ); + if (existsSync(worktreePlanPath)) { + persistPlanStatusSync(worktreePlanPath, newStatus, p.id); + } + } break; } } diff --git a/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts b/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts index 1d0b963efc..66c7f3ee3d 100644 --- a/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts @@ -11,6 +11,7 @@ import type { IPCResult, AppUpdateInfo } from '../../shared/types'; import { checkForUpdates, downloadUpdate, + downloadStableVersion, quitAndInstall, getCurrentVersion } from '../app-updater'; @@ -65,6 +66,26 @@ export function registerAppUpdateHandlers(): void { } ); + /** + * APP_UPDATE_DOWNLOAD_STABLE: Download stable version (for downgrade from beta) + * Uses allowDowngrade to download an older stable version + */ + ipcMain.handle( + IPC_CHANNELS.APP_UPDATE_DOWNLOAD_STABLE, + async (): Promise => { + try { + await downloadStableVersion(); + return { success: true }; + } catch (error) { + console.error('[app-update-handlers] Download stable version failed:', error); + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to download stable version' + }; + } + } + ); + /** * APP_UPDATE_INSTALL: Quit and install update * Quits the app and installs the downloaded update diff --git a/apps/frontend/src/main/ipc-handlers/autobuild-source-handlers.ts b/apps/frontend/src/main/ipc-handlers/autobuild-source-handlers.ts deleted file mode 100644 index 4a4ab66d82..0000000000 --- a/apps/frontend/src/main/ipc-handlers/autobuild-source-handlers.ts +++ /dev/null @@ -1,321 +0,0 @@ -import { ipcMain } from 'electron'; -import type { BrowserWindow } from 'electron'; -import { IPC_CHANNELS } from '../../shared/constants'; -import type { IPCResult } from '../../shared/types'; -import path from 'path'; -import { existsSync, readFileSync, writeFileSync } from 'fs'; -import type { AutoBuildSourceUpdateProgress, SourceEnvConfig, SourceEnvCheckResult } from '../../shared/types'; -import { checkForUpdates as checkSourceUpdates, downloadAndApplyUpdate, getBundledVersion, getEffectiveVersion, getEffectiveSourcePath } from '../auto-claude-updater'; -import { debugLog } from '../../shared/utils/debug-logger'; - - -/** - * Register all autobuild-source-related IPC handlers - */ -export function registerAutobuildSourceHandlers( - getMainWindow: () => BrowserWindow | null -): void { - // ============================================ - // Auto Claude Source Update Operations - // ============================================ - - ipcMain.handle( - IPC_CHANNELS.AUTOBUILD_SOURCE_CHECK, - async (): Promise> => { - console.log('[autobuild-source] Check for updates called'); - debugLog('[IPC] AUTOBUILD_SOURCE_CHECK called'); - try { - const result = await checkSourceUpdates(); - console.log('[autobuild-source] Check result:', JSON.stringify(result, null, 2)); - debugLog('[IPC] AUTOBUILD_SOURCE_CHECK result:', result); - return { success: true, data: result }; - } catch (error) { - console.error('[autobuild-source] Check error:', error); - debugLog('[IPC] AUTOBUILD_SOURCE_CHECK error:', error); - return { - success: false, - error: error instanceof Error ? error.message : 'Failed to check for updates' - }; - } - } - ); - - ipcMain.on( - IPC_CHANNELS.AUTOBUILD_SOURCE_DOWNLOAD, - () => { - debugLog('[IPC] Autobuild source download requested'); - const mainWindow = getMainWindow(); - if (!mainWindow) { - debugLog('[IPC] No main window available, aborting update'); - return; - } - - // Start download in background - downloadAndApplyUpdate((progress) => { - debugLog('[IPC] Update progress:', progress.stage, progress.message); - mainWindow.webContents.send( - IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, - progress - ); - }).then((result) => { - if (result.success) { - debugLog('[IPC] Update completed successfully, version:', result.version); - mainWindow.webContents.send( - IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, - { - stage: 'complete', - message: `Updated to version ${result.version}`, - newVersion: result.version // Include new version for UI refresh - } as AutoBuildSourceUpdateProgress - ); - } else { - debugLog('[IPC] Update failed:', result.error); - mainWindow.webContents.send( - IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, - { - stage: 'error', - message: result.error || 'Update failed' - } as AutoBuildSourceUpdateProgress - ); - } - }).catch((error) => { - debugLog('[IPC] Update error:', error instanceof Error ? error.message : error); - mainWindow.webContents.send( - IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, - { - stage: 'error', - message: error instanceof Error ? 
error.message : 'Update failed' - } as AutoBuildSourceUpdateProgress - ); - }); - - // Send initial progress - mainWindow.webContents.send( - IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, - { - stage: 'checking', - message: 'Starting update...' - } as AutoBuildSourceUpdateProgress - ); - } - ); - - ipcMain.handle( - IPC_CHANNELS.AUTOBUILD_SOURCE_VERSION, - async (): Promise> => { - try { - // Use effective version which accounts for source updates - const version = getEffectiveVersion(); - debugLog('[IPC] Returning effective version:', version); - return { success: true, data: version }; - } catch (error) { - return { - success: false, - error: error instanceof Error ? error.message : 'Failed to get version' - }; - } - } - ); - - // ============================================ - // Auto Claude Source Environment Operations - // ============================================ - - /** - * Parse an .env file content into a key-value object - */ - const parseSourceEnvFile = (content: string): Record => { - const vars: Record = {}; - for (const line of content.split('\n')) { - const trimmed = line.trim(); - if (!trimmed || trimmed.startsWith('#')) continue; - - const eqIndex = trimmed.indexOf('='); - if (eqIndex > 0) { - const key = trimmed.substring(0, eqIndex).trim(); - let value = trimmed.substring(eqIndex + 1).trim(); - // Remove quotes if present - if ((value.startsWith('"') && value.endsWith('"')) || - (value.startsWith("'") && value.endsWith("'"))) { - value = value.slice(1, -1); - } - vars[key] = value; - } - } - return vars; - }; - - ipcMain.handle( - IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_GET, - async (): Promise> => { - try { - const sourcePath = getEffectiveSourcePath(); - if (!sourcePath) { - return { - success: true, - data: { - hasClaudeToken: false, - envExists: false, - sourcePath: undefined - } - }; - } - - const envPath = path.join(sourcePath, '.env'); - const envExists = existsSync(envPath); - - if (!envExists) { - return { - success: true, - data: { - hasClaudeToken: false, - envExists: false, - sourcePath - } - }; - } - - const content = readFileSync(envPath, 'utf-8'); - const vars = parseSourceEnvFile(content); - const hasToken = !!vars['CLAUDE_CODE_OAUTH_TOKEN']; - - return { - success: true, - data: { - hasClaudeToken: hasToken, - claudeOAuthToken: hasToken ? vars['CLAUDE_CODE_OAUTH_TOKEN'] : undefined, - envExists: true, - sourcePath - } - }; - } catch (error) { - return { - success: false, - error: error instanceof Error ? error.message : 'Failed to get source env' - }; - } - } - ); - - ipcMain.handle( - IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_UPDATE, - async (_, config: { claudeOAuthToken?: string }): Promise => { - try { - const sourcePath = getEffectiveSourcePath(); - if (!sourcePath) { - return { - success: false, - error: 'Auto-Claude source path not found. Please configure it in App Settings.' 
- }; - } - - const envPath = path.join(sourcePath, '.env'); - - // Read existing content or start fresh - let existingContent = ''; - const existingVars: Record = {}; - - if (existsSync(envPath)) { - existingContent = readFileSync(envPath, 'utf-8'); - Object.assign(existingVars, parseSourceEnvFile(existingContent)); - } - - // Update the token - if (config.claudeOAuthToken !== undefined) { - existingVars['CLAUDE_CODE_OAUTH_TOKEN'] = config.claudeOAuthToken; - } - - // Rebuild the .env file preserving comments and structure - const lines = existingContent.split('\n'); - const processedKeys = new Set(); - const outputLines: string[] = []; - - for (const line of lines) { - const trimmed = line.trim(); - if (!trimmed || trimmed.startsWith('#')) { - outputLines.push(line); - continue; - } - - const eqIndex = trimmed.indexOf('='); - if (eqIndex > 0) { - const key = trimmed.substring(0, eqIndex).trim(); - if (key in existingVars) { - outputLines.push(`${key}=${existingVars[key]}`); - processedKeys.add(key); - } else { - outputLines.push(line); - } - } else { - outputLines.push(line); - } - } - - // Add any new keys that weren't in the original file - for (const [key, value] of Object.entries(existingVars)) { - if (!processedKeys.has(key)) { - outputLines.push(`${key}=${value}`); - } - } - - writeFileSync(envPath, outputLines.join('\n')); - - return { success: true }; - } catch (error) { - return { - success: false, - error: error instanceof Error ? error.message : 'Failed to update source env' - }; - } - } - ); - - ipcMain.handle( - IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_CHECK_TOKEN, - async (): Promise> => { - try { - const sourcePath = getEffectiveSourcePath(); - if (!sourcePath) { - return { - success: true, - data: { - hasToken: false, - sourcePath: undefined, - error: 'Auto-Claude source path not found' - } - }; - } - - const envPath = path.join(sourcePath, '.env'); - if (!existsSync(envPath)) { - return { - success: true, - data: { - hasToken: false, - sourcePath, - error: '.env file does not exist' - } - }; - } - - const content = readFileSync(envPath, 'utf-8'); - const vars = parseSourceEnvFile(content); - const hasToken = !!vars['CLAUDE_CODE_OAUTH_TOKEN'] && vars['CLAUDE_CODE_OAUTH_TOKEN'].length > 0; - - return { - success: true, - data: { - hasToken, - sourcePath - } - }; - } catch (error) { - return { - success: false, - error: error instanceof Error ? 
error.message : 'Failed to check source token' - }; - } - } - ); - -} diff --git a/apps/frontend/src/main/ipc-handlers/env-handlers.ts b/apps/frontend/src/main/ipc-handlers/env-handlers.ts index 9574215b9e..0ebf67cf32 100644 --- a/apps/frontend/src/main/ipc-handlers/env-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/env-handlers.ts @@ -8,6 +8,8 @@ import { existsSync, readFileSync, writeFileSync } from 'fs'; import { spawn } from 'child_process'; import { projectStore } from '../project-store'; import { parseEnvFile } from './utils'; +import { getClaudeCliInvocation, getClaudeCliInvocationAsync } from '../claude-cli-utils'; +import { debugError } from '../../shared/utils/debug-logger'; // GitLab environment variable keys const GITLAB_ENV_KEYS = { @@ -18,6 +20,16 @@ const GITLAB_ENV_KEYS = { AUTO_SYNC: 'GITLAB_AUTO_SYNC' } as const; +// Azure DevOps environment variable keys +const ADO_ENV_KEYS = { + ENABLED: 'ADO_ENABLED', + ORGANIZATION: 'ADO_ORGANIZATION', + PROJECT: 'ADO_PROJECT', + REPO_NAME: 'ADO_REPO_NAME', + PAT: 'ADO_PAT', + INSTANCE_URL: 'ADO_INSTANCE_URL' +} as const; + /** * Helper to generate .env line (DRY) */ @@ -25,6 +37,43 @@ function envLine(vars: Record, key: string, defaultVal: string = return vars[key] ? `${key}=${vars[key]}` : `# ${key}=${defaultVal}`; } +type ResolvedClaudeCliInvocation = + | { command: string; env: Record } + | { error: string }; + +function resolveClaudeCliInvocation(): ResolvedClaudeCliInvocation { + try { + const invocation = getClaudeCliInvocation(); + if (!invocation?.command) { + throw new Error('Claude CLI path not resolved'); + } + return { command: invocation.command, env: invocation.env }; + } catch (error) { + debugError('[IPC] Failed to resolve Claude CLI path:', error); + return { + error: error instanceof Error ? error.message : 'Failed to resolve Claude CLI path', + }; + } +} + +/** + * Async version of resolveClaudeCliInvocation - non-blocking for main process + */ +async function resolveClaudeCliInvocationAsync(): Promise { + try { + const invocation = await getClaudeCliInvocationAsync(); + if (!invocation?.command) { + throw new Error('Claude CLI path not resolved'); + } + return { command: invocation.command, env: invocation.env }; + } catch (error) { + debugError('[IPC] Failed to resolve Claude CLI path:', error); + return { + error: error instanceof Error ? error.message : 'Failed to resolve Claude CLI path', + }; + } +} + /** * Register all env-related IPC handlers @@ -94,6 +143,25 @@ export function registerEnvHandlers( if (config.gitlabAutoSync !== undefined) { existingVars[GITLAB_ENV_KEYS.AUTO_SYNC] = config.gitlabAutoSync ? 'true' : 'false'; } + // Azure DevOps Integration + if (config.adoEnabled !== undefined) { + existingVars[ADO_ENV_KEYS.ENABLED] = config.adoEnabled ? 
'true' : 'false'; + } + if (config.adoOrganization !== undefined) { + existingVars[ADO_ENV_KEYS.ORGANIZATION] = config.adoOrganization; + } + if (config.adoProject !== undefined) { + existingVars[ADO_ENV_KEYS.PROJECT] = config.adoProject; + } + if (config.adoRepoName !== undefined) { + existingVars[ADO_ENV_KEYS.REPO_NAME] = config.adoRepoName; + } + if (config.adoPat !== undefined) { + existingVars[ADO_ENV_KEYS.PAT] = config.adoPat; + } + if (config.adoInstanceUrl !== undefined) { + existingVars[ADO_ENV_KEYS.INSTANCE_URL] = config.adoInstanceUrl; + } // Git/Worktree Settings if (config.defaultBranch !== undefined) { existingVars['DEFAULT_BRANCH'] = config.defaultBranch; @@ -221,6 +289,16 @@ ${envLine(existingVars, GITLAB_ENV_KEYS.TOKEN)} ${envLine(existingVars, GITLAB_ENV_KEYS.PROJECT, 'group/project')} ${envLine(existingVars, GITLAB_ENV_KEYS.AUTO_SYNC, 'false')} +# ============================================================================= +# AZURE DEVOPS INTEGRATION (OPTIONAL) +# ============================================================================= +${existingVars[ADO_ENV_KEYS.ENABLED] !== undefined ? `${ADO_ENV_KEYS.ENABLED}=${existingVars[ADO_ENV_KEYS.ENABLED]}` : `# ${ADO_ENV_KEYS.ENABLED}=false`} +${envLine(existingVars, ADO_ENV_KEYS.ORGANIZATION)} +${envLine(existingVars, ADO_ENV_KEYS.PROJECT)} +${envLine(existingVars, ADO_ENV_KEYS.REPO_NAME)} +${envLine(existingVars, ADO_ENV_KEYS.PAT)} +${envLine(existingVars, ADO_ENV_KEYS.INSTANCE_URL, 'https://dev.azure.com')} + # ============================================================================= # GIT/WORKTREE SETTINGS (OPTIONAL) # ============================================================================= @@ -333,6 +411,7 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_ linearEnabled: false, githubEnabled: false, gitlabEnabled: false, + adoEnabled: false, graphitiEnabled: false, enableFancyUi: true, claudeTokenIsGlobal: false, @@ -407,6 +486,25 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_ config.gitlabAutoSync = true; } + // Azure DevOps config + if (vars[ADO_ENV_KEYS.PAT]) { + config.adoPat = vars[ADO_ENV_KEYS.PAT]; + // Enable by default if PAT exists and ADO_ENABLED is not explicitly false + config.adoEnabled = vars[ADO_ENV_KEYS.ENABLED]?.toLowerCase() !== 'false'; + } + if (vars[ADO_ENV_KEYS.ORGANIZATION]) { + config.adoOrganization = vars[ADO_ENV_KEYS.ORGANIZATION]; + } + if (vars[ADO_ENV_KEYS.PROJECT]) { + config.adoProject = vars[ADO_ENV_KEYS.PROJECT]; + } + if (vars[ADO_ENV_KEYS.REPO_NAME]) { + config.adoRepoName = vars[ADO_ENV_KEYS.REPO_NAME]; + } + if (vars[ADO_ENV_KEYS.INSTANCE_URL]) { + config.adoInstanceUrl = vars[ADO_ENV_KEYS.INSTANCE_URL]; + } + // Git/Worktree config if (vars['DEFAULT_BRANCH']) { config.defaultBranch = vars['DEFAULT_BRANCH']; @@ -552,13 +650,21 @@ ${existingVars['GRAPHITI_DB_PATH'] ? 
@@ -552,13 +650,21 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_
       return { success: false, error: 'Project not found' };
     }
 
+    // Use async version to avoid blocking main process during CLI detection
+    const resolved = await resolveClaudeCliInvocationAsync();
+    if ('error' in resolved) {
+      return { success: false, error: resolved.error };
+    }
+    const claudeCmd = resolved.command;
+    const claudeEnv = resolved.env;
+
     try {
       // Check if Claude CLI is available and authenticated
       const result = await new Promise((resolve) => {
-        const proc = spawn('claude', ['--version'], {
+        const proc = spawn(claudeCmd, ['--version'], {
           cwd: project.path,
-          env: { ...process.env },
-          shell: true
+          env: claudeEnv,
+          shell: false
         });
 
         let _stdout = '';
@@ -576,10 +682,10 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_
           if (code === 0) {
             // Claude CLI is available, check if authenticated
             // Run a simple command that requires auth
-            const authCheck = spawn('claude', ['api', '--help'], {
+            const authCheck = spawn(claudeCmd, ['api', '--help'], {
               cwd: project.path,
-              env: { ...process.env },
-              shell: true
+              env: claudeEnv,
+              shell: false
             });
 
             authCheck.on('close', (authCode: number | null) => {
@@ -614,6 +720,9 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_
         });
       });
 
+      if (!result.success) {
+        return { success: false, error: result.error || 'Failed to check Claude auth' };
+      }
       return { success: true, data: result };
     } catch (error) {
       return {
@@ -632,13 +741,21 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_
       return { success: false, error: 'Project not found' };
     }
 
+    // Use async version to avoid blocking main process during CLI detection
+    const resolved = await resolveClaudeCliInvocationAsync();
+    if ('error' in resolved) {
+      return { success: false, error: resolved.error };
+    }
+    const claudeCmd = resolved.command;
+    const claudeEnv = resolved.env;
+
     try {
       // Run claude setup-token which will open browser for OAuth
       const result = await new Promise((resolve) => {
-        const proc = spawn('claude', ['setup-token'], {
+        const proc = spawn(claudeCmd, ['setup-token'], {
           cwd: project.path,
-          env: { ...process.env },
-          shell: true,
+          env: claudeEnv,
+          shell: false,
           stdio: 'inherit' // This allows the terminal to handle the interactive auth
         });
@@ -666,6 +783,9 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_
         });
       });
 
+      if (!result.success) {
+        return { success: false, error: result.error || 'Failed to invoke Claude setup' };
+      }
       return { success: true, data: result };
     } catch (error) {
       return {
diff --git a/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts b/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts
index 616106675d..4c3c942f7e 100644
--- a/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts
@@ -10,11 +10,15 @@ const mockSpawn = vi.fn();
 const mockExecSync = vi.fn();
 const mockExecFileSync = vi.fn();
 
-vi.mock('child_process', () => ({
-  spawn: (...args: unknown[]) => mockSpawn(...args),
-  execSync: (...args: unknown[]) => mockExecSync(...args),
-  execFileSync: (...args: unknown[]) => mockExecFileSync(...args)
-}));
+vi.mock('child_process', async (importOriginal) => {
+  const actual = await importOriginal<typeof import('child_process')>();
+  return {
+    ...actual,
+    spawn: (...args: unknown[]) => mockSpawn(...args),
+    execSync: (...args: unknown[]) => mockExecSync(...args),
+    execFileSync: (...args: unknown[]) => mockExecFileSync(...args)
+  };
+});
 
 // Mock shell.openExternal
 const mockOpenExternal = vi.fn();
@@ -82,6 +86,13 @@ vi.mock('../../../env-utils', () => ({
   isCommandAvailable: vi.fn((cmd: string) => mockFindExecutable(cmd) !== null)
 }));
 
+// Mock cli-tool-manager to avoid child_process import issues
+vi.mock('../../../cli-tool-manager', () => ({
+  getToolPath: vi.fn(() => '/usr/local/bin/gh'),
+  detectCLITools: vi.fn(),
+  getAllToolStatus: vi.fn()
+}));
+
 // Create mock process for spawn
 function createMockProcess(): EventEmitter & {
   stdout: EventEmitter | null;
diff --git a/apps/frontend/src/main/ipc-handlers/github/__tests__/runner-env-handlers.test.ts b/apps/frontend/src/main/ipc-handlers/github/__tests__/runner-env-handlers.test.ts
new file mode 100644
index 0000000000..751578da7f
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/github/__tests__/runner-env-handlers.test.ts
@@ -0,0 +1,260 @@
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import fs from 'fs';
+import os from 'os';
+import path from 'path';
+import type { Project } from '../../../../shared/types';
+import { IPC_CHANNELS } from '../../../../shared/constants';
+import type { BrowserWindow } from 'electron';
+import type { AgentManager } from '../../../agent/agent-manager';
+import type { createIPCCommunicators as createIPCCommunicatorsType } from '../utils/ipc-communicator';
+
+const mockIpcMain = vi.hoisted(() => {
+  class HoistedMockIpcMain {
+    handlers = new Map();
+    listeners = new Map();
+
+    handle(channel: string, handler: Function): void {
+      this.handlers.set(channel, handler);
+    }
+
+    on(channel: string, listener: Function): void {
+      this.listeners.set(channel, listener);
+    }
+
+    async invokeHandler(channel: string, ...args: unknown[]): Promise<unknown> {
+      const handler = this.handlers.get(channel);
+      if (!handler) {
+        throw new Error(`No handler for channel: ${channel}`);
+      }
+      return handler({}, ...args);
+    }
+
+    async emit(channel: string, ...args: unknown[]): Promise<void> {
+      const listener = this.listeners.get(channel);
+      if (!listener) {
+        throw new Error(`No listener for channel: ${channel}`);
+      }
+      await listener({}, ...args);
+    }
+
+    reset(): void {
+      this.handlers.clear();
+      this.listeners.clear();
+    }
+  }
+
+  return new HoistedMockIpcMain();
+});
+
+const mockRunPythonSubprocess = vi.fn();
+const mockValidateGitHubModule = vi.fn();
+const mockGetRunnerEnv = vi.fn();
+type CreateIPCCommunicators = typeof createIPCCommunicatorsType;
+
+const mockCreateIPCCommunicators = vi.fn(
+  (..._args: Parameters<CreateIPCCommunicators>) => ({
+    sendProgress: vi.fn(),
+    sendComplete: vi.fn(),
+    sendError: vi.fn(),
+  })
+) as unknown as CreateIPCCommunicators;
+
+const projectRef: { current: Project | null } = { current: null };
+const tempDirs: string[] = [];
+
+vi.mock('electron', () => ({
+  ipcMain: mockIpcMain,
+  BrowserWindow: class {},
+  app: {
+    getPath: vi.fn(() => '/tmp'),
+    on: vi.fn(),
+  },
+}));
+
+vi.mock('../../../agent/agent-manager', () => ({
+  AgentManager: class {
+    startSpecCreation = vi.fn();
+  },
+}));
+
+vi.mock('../utils/ipc-communicator', () => ({
+  createIPCCommunicators: (...args: Parameters<CreateIPCCommunicators>) =>
+    mockCreateIPCCommunicators(...args),
+}));
+
+vi.mock('../utils/project-middleware', () => ({
+  withProjectOrNull: async (_projectId: string, handler: (project: Project) => Promise<unknown>) => {
+    if (!projectRef.current) {
+      return null;
+    }
+    return handler(projectRef.current);
+  },
+}));
+
+vi.mock('../utils/subprocess-runner', () => ({
+  runPythonSubprocess: (...args: unknown[]) => mockRunPythonSubprocess(...args),
+  validateGitHubModule: (...args: unknown[]) => mockValidateGitHubModule(...args),
+  getPythonPath: () => '/tmp/python',
+  getRunnerPath: () => '/tmp/runner.py',
+  buildRunnerArgs: (_runnerPath: string, _projectPath: string, command: string, args: string[] = []) => [
+    'runner.py',
+    command,
+    ...args,
+  ],
+}));
+
+vi.mock('../utils/runner-env', () => ({
+  getRunnerEnv: (...args: unknown[]) => mockGetRunnerEnv(...args),
+}));
+
+vi.mock('../utils', () => ({
+  getGitHubConfig: vi.fn(() => null),
+  githubFetch: vi.fn(),
+}));
+
+vi.mock('../../../settings-utils', () => ({
+  readSettingsFile: vi.fn(() => ({})),
+}));
+
+function createMockWindow(): BrowserWindow {
+  return { webContents: { send: vi.fn() } } as unknown as BrowserWindow;
+}
+
+function createProject(): Project {
+  const projectPath = fs.mkdtempSync(path.join(os.tmpdir(), 'github-env-test-'));
+  tempDirs.push(projectPath);
+  return {
+    id: 'project-1',
+    name: 'Test Project',
+    path: projectPath,
+    autoBuildPath: '.auto-claude',
+    settings: {
+      model: 'default',
+      memoryBackend: 'file',
+      linearSync: false,
+      notifications: {
+        onTaskComplete: false,
+        onTaskFailed: false,
+        onReviewNeeded: false,
+        sound: false,
+      },
+      graphitiMcpEnabled: false,
+      useClaudeMd: true,
+    },
+    createdAt: new Date(),
+    updatedAt: new Date(),
+  };
+}
+
+describe('GitHub runner env usage', () => {
+  beforeEach(() => {
+    vi.clearAllMocks();
+    mockIpcMain.reset();
+    projectRef.current = createProject();
+    mockValidateGitHubModule.mockResolvedValue({ valid: true, backendPath: '/tmp/backend' });
+    mockGetRunnerEnv.mockResolvedValue({ ANTHROPIC_AUTH_TOKEN: 'token' });
+  });
+
+  afterEach(() => {
+    for (const dir of tempDirs) {
+      try {
+        fs.rmSync(dir, { recursive: true, force: true });
+      } catch {
+        // Ignore cleanup errors for already-removed temp dirs.
+      }
+    }
+    tempDirs.length = 0;
+  });
+
+  it('passes runner env to PR review subprocess', async () => {
+    const { registerPRHandlers } = await import('../pr-handlers');
+
+    mockRunPythonSubprocess.mockReturnValue({
+      process: { pid: 123 },
+      promise: Promise.resolve({
+        success: true,
+        exitCode: 0,
+        stdout: '',
+        stderr: '',
+        data: {
+          prNumber: 123,
+          repo: 'test/repo',
+          success: true,
+          findings: [],
+          summary: '',
+          overallStatus: 'comment',
+          reviewedAt: new Date().toISOString(),
+        },
+      }),
+    });
+
+    registerPRHandlers(() => createMockWindow());
+    await mockIpcMain.emit(IPC_CHANNELS.GITHUB_PR_REVIEW, projectRef.current?.id, 123);
+
+    expect(mockGetRunnerEnv).toHaveBeenCalledWith({ USE_CLAUDE_MD: 'true' });
+    expect(mockRunPythonSubprocess).toHaveBeenCalledWith(
+      expect.objectContaining({
+        env: { ANTHROPIC_AUTH_TOKEN: 'token' },
+      })
+    );
+  });
+
+  it('passes runner env to triage subprocess', async () => {
+    const { registerTriageHandlers } = await import('../triage-handlers');
+
+    mockRunPythonSubprocess.mockReturnValue({
+      process: { pid: 124 },
+      promise: Promise.resolve({
+        success: true,
+        exitCode: 0,
+        stdout: '',
+        stderr: '',
+        data: [],
+      }),
+    });
+
+    registerTriageHandlers(() => createMockWindow());
+    await mockIpcMain.emit(IPC_CHANNELS.GITHUB_TRIAGE_RUN, projectRef.current?.id);
+
+    expect(mockGetRunnerEnv).toHaveBeenCalledWith();
+    expect(mockRunPythonSubprocess).toHaveBeenCalledWith(
+      expect.objectContaining({
+        env: { ANTHROPIC_AUTH_TOKEN: 'token' },
+      })
+    );
+  });
+
+  it('passes runner env to autofix analyze preview subprocess', async () => {
+    const { registerAutoFixHandlers } = await import('../autofix-handlers');
+    const { AgentManager: MockedAgentManager } = await import('../../../agent/agent-manager');
+
+    mockRunPythonSubprocess.mockReturnValue({
+      process: { pid: 125 },
+      promise: Promise.resolve({
+        success: true,
+        exitCode: 0,
+        stdout: '',
+        stderr: '',
+        data: {
+          totalIssues: 0,
+          primaryIssue: null,
+          proposedBatches: [],
+          singleIssues: [],
+        },
+      }),
+    });
+
+    const agentManager: AgentManager = new MockedAgentManager();
+    const getMainWindow: () => BrowserWindow | null = () => createMockWindow();
+
+    registerAutoFixHandlers(agentManager, getMainWindow);
+    await mockIpcMain.emit(IPC_CHANNELS.GITHUB_AUTOFIX_ANALYZE_PREVIEW, projectRef.current?.id);
+
+    expect(mockGetRunnerEnv).toHaveBeenCalledWith();
+    expect(mockRunPythonSubprocess).toHaveBeenCalledWith(
+      expect.objectContaining({
+        env: { ANTHROPIC_AUTH_TOKEN: 'token' },
+      })
+    );
+  });
+});
diff --git a/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts
index 578ebace52..187eaa5d6b 100644
--- a/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts
@@ -28,6 +28,7 @@ import {
   parseJSONFromOutput,
 } from './utils/subprocess-runner';
 import { AgentManager } from '../../agent/agent-manager';
+import { getRunnerEnv } from './utils/runner-env';
 
 // Debug logging
 const { debug: debugLog } = createContextLogger('GitHub AutoFix');
@@ -277,11 +278,13 @@ async function checkNewIssues(project: Project): Promise
   const backendPath = validation.backendPath!;
 
   const args = buildRunnerArgs(getRunnerPath(backendPath), project.path, 'check-new');
+  const subprocessEnv = await getRunnerEnv();
 
   const { promise } = runPythonSubprocess<Record<string, unknown>>({
     pythonPath: getPythonPath(backendPath),
     args,
    cwd: backendPath,
+    env: subprocessEnv,
     onComplete: (stdout) => {
       return parseJSONFromOutput<Record<string, unknown>>(stdout);
     },
@@ -361,7 +364,15 @@ async function startAutoFix(
   // Create spec
   const taskDescription = buildInvestigationTask(issue.number, issue.title, issueContext);
-  const specData = await createSpecForIssue(project, issue.number, issue.title, taskDescription, issue.html_url, labels);
+  const specData = await createSpecForIssue(
+    project,
+    issue.number,
+    issue.title,
+    taskDescription,
+    issue.html_url,
+    labels,
+    project.settings?.mainBranch // Pass project's configured main branch
+  );
 
   // Save auto-fix state
   const issuesDir = path.join(getGitHubDir(project), 'issues');
@@ -607,6 +618,25 @@ export function registerAutoFixHandlers(
       const backendPath = validation.backendPath!;
       const additionalArgs = issueNumbers && issueNumbers.length > 0 ? issueNumbers.map(n => n.toString()) : [];
       const args = buildRunnerArgs(getRunnerPath(backendPath), project.path, 'batch-issues', additionalArgs);
+      const subprocessEnv = await getRunnerEnv();
 
       debugLog('Spawning batch process', { args });
 
@@ -614,6 +626,7 @@ export function registerAutoFixHandlers(
         pythonPath: getPythonPath(backendPath),
         args,
         cwd: backendPath,
+        env: subprocessEnv,
         onProgress: (percent, message) => {
           sendProgress({
             phase: 'batching',
@@ -728,12 +741,14 @@ export function registerAutoFixHandlers(
       }
 
       const args = buildRunnerArgs(getRunnerPath(backendPath), project.path, 'analyze-preview', additionalArgs);
+      const subprocessEnv = await getRunnerEnv();
 
       debugLog('Spawning analyze-preview process', { args });
 
       const { promise } = runPythonSubprocess({
         pythonPath: getPythonPath(backendPath),
         args,
         cwd: backendPath,
+        env: subprocessEnv,
         onProgress: (percent, message) => {
           sendProgress({ phase: 'analyzing', progress: percent, message });
         },
diff --git a/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts
index 8a38619e79..9e2e5c0506 100644
--- a/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts
@@ -66,7 +66,8 @@ ${issue.body || 'No description provided.'}
         issue.title,
         description,
         issue.html_url,
-        labelNames
+        labelNames,
+        project.settings?.mainBranch // Pass project's configured main branch
       );
 
       // Start spec creation with the existing spec directory
diff --git a/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts
index 4f5a36d435..7ddae6e599 100644
--- a/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts
@@ -148,7 +148,8 @@ export function registerInvestigateIssue(
         issue.title,
         taskDescription,
         issue.html_url,
-        labels
+        labels,
+        project.settings?.mainBranch // Pass project's configured main branch
       );
 
       // NOTE: We intentionally do NOT call agentManager.startSpecCreation() here
diff --git a/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts
index 7f6b01f44a..a8fea6d47b 100644
--- a/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts
@@ -16,10 +16,12 @@ import { IPC_CHANNELS, MODEL_ID_MAP, DEFAULT_FEATURE_MODELS, DEFAULT_FEATURE_THI
 import { getGitHubConfig, githubFetch } from './utils';
 import { readSettingsFile } from '../../settings-utils';
 import { getAugmentedEnv } from '../../env-utils';
+import { getMemoryService, getDefaultDbPath } from '../../memory-service';
 import type { Project, AppSettings } from '../../../shared/types';
 import { createContextLogger } from './utils/logger';
 import { withProjectOrNull } from './utils/project-middleware';
 import { createIPCCommunicators } from './utils/ipc-communicator';
+import { getRunnerEnv } from './utils/runner-env';
 import {
   runPythonSubprocess,
   getPythonPath,
@@ -70,6 +72,13 @@ function getReviewKey(projectId: string, prNumber: number): string {
   return `${projectId}:${prNumber}`;
 }
 
+/**
+ * Returns env vars for Claude.md usage; enabled unless explicitly opted out.
+ */
+function getClaudeMdEnv(project: Project): Record<string, string> | undefined {
+  return project.settings?.useClaudeMd !== false ? { USE_CLAUDE_MD: 'true' } : undefined;
+}
+
 /**
  * PR review finding from AI analysis
  */
@@ -101,6 +110,7 @@ export interface PRReviewResult {
   error?: string;
   // Follow-up review fields
   reviewedCommitSha?: string;
+  reviewedFileBlobs?: Record<string, string>; // filename → blob SHA for rebase-resistant follow-ups
   isFollowupReview?: boolean;
   previousReviewId?: number;
   resolvedFindings?: string[];
@@ -124,6 +134,174 @@ export interface NewCommitsCheck {
   hasCommitsAfterPosting?: boolean;
 }
 
+/**
+ * Lightweight merge readiness check result
+ * Used for real-time validation of AI verdict freshness
+ */
+export interface MergeReadiness {
+  /** PR is in draft mode */
+  isDraft: boolean;
+  /** GitHub's mergeable status */
+  mergeable: 'MERGEABLE' | 'CONFLICTING' | 'UNKNOWN';
+  /** Simplified CI status */
+  ciStatus: 'passing' | 'failing' | 'pending' | 'none';
+  /** List of blockers that contradict a "ready to merge" verdict */
+  blockers: string[];
+}
+
+/**
+ * PR review memory stored in the memory layer
+ * Represents key insights and learnings from a PR review
+ */
+export interface PRReviewMemory {
+  prNumber: number;
+  repo: string;
+  verdict: string;
+  timestamp: string;
+  summary: {
+    verdict: string;
+    verdict_reasoning?: string;
+    finding_counts?: Record<string, number>;
+    total_findings?: number;
+    blockers?: string[];
+    risk_assessment?: Record<string, unknown>;
+  };
+  keyFindings: Array<{
+    severity: string;
+    category: string;
+    title: string;
+    description: string;
+    file: string;
+    line: number;
+  }>;
+  patterns: string[];
+  gotchas: string[];
+  isFollowup: boolean;
+}
+
+/**
+ * Save PR review insights to the Electron memory layer (LadybugDB)
+ *
+ * Called after a PR review completes to persist learnings for cross-session context.
+ * Extracts key findings, patterns, and gotchas from the review result.
+ *
+ * @param result The completed PR review result
+ * @param repo Repository name (owner/repo)
+ * @param isFollowup Whether this is a follow-up review
+ */
+async function savePRReviewToMemory(
+  result: PRReviewResult,
+  repo: string,
+  isFollowup: boolean = false
+): Promise<void> {
+  const settings = readSettingsFile();
+  if (!settings?.memoryEnabled) {
+    debugLog('Memory not enabled, skipping PR review memory save');
+    return;
+  }
+
+  try {
+    const memoryService = getMemoryService({
+      dbPath: getDefaultDbPath(),
+      database: 'auto_claude_memory',
+    });
+
+    // Build the memory content with comprehensive insights
+    // We want to capture ALL meaningful findings so the AI can learn from patterns
+
+    // Prioritize findings: critical > high > medium > low
+    // Include all critical/high, top 5 medium, top 3 low
+    const criticalFindings = result.findings.filter(f => f.severity === 'critical');
+    const highFindings = result.findings.filter(f => f.severity === 'high');
+    const mediumFindings = result.findings.filter(f => f.severity === 'medium').slice(0, 5);
+    const lowFindings = result.findings.filter(f => f.severity === 'low').slice(0, 3);
+
+    const keyFindingsToSave = [
+      ...criticalFindings,
+      ...highFindings,
+      ...mediumFindings,
+      ...lowFindings,
+    ].map(f => ({
+      severity: f.severity,
+      category: f.category,
+      title: f.title,
+      description: f.description.substring(0, 500), // Truncate for storage
+      file: f.file,
+      line: f.line,
+    }));
+
+    // Extract gotchas: security issues, critical bugs, and common mistakes
+    const gotchaCategories = ['security', 'error_handling', 'data_validation', 'race_condition'];
+    const gotchasToSave = result.findings
+      .filter(f =>
+        f.severity === 'critical' ||
+        f.severity === 'high' ||
+        gotchaCategories.includes(f.category?.toLowerCase() || '')
+      )
+      .map(f => `[${f.category}] ${f.title}: ${f.description.substring(0, 300)}`);
+
+    // Extract patterns: group findings by category to identify recurring issues
+    const categoryGroups = result.findings.reduce((acc, f) => {
+      const cat = f.category || 'general';
+      acc[cat] = (acc[cat] || 0) + 1;
+      return acc;
+    }, {} as Record<string, number>);
+
+    // Patterns are categories that appear multiple times (indicates a systematic issue)
+    const patternsToSave = Object.entries(categoryGroups)
+      .filter(([_, count]) => count >= 2)
+      .map(([category, count]) => `${category}: ${count} occurrences`);
+
+    const memoryContent: PRReviewMemory = {
+      prNumber: result.prNumber,
+      repo,
+      verdict: result.overallStatus || 'unknown',
+      timestamp: new Date().toISOString(),
+      summary: {
+        verdict: result.overallStatus || 'unknown',
+        finding_counts: {
+          critical: criticalFindings.length,
+          high: highFindings.length,
+          medium: result.findings.filter(f => f.severity === 'medium').length,
+          low: result.findings.filter(f => f.severity === 'low').length,
+        },
+        total_findings: result.findings.length,
+      },
+      keyFindings: keyFindingsToSave,
+      patterns: patternsToSave,
+      gotchas: gotchasToSave,
+      isFollowup,
+    };
+
+    // Add follow-up specific info if applicable
+    if (isFollowup && result.resolvedFindings && result.unresolvedFindings) {
+      memoryContent.summary.verdict_reasoning =
+        `Resolved: ${result.resolvedFindings.length}, Unresolved: ${result.unresolvedFindings.length}`;
+    }
+
+    // Save to memory as a pr_review episode
+    const episodeName = `PR #${result.prNumber} ${isFollowup ? 'Follow-up ' : ''}Review - ${repo}`;
+    const saveResult = await memoryService.addEpisode(
+      episodeName,
+      memoryContent,
+      'pr_review',
+      `pr_review_${repo.replace('/', '_')}`
+    );
+
+    if (saveResult.success) {
+      debugLog('PR review saved to memory', { prNumber: result.prNumber, episodeId: saveResult.id });
+    } else {
+      debugLog('Failed to save PR review to memory', { error: saveResult.error });
+    }
+
+  } catch (error) {
+    // Don't fail the review if memory save fails
+    debugLog('Error saving PR review to memory', {
+      error: error instanceof Error ? error.message : error
+    });
+  }
+}
+
 /**
  * PR data from GitHub API
  */
@@ -542,6 +720,7 @@ function getReviewResult(project: Project, prNumber: number): PRReviewResult | n
     error: data.error,
     // Follow-up review fields (snake_case -> camelCase)
     reviewedCommitSha: data.reviewed_commit_sha,
+    reviewedFileBlobs: data.reviewed_file_blobs,
     isFollowupReview: data.is_followup_review ?? false,
     previousReviewId: data.previous_review_id,
     resolvedFindings: data.resolved_findings ?? [],
@@ -628,10 +807,9 @@ async function runPRReview(
   const logCollector = new PRLogCollector(project, prNumber, repo, false);
 
   // Build environment with project settings
-  const subprocessEnv: Record<string, string> = {};
-  if (project.settings?.useClaudeMd !== false) {
-    subprocessEnv['USE_CLAUDE_MD'] = 'true';
-  }
+  const subprocessEnv = await getRunnerEnv(
+    getClaudeMdEnv(project)
+  );
 
   const { process: childProcess, promise } = runPythonSubprocess({
     pythonPath: getPythonPath(backendPath),
@@ -681,6 +859,12 @@ async function runPRReview(
 
     // Finalize logs with success
     logCollector.finalize(true);
+
+    // Save PR review insights to memory (async, non-blocking)
+    savePRReviewToMemory(result.data!, repo, false).catch(err => {
+      debugLog('Failed to save PR review to memory', { error: err.message });
+    });
+
     return result.data!;
   } finally {
     // Clean up the registry when done (success or error)
@@ -697,11 +881,11 @@ export function registerPRHandlers(
 ): void {
   debugLog('Registering PR handlers');
 
-  // List open PRs
+  // List open PRs with pagination support
  ipcMain.handle(
     IPC_CHANNELS.GITHUB_PR_LIST,
-    async (_, projectId: string): Promise => {
-      debugLog('listPRs handler called', { projectId });
+    async (_, projectId: string, page: number = 1): Promise => {
+      debugLog('listPRs handler called', { projectId, page });
       const result = await withProjectOrNull(projectId, async (project) => {
         const config = getGitHubConfig(project);
         if (!config) {
@@ -710,9 +894,10 @@ export function registerPRHandlers(
         }
 
         try {
+          // Use pagination: per_page=100 (GitHub max), page=1,2,3...
           const prs = await githubFetch(
             config.token,
-            `/repos/${config.repo}/pulls?state=open&per_page=50`
+            `/repos/${config.repo}/pulls?state=open&per_page=100&page=${page}`
           ) as Array<{
             number: number;
             title: string;
@@ -730,7 +915,7 @@ export function registerPRHandlers(
             html_url: string;
           }>;
 
-          debugLog('Fetched PRs', { count: prs.length });
+          debugLog('Fetched PRs', { count: prs.length, page });
 
           return prs.map(pr => ({
             number: pr.number,
             title: pr.title,
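The handler above returns a single page; draining every page is left to the caller. A hedged sketch of how a renderer-side caller could accumulate pages until a short page signals the end (the invoke bridge and the 'github:pr-list' channel string are assumptions for illustration, not the real IPC_CHANNELS value):

  // Illustrative only: assumes an ipcRenderer.invoke-style bridge.
  interface PRSummary { number: number; title: string }

  async function listAllPRs(
    invoke: (channel: string, ...args: unknown[]) => Promise<unknown>,
    projectId: string
  ): Promise<PRSummary[]> {
    const all: PRSummary[] = [];
    for (let page = 1; ; page++) {
      const batch = (await invoke('github:pr-list', projectId, page)) as PRSummary[] | null;
      if (!batch || batch.length === 0) break;
      all.push(...batch);
      if (batch.length < 100) break; // fewer than per_page=100 means this was the last page
    }
    return all;
  }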
const prs = await githubFetch( config.token, - `/repos/${config.repo}/pulls?state=open&per_page=50` + `/repos/${config.repo}/pulls?state=open&per_page=100&page=${page}` ) as Array<{ number: number; title: string; @@ -730,7 +915,7 @@ export function registerPRHandlers( html_url: string; }>; - debugLog('Fetched PRs', { count: prs.length }); + debugLog('Fetched PRs', { count: prs.length, page }); return prs.map(pr => ({ number: pr.number, title: pr.title, @@ -864,6 +1049,23 @@ export function registerPRHandlers( } ); + // Batch get saved reviews - more efficient than individual calls + ipcMain.handle( + IPC_CHANNELS.GITHUB_PR_GET_REVIEWS_BATCH, + async (_, projectId: string, prNumbers: number[]): Promise> => { + debugLog('getReviewsBatch handler called', { projectId, count: prNumbers.length }); + const result = await withProjectOrNull(projectId, async (project) => { + const reviews: Record = {}; + for (const prNumber of prNumbers) { + reviews[prNumber] = getReviewResult(project, prNumber); + } + debugLog('Batch loaded reviews', { count: Object.values(reviews).filter(r => r !== null).length }); + return reviews; + }); + return result ?? {}; + } + ); + // Get PR review logs ipcMain.handle( IPC_CHANNELS.GITHUB_PR_GET_LOGS, @@ -967,8 +1169,8 @@ export function registerPRHandlers( // Post review to GitHub ipcMain.handle( IPC_CHANNELS.GITHUB_PR_POST_REVIEW, - async (_, projectId: string, prNumber: number, selectedFindingIds?: string[]): Promise => { - debugLog('postPRReview handler called', { projectId, prNumber, selectedCount: selectedFindingIds?.length }); + async (_, projectId: string, prNumber: number, selectedFindingIds?: string[], options?: { forceApprove?: boolean }): Promise => { + debugLog('postPRReview handler called', { projectId, prNumber, selectedCount: selectedFindingIds?.length, forceApprove: options?.forceApprove }); const postResult = await withProjectOrNull(projectId, async (project) => { const result = getReviewResult(project, prNumber); if (!result) { @@ -991,36 +1193,69 @@ export function registerPRHandlers( debugLog('Posting findings', { total: result.findings.length, selected: findings.length }); - // Build review body - let body = `## πŸ€– Auto Claude PR Review\n\n${result.summary}\n\n`; - - if (findings.length > 0) { - // Show selected count vs total if filtered - const countText = selectedSet - ? 
-            : `${findings.length} total`;
-          body += `### Findings (${countText})\n\n`;
-
-          for (const f of findings) {
-            const emoji = { critical: '🔴', high: '🟠', medium: '🟡', low: '🔵' }[f.severity] || '⚪';
-            body += `#### ${emoji} [${f.severity.toUpperCase()}] ${f.title}\n`;
-            body += `📍 \`${f.file}:${f.line}\`\n\n`;
-            body += `${f.description}\n\n`;
-            // Only show suggested fix if it has actual content
-            const suggestedFix = f.suggestedFix?.trim();
-            if (suggestedFix) {
-              body += `**Suggested fix:**\n\`\`\`\n${suggestedFix}\n\`\`\`\n\n`;
+        // Build review body - different format for auto-approve with suggestions
+        let body: string;
+
+        if (options?.forceApprove) {
+          // Auto-approve format: clean approval message with optional suggestions
+          body = `## ✅ Auto Claude Review - APPROVED\n\n`;
+          body += `**Status:** Ready to Merge\n\n`;
+          body += `**Summary:** ${result.summary}\n\n`;
+
+          if (findings.length > 0) {
+            body += `---\n\n`;
+            body += `### 💡 Suggestions (${findings.length})\n\n`;
+            body += `*These are non-blocking suggestions for consideration:*\n\n`;
+
+            for (const f of findings) {
+              const emoji = { critical: '🔴', high: '🟠', medium: '🟡', low: '🔵' }[f.severity] || '⚪';
+              body += `#### ${emoji} [${f.id}] [${f.severity.toUpperCase()}] ${f.title}\n`;
+              body += `📍 \`${f.file}:${f.line}\`\n\n`;
+              body += `${f.description}\n\n`;
+              const suggestedFix = f.suggestedFix?.trim();
+              if (suggestedFix) {
+                body += `**Suggested fix:**\n\`\`\`\n${suggestedFix}\n\`\`\`\n\n`;
+              }
             }
           }
+
+          body += `---\n*This automated review found no blocking issues. The PR can be safely merged.*\n\n`;
+          body += `*Generated by Auto Claude*`;
         } else {
-          body += `*No findings selected for this review.*\n\n`;
-        }
+          // Standard review format
+          body = `## 🤖 Auto Claude PR Review\n\n${result.summary}\n\n`;
+
+          if (findings.length > 0) {
+            // Show selected count vs total if filtered
+            const countText = selectedSet
+              ? `${findings.length} selected of ${result.findings.length} total`
+              : `${findings.length} total`;
+            body += `### Findings (${countText})\n\n`;
+
+            for (const f of findings) {
+              const emoji = { critical: '🔴', high: '🟠', medium: '🟡', low: '🔵' }[f.severity] || '⚪';
+              body += `#### ${emoji} [${f.id}] [${f.severity.toUpperCase()}] ${f.title}\n`;
+              body += `📍 \`${f.file}:${f.line}\`\n\n`;
+              body += `${f.description}\n\n`;
+              // Only show suggested fix if it has actual content
+              const suggestedFix = f.suggestedFix?.trim();
+              if (suggestedFix) {
+                body += `**Suggested fix:**\n\`\`\`\n${suggestedFix}\n\`\`\`\n\n`;
+              }
+            }
+          } else {
+            body += `*No findings selected for this review.*\n\n`;
+          }
 
-        body += `---\n*This review was generated by Auto Claude.*`;
+          body += `---\n*This review was generated by Auto Claude.*`;
+        }
 
-        // Determine review status based on selected findings
+        // Determine review status based on selected findings (or force approve)
         let overallStatus = result.overallStatus;
-        if (selectedSet) {
+        if (options?.forceApprove) {
+          // Force approve regardless of findings
+          overallStatus = 'approve';
+        } else if (selectedSet) {
           const hasBlocker = findings.some(f => f.severity === 'critical' || f.severity === 'high');
           overallStatus = hasBlocker ? 'request_changes' : (findings.length > 0 ? 'comment' : 'approve');
         }
@@ -1425,6 +1660,137 @@ export function registerPRHandlers(
     }
   );
 
+  // Check merge readiness (lightweight freshness check for verdict validation)
+  ipcMain.handle(
+    IPC_CHANNELS.GITHUB_PR_CHECK_MERGE_READINESS,
+    async (_, projectId: string, prNumber: number): Promise<MergeReadiness> => {
+      debugLog('checkMergeReadiness handler called', { projectId, prNumber });
+
+      const defaultResult: MergeReadiness = {
+        isDraft: false,
+        mergeable: 'UNKNOWN',
+        ciStatus: 'none',
+        blockers: [],
+      };
+
+      const result = await withProjectOrNull(projectId, async (project) => {
+        const config = getGitHubConfig(project);
+        if (!config) {
+          debugLog('No GitHub config found for checkMergeReadiness');
+          return defaultResult;
+        }
+
+        try {
+          // Fetch PR data including mergeable status
+          const pr = await githubFetch(
+            config.token,
+            `/repos/${config.repo}/pulls/${prNumber}`
+          ) as {
+            draft: boolean;
+            mergeable: boolean | null;
+            mergeable_state: string;
+            head: { sha: string };
+          };
+
+          // Determine mergeable status
+          let mergeable: MergeReadiness['mergeable'] = 'UNKNOWN';
+          if (pr.mergeable === true) {
+            mergeable = 'MERGEABLE';
+          } else if (pr.mergeable === false || pr.mergeable_state === 'dirty') {
+            mergeable = 'CONFLICTING';
+          }
+
+          // Fetch combined commit status for CI
+          let ciStatus: MergeReadiness['ciStatus'] = 'none';
+          try {
+            const status = await githubFetch(
+              config.token,
+              `/repos/${config.repo}/commits/${pr.head.sha}/status`
+            ) as {
+              state: 'success' | 'pending' | 'failure' | 'error';
+              total_count: number;
+            };
+
+            if (status.total_count === 0) {
+              // No status checks, check for check runs (GitHub Actions)
+              const checkRuns = await githubFetch(
+                config.token,
+                `/repos/${config.repo}/commits/${pr.head.sha}/check-runs`
+              ) as {
+                total_count: number;
+                check_runs: Array<{ conclusion: string | null; status: string }>;
+              };
+
+              if (checkRuns.total_count > 0) {
+                const hasFailing = checkRuns.check_runs.some(
+                  cr => cr.conclusion === 'failure' || cr.conclusion === 'cancelled'
+                );
+                const hasPending = checkRuns.check_runs.some(
+                  cr => cr.status !== 'completed'
+                );
+
+                if (hasFailing) {
+                  ciStatus = 'failing';
+                } else if (hasPending) {
+                  ciStatus = 'pending';
+                } else {
+                  ciStatus = 'passing';
+                }
+              }
+            } else {
+              // Use combined status
+              if (status.state === 'success') {
+                ciStatus = 'passing';
+              } else if (status.state === 'pending') {
+                ciStatus = 'pending';
+              } else {
+                ciStatus = 'failing';
+              }
+            }
+          } catch (err) {
+            debugLog('Failed to fetch CI status', { prNumber, error: err instanceof Error ? err.message : err });
+            // Continue without CI status
+          }
+
+          // Build blockers list
+          const blockers: string[] = [];
+          if (pr.draft) {
+            blockers.push('PR is in draft mode');
+          }
+          if (mergeable === 'CONFLICTING') {
+            blockers.push('Merge conflicts detected');
+          }
+          if (ciStatus === 'failing') {
+            blockers.push('CI checks are failing');
+          }
+
+          debugLog('checkMergeReadiness result', {
+            prNumber,
+            isDraft: pr.draft,
+            mergeable,
+            ciStatus,
+            blockers,
+          });
+
+          return {
+            isDraft: pr.draft,
+            mergeable,
+            ciStatus,
+            blockers,
+          };
+        } catch (error) {
+          debugLog('Failed to check merge readiness', {
+            prNumber,
+            error: error instanceof Error ? error.message : error,
+          });
+          return defaultResult;
+        }
+      });
+
+      return result ?? defaultResult;
+    }
+  );
+
   // Run follow-up review
   ipcMain.on(
     IPC_CHANNELS.GITHUB_PR_FOLLOWUP_REVIEW,
@@ -1489,10 +1855,9 @@ export function registerPRHandlers(
       const logCollector = new PRLogCollector(project, prNumber, repo, true);
 
       // Build environment with project settings
-      const followupEnv: Record<string, string> = {};
-      if (project.settings?.useClaudeMd !== false) {
-        followupEnv['USE_CLAUDE_MD'] = 'true';
-      }
+      const followupEnv = await getRunnerEnv(
+        getClaudeMdEnv(project)
+      );
 
       const { process: childProcess, promise } = runPythonSubprocess({
         pythonPath: getPythonPath(backendPath),
@@ -1541,6 +1906,11 @@ export function registerPRHandlers(
         // Finalize logs with success
         logCollector.finalize(true);
 
+        // Save follow-up PR review insights to memory (async, non-blocking)
+        savePRReviewToMemory(result.data!, repo, true).catch(err => {
+          debugLog('Failed to save follow-up PR review to memory', { error: err.message });
+        });
+
         debugLog('Follow-up review completed', { prNumber, findingsCount: result.data?.findings.length });
         sendProgress({
           phase: 'complete',
@@ -1571,5 +1941,226 @@ export function registerPRHandlers(
     }
   );
 
+  // Get workflows awaiting approval for a PR (fork PRs)
+  ipcMain.handle(
+    IPC_CHANNELS.GITHUB_WORKFLOWS_AWAITING_APPROVAL,
+    async (_, projectId: string, prNumber: number): Promise<{
+      awaiting_approval: number;
+      workflow_runs: Array<{ id: number; name: string; html_url: string; workflow_name: string }>;
+      can_approve: boolean;
+      error?: string;
+    }> => {
+      debugLog('getWorkflowsAwaitingApproval handler called', { projectId, prNumber });
+      const result = await withProjectOrNull(projectId, async (project) => {
+        const config = getGitHubConfig(project);
+        if (!config) {
+          return { awaiting_approval: 0, workflow_runs: [], can_approve: false, error: 'No GitHub config' };
+        }
+
+        try {
+          // First get the PR's head SHA
+          const prData = await githubFetch(
+            config.token,
+            `/repos/${config.repo}/pulls/${prNumber}`
+          ) as { head?: { sha?: string } };
+
+          const headSha = prData?.head?.sha;
+          if (!headSha) {
+            return { awaiting_approval: 0, workflow_runs: [], can_approve: false };
+          }
+
+          // Query workflow runs with action_required status
+          const runsData = await githubFetch(
+            config.token,
+            `/repos/${config.repo}/actions/runs?status=action_required&per_page=100`
+          ) as { workflow_runs?: Array<{ id: number; name: string; html_url: string; head_sha: string; workflow?: { name?: string } }> };
+
+          const allRuns = runsData?.workflow_runs || [];
+
+          // Filter to only runs for this PR's head SHA
+          const prRuns = allRuns
+            .filter(run => run.head_sha === headSha)
+            .map(run => ({
+              id: run.id,
+              name: run.name,
+              html_url: run.html_url,
+              workflow_name: run.workflow?.name || 'Unknown',
+            }));
+
+          debugLog('Found workflows awaiting approval', { prNumber, count: prRuns.length });
+
+          return {
+            awaiting_approval: prRuns.length,
+            workflow_runs: prRuns,
+            can_approve: true, // Assume token has permission; will fail if not
+          };
+        } catch (error) {
+          debugLog('Failed to get workflows awaiting approval', { prNumber, error: error instanceof Error ? error.message : error });
+          return {
+            awaiting_approval: 0,
+            workflow_runs: [],
+            can_approve: false,
+            error: error instanceof Error ? error.message : 'Unknown error',
+          };
+        }
+      });
+
+      return result ?? { awaiting_approval: 0, workflow_runs: [], can_approve: false };
+    }
+  );
+
+  // Approve a workflow run
+  ipcMain.handle(
+    IPC_CHANNELS.GITHUB_WORKFLOW_APPROVE,
+    async (_, projectId: string, runId: number): Promise<boolean> => {
+      debugLog('approveWorkflow handler called', { projectId, runId });
+      const result = await withProjectOrNull(projectId, async (project) => {
+        const config = getGitHubConfig(project);
+        if (!config) {
+          debugLog('No GitHub config found');
+          return false;
+        }
+
+        try {
+          // Approve the workflow run
+          await githubFetch(
+            config.token,
+            `/repos/${config.repo}/actions/runs/${runId}/approve`,
+            { method: 'POST' }
+          );
+
+          debugLog('Workflow approved successfully', { runId });
+          return true;
+        } catch (error) {
+          debugLog('Failed to approve workflow', { runId, error: error instanceof Error ? error.message : error });
+          return false;
+        }
+      });
+
+      return result ?? false;
+    }
+  );
+
+  // Get PR review memories from the memory layer
+  ipcMain.handle(
+    IPC_CHANNELS.GITHUB_PR_MEMORY_GET,
+    async (_, projectId: string, limit: number = 10): Promise<PRReviewMemory[]> => {
+      debugLog('getPRReviewMemories handler called', { projectId, limit });
+      const result = await withProjectOrNull(projectId, async (project) => {
+        const memoryDir = path.join(getGitHubDir(project), 'memory', project.name || 'unknown');
+        const memories: PRReviewMemory[] = [];
+
+        // Try to load from file-based storage
+        try {
+          const indexPath = path.join(memoryDir, 'reviews_index.json');
+          if (!fs.existsSync(indexPath)) {
+            debugLog('No PR review memories found', { projectId });
+            return [];
+          }
+
+          const indexContent = fs.readFileSync(indexPath, 'utf-8');
+          const index = JSON.parse(sanitizeNetworkData(indexContent));
+          const reviews = index.reviews || [];
+
+          // Load individual review memories
+          for (const entry of reviews.slice(0, limit)) {
+            try {
+              const reviewPath = path.join(memoryDir, `pr_${entry.pr_number}_review.json`);
+              if (fs.existsSync(reviewPath)) {
+                const reviewContent = fs.readFileSync(reviewPath, 'utf-8');
+                const memory = JSON.parse(sanitizeNetworkData(reviewContent));
+                memories.push({
+                  prNumber: memory.pr_number,
+                  repo: memory.repo,
+                  verdict: memory.summary?.verdict || 'unknown',
+                  timestamp: memory.timestamp,
+                  summary: memory.summary,
+                  keyFindings: memory.key_findings || [],
+                  patterns: memory.patterns || [],
+                  gotchas: memory.gotchas || [],
+                  isFollowup: memory.is_followup || false,
+                });
+              }
+            } catch (err) {
+              debugLog('Failed to load PR review memory', { prNumber: entry.pr_number, error: err instanceof Error ? err.message : err });
+            }
+          }
+
+          debugLog('Loaded PR review memories', { count: memories.length });
+          return memories;
+        } catch (error) {
+          debugLog('Failed to load PR review memories', { error: error instanceof Error ? error.message : error });
+          return [];
+        }
+      });
+      return result ?? [];
+    }
+  );
+
+  // Search PR review memories
+  ipcMain.handle(
+    IPC_CHANNELS.GITHUB_PR_MEMORY_SEARCH,
+    async (_, projectId: string, query: string, limit: number = 10): Promise<PRReviewMemory[]> => {
+      debugLog('searchPRReviewMemories handler called', { projectId, query, limit });
+      const result = await withProjectOrNull(projectId, async (project) => {
+        const memoryDir = path.join(getGitHubDir(project), 'memory', project.name || 'unknown');
+        const memories: PRReviewMemory[] = [];
+        const queryLower = query.toLowerCase();
+
+        // Search through file-based storage
+        try {
+          const indexPath = path.join(memoryDir, 'reviews_index.json');
+          if (!fs.existsSync(indexPath)) {
+            return [];
+          }
+
+          const indexContent = fs.readFileSync(indexPath, 'utf-8');
+          const index = JSON.parse(sanitizeNetworkData(indexContent));
+          const reviews = index.reviews || [];
+
+          // Search individual review memories
+          for (const entry of reviews) {
+            try {
+              const reviewPath = path.join(memoryDir, `pr_${entry.pr_number}_review.json`);
+              if (fs.existsSync(reviewPath)) {
+                const reviewContent = fs.readFileSync(reviewPath, 'utf-8');
+
+                // Check if content matches query
+                if (reviewContent.toLowerCase().includes(queryLower)) {
+                  const memory = JSON.parse(sanitizeNetworkData(reviewContent));
+                  memories.push({
+                    prNumber: memory.pr_number,
+                    repo: memory.repo,
+                    verdict: memory.summary?.verdict || 'unknown',
+                    timestamp: memory.timestamp,
+                    summary: memory.summary,
+                    keyFindings: memory.key_findings || [],
+                    patterns: memory.patterns || [],
+                    gotchas: memory.gotchas || [],
+                    isFollowup: memory.is_followup || false,
+                  });
+                }
+              }
+
+              // Stop if we have enough
+              if (memories.length >= limit) {
+                break;
+              }
+            } catch (err) {
+              debugLog('Failed to search PR review memory', { prNumber: entry.pr_number, error: err instanceof Error ? err.message : err });
+            }
+          }
+
+          debugLog('Found matching PR review memories', { count: memories.length, query });
+          return memories;
+        } catch (error) {
+          debugLog('Failed to search PR review memories', { error: error instanceof Error ? error.message : error });
+          return [];
+        }
+      });
+      return result ?? [];
+    }
+  );
+
   debugLog('PR handlers registered');
 }
diff --git a/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts b/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts
index b233f59bb1..7e71b12640 100644
--- a/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts
@@ -8,6 +8,7 @@ import { AUTO_BUILD_PATHS, getSpecsDir } from '../../../shared/constants';
 import type { Project, TaskMetadata } from '../../../shared/types';
 import { withSpecNumberLock } from '../../utils/spec-number-lock';
 import { debugLog } from './utils/logger';
+import { labelMatchesWholeWord } from '../shared/label-utils';
 
 export interface SpecCreationData {
   specId: string;
@@ -55,7 +56,14 @@ function determineCategoryFromLabels(labels: string[]): 'feature' | 'bug_fix' |
   }
 
   // Check for infrastructure labels
-  if (lowerLabels.some(l => l.includes('infrastructure') || l.includes('devops') || l.includes('deployment') || l.includes('ci') || l.includes('cd'))) {
+  // Use whole-word matching for 'ci' and 'cd' to avoid false positives like 'acid' or 'decide'
+  if (lowerLabels.some(l =>
+    l.includes('infrastructure') ||
+    l.includes('devops') ||
+    l.includes('deployment') ||
+    labelMatchesWholeWord(l, 'ci') ||
+    labelMatchesWholeWord(l, 'cd')
+  )) {
     return 'infrastructure';
   }
 
@@ -89,7 +97,8 @@ export async function createSpecForIssue(
   issueTitle: string,
   taskDescription: string,
   githubUrl: string,
-  labels: string[] = []
+  labels: string[] = [],
+  baseBranch?: string
 ): Promise<SpecCreationData> {
   const specsBaseDir = getSpecsDir(project.autoBuildPath);
   const specsDir = path.join(project.path, specsBaseDir);
@@ -144,7 +153,10 @@ export async function createSpecForIssue(
     sourceType: 'github',
     githubIssueNumber: issueNumber,
     githubUrl,
-    category
+    category,
+    // Store baseBranch for worktree creation and QA comparison
+    // This comes from project.settings.mainBranch or task-level override
+    ...(baseBranch && { baseBranch })
   };
   writeFileSync(
     path.join(specDir, 'task_metadata.json'),
diff --git a/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts
index 7e0f960be5..a84e44a79c 100644
--- a/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts
@@ -19,6 +19,7 @@ import type { Project, AppSettings } from '../../../shared/types';
 import { createContextLogger } from './utils/logger';
 import { withProjectOrNull } from './utils/project-middleware';
 import { createIPCCommunicators } from './utils/ipc-communicator';
+import { getRunnerEnv } from './utils/runner-env';
 import {
   runPythonSubprocess,
   getPythonPath,
@@ -254,10 +255,13 @@ async function runTriage(
 
   debugLog('Spawning triage process', { args, model, thinkingLevel });
 
+  const subprocessEnv = await getRunnerEnv();
+
   const { promise } = runPythonSubprocess({
     pythonPath: getPythonPath(backendPath),
     args,
     cwd: backendPath,
+    env: subprocessEnv,
     onProgress: (percent, message) => {
       debugLog('Progress update', { percent, message });
       sendProgress({
diff --git a/apps/frontend/src/main/ipc-handlers/github/utils/__tests__/runner-env.test.ts b/apps/frontend/src/main/ipc-handlers/github/utils/__tests__/runner-env.test.ts
new file mode 100644
index 0000000000..d2a2546892
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/github/utils/__tests__/runner-env.test.ts
@@ -0,0 +1,55 @@
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+
+const mockGetAPIProfileEnv = vi.fn();
+const mockGetOAuthModeClearVars = vi.fn();
+
+vi.mock('../../../../services/profile', () => ({
+  getAPIProfileEnv: (...args: unknown[]) => mockGetAPIProfileEnv(...args),
+}));
+
+vi.mock('../../../../agent/env-utils', () => ({
+  getOAuthModeClearVars: (...args: unknown[]) => mockGetOAuthModeClearVars(...args),
+}));
+
+import { getRunnerEnv } from '../runner-env';
+
+describe('getRunnerEnv', () => {
+  beforeEach(() => {
+    vi.clearAllMocks();
+  });
+
+  it('merges API profile env with OAuth clear vars', async () => {
+    mockGetAPIProfileEnv.mockResolvedValue({
+      ANTHROPIC_AUTH_TOKEN: 'token',
+      ANTHROPIC_BASE_URL: 'https://api.example.com',
+    });
+    mockGetOAuthModeClearVars.mockReturnValue({
+      ANTHROPIC_AUTH_TOKEN: '',
+    });
+
+    const result = await getRunnerEnv();
+
+    expect(mockGetOAuthModeClearVars).toHaveBeenCalledWith({
+      ANTHROPIC_AUTH_TOKEN: 'token',
+      ANTHROPIC_BASE_URL: 'https://api.example.com',
+    });
+    expect(result).toEqual({
+      ANTHROPIC_AUTH_TOKEN: '',
+      ANTHROPIC_BASE_URL: 'https://api.example.com',
+    });
+  });
+
+  it('includes extra env values', async () => {
+    mockGetAPIProfileEnv.mockResolvedValue({
+      ANTHROPIC_AUTH_TOKEN: 'token',
+    });
+    mockGetOAuthModeClearVars.mockReturnValue({});
+
+    const result = await getRunnerEnv({ USE_CLAUDE_MD: 'true' });
+
+    expect(result).toEqual({
+      ANTHROPIC_AUTH_TOKEN: 'token',
+      USE_CLAUDE_MD: 'true',
+    });
+  });
+});
diff --git a/apps/frontend/src/main/ipc-handlers/github/utils/runner-env.ts b/apps/frontend/src/main/ipc-handlers/github/utils/runner-env.ts
new file mode 100644
index 0000000000..b4a3cbe88d
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/github/utils/runner-env.ts
@@ -0,0 +1,31 @@
+import { getOAuthModeClearVars } from '../../../agent/env-utils';
+import { getAPIProfileEnv } from '../../../services/profile';
+import { getProfileEnv } from '../../../rate-limit-detector';
+
+/**
+ * Get environment variables for Python runner subprocesses.
+ *
+ * Environment variable precedence (lowest to highest):
+ * 1. apiProfileEnv - Custom Anthropic-compatible API profile (ANTHROPIC_BASE_URL, ANTHROPIC_AUTH_TOKEN)
+ * 2. oauthModeClearVars - Clears stale ANTHROPIC_* vars when in OAuth mode
+ * 3. profileEnv - Claude OAuth token from profile manager (CLAUDE_CODE_OAUTH_TOKEN)
+ * 4. extraEnv - Caller-specific vars (e.g., USE_CLAUDE_MD)
+ *
+ * The profileEnv is critical for OAuth authentication (#563) - it retrieves the
+ * decrypted OAuth token from the profile manager's encrypted storage (macOS Keychain
+ * via Electron's safeStorage API).
+ */
+export async function getRunnerEnv(
+  extraEnv?: Record<string, string>
+): Promise<Record<string, string>> {
+  const apiProfileEnv = await getAPIProfileEnv();
+  const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv);
+  const profileEnv = getProfileEnv();
+
+  return {
+    ...apiProfileEnv,
+    ...oauthModeClearVars,
+    ...profileEnv, // OAuth token from profile manager (fixes #563)
+    ...extraEnv,
+  };
+}
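A worked illustration of the precedence documented above, with purely illustrative values (none of these tokens or URLs are real): each later spread overrides keys from earlier ones, so the OAuth clear vars can blank a stale API token while the profile manager's OAuth token survives.

  // Hedged example of the merge order in getRunnerEnv:
  const apiProfileEnv = { ANTHROPIC_BASE_URL: 'https://api.example.com', ANTHROPIC_AUTH_TOKEN: 'api-token' };
  const oauthModeClearVars = { ANTHROPIC_AUTH_TOKEN: '' };       // clears the stale var in OAuth mode
  const profileEnv = { CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token' }; // decrypted from profile storage
  const extraEnv = { USE_CLAUDE_MD: 'true' };

  const merged = { ...apiProfileEnv, ...oauthModeClearVars, ...profileEnv, ...extraEnv };
  // merged === {
  //   ANTHROPIC_BASE_URL: 'https://api.example.com',
  //   ANTHROPIC_AUTH_TOKEN: '',            // overridden by the clear vars
  //   CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token',
  //   USE_CLAUDE_MD: 'true'
  // }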
diff --git a/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts b/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts
index 8fe079820b..71f26ef36f 100644
--- a/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts
@@ -4,11 +4,15 @@ import { runPythonSubprocess } from './subprocess-runner';
 import * as childProcess from 'child_process';
 import EventEmitter from 'events';
 
-// Mock child_process.spawn
-vi.mock('child_process', () => ({
-  spawn: vi.fn(),
-  exec: vi.fn(),
-}));
+// Mock child_process with importOriginal to preserve all exports
+vi.mock('child_process', async (importOriginal) => {
+  const actual = await importOriginal<typeof import('child_process')>();
+  return {
+    ...actual,
+    spawn: vi.fn(),
+    exec: vi.fn(),
+  };
+});
 
 // Mock parsePythonCommand
 vi.mock('../../../python-detector', () => ({
diff --git a/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts b/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts
index eea6215d90..7b343efb27 100644
--- a/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts
@@ -63,7 +63,7 @@ export function registerImportIssues(): void {
         ) as GitLabAPIIssue;
 
         // Create a spec/task from the issue
-        const task = await createSpecForIssue(project, apiIssue, config);
+        const task = await createSpecForIssue(project, apiIssue, config, project.settings?.mainBranch);
 
         if (task) {
           tasks.push(task);
diff --git a/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts b/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts
index 20b1a422cd..f383f03204 100644
--- a/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts
@@ -158,7 +158,7 @@ export function registerInvestigateIssue(
       });
 
       // Create spec for the issue
-      const task = await createSpecForIssue(project, issue, config);
+      const task = await createSpecForIssue(project, issue, config, project.settings?.mainBranch);
 
       if (!task) {
         sendError(getMainWindow, project.id, 'Failed to create task from issue');
diff --git a/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts b/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts
index a8830ca320..c624a63f70 100644
--- a/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts
+++ b/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts
@@ -7,6 +7,7 @@ import { mkdir, writeFile, readFile, stat } from 'fs/promises';
 import path from 'path';
 import type { Project } from '../../../shared/types';
 import type { GitLabAPIIssue, GitLabConfig } from './types';
+import { labelMatchesWholeWord } from '../shared/label-utils';
 
 /**
  * Simplified task info returned when creating a spec from a GitLab issue.
@@ -60,6 +61,47 @@ function debugLog(message: string, data?: unknown): void {
   }
 }
 
+/**
+ * Determine task category based on GitLab issue labels
+ * Maps to TaskCategory type from shared/types/task.ts
+ */
+function determineCategoryFromLabels(labels: string[]): 'feature' | 'bug_fix' | 'refactoring' | 'documentation' | 'security' | 'performance' | 'ui_ux' | 'infrastructure' | 'testing' {
+  const lowerLabels = labels.map(l => l.toLowerCase());
+
+  if (lowerLabels.some(l => l.includes('bug') || l.includes('defect') || l.includes('error') || l.includes('fix'))) {
+    return 'bug_fix';
+  }
+  if (lowerLabels.some(l => l.includes('security') || l.includes('vulnerability') || l.includes('cve'))) {
+    return 'security';
+  }
+  if (lowerLabels.some(l => l.includes('performance') || l.includes('optimization') || l.includes('speed'))) {
+    return 'performance';
+  }
+  if (lowerLabels.some(l => l.includes('ui') || l.includes('ux') || l.includes('design') || l.includes('styling'))) {
+    return 'ui_ux';
+  }
+  // Use whole-word matching for 'ci' and 'cd' to avoid false positives like 'acid' or 'decide'
+  if (lowerLabels.some(l =>
+    l.includes('infrastructure') ||
+    l.includes('devops') ||
+    l.includes('deployment') ||
+    labelMatchesWholeWord(l, 'ci') ||
+    labelMatchesWholeWord(l, 'cd')
+  )) {
+    return 'infrastructure';
+  }
+  if (lowerLabels.some(l => l.includes('test') || l.includes('testing') || l.includes('qa'))) {
+    return 'testing';
+  }
+  if (lowerLabels.some(l => l.includes('refactor') || l.includes('cleanup') || l.includes('maintenance') || l.includes('chore') || l.includes('tech-debt') || l.includes('technical debt'))) {
+    return 'refactoring';
+  }
+  if (lowerLabels.some(l => l.includes('documentation') || l.includes('docs'))) {
+    return 'documentation';
+  }
+  return 'feature';
+}
+
 function stripControlChars(value: string, allowNewlines: boolean): string {
   let sanitized = '';
   for (let i = 0; i < value.length; i += 1) {
@@ -258,7 +300,8 @@ async function pathExists(filePath: string): Promise<boolean> {
 export async function createSpecForIssue(
   project: Project,
   issue: GitLabAPIIssue,
-  config: GitLabConfig
+  config: GitLabConfig,
+  baseBranch?: string
 ): Promise {
   try {
     // Validate and sanitize network data before writing to disk
@@ -321,7 +364,7 @@ export async function createSpecForIssue(
     const taskContent = buildIssueContext(safeIssue, safeProject, config.instanceUrl);
     await writeFile(path.join(specDir, 'TASK.md'), taskContent, 'utf-8');
 
-    // Create metadata.json
+    // Create metadata.json (legacy format for GitLab-specific data)
     const metadata = {
       source: 'gitlab',
       gitlab: {
@@ -339,6 +382,21 @@ export async function createSpecForIssue(
     };
     await writeFile(metadataPath, JSON.stringify(metadata, null, 2), 'utf-8');
 
+    // Create task_metadata.json (consistent with GitHub format for backend compatibility)
+    const taskMetadata = {
+      sourceType: 'gitlab' as const,
+      gitlabIssueIid: safeIssue.iid,
+      gitlabUrl: safeIssue.web_url,
+      category: determineCategoryFromLabels(safeIssue.labels || []),
+      // Store baseBranch for worktree creation and QA comparison
+      ...(baseBranch && { baseBranch })
+    };
+    await writeFile(
+      path.join(specDir, 'task_metadata.json'),
+      JSON.stringify(taskMetadata, null, 2),
+      'utf-8'
+    );
+
     debugLog('Created spec for issue:', { iid: safeIssue.iid, specDir });
 
     // Return task info
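Both spec-utils files import labelMatchesWholeWord from ../shared/label-utils, whose implementation is not part of this diff. A minimal sketch of the whole-word behavior the comments describe (this is an assumption about the helper, not its actual source):

  // Hedged sketch: 'ci'/'cd' must appear as standalone words, so 'acid' and 'decide' don't match.
  function labelMatchesWholeWord(label: string, word: string): boolean {
    const escaped = word.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // escape regex metacharacters
    return new RegExp(`\\b${escaped}\\b`, 'i').test(label);      // \b bounds on non-word characters
  }
  // labelMatchesWholeWord('ci/cd', 'ci')  -> true
  // labelMatchesWholeWord('acid', 'ci')   -> false
  // labelMatchesWholeWord('decide', 'ci') -> false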
diff --git a/apps/frontend/src/main/ipc-handlers/index.ts b/apps/frontend/src/main/ipc-handlers/index.ts
index 3501abd8bc..18d413e6cc 100644
--- a/apps/frontend/src/main/ipc-handlers/index.ts
+++ b/apps/frontend/src/main/ipc-handlers/index.ts
@@ -23,7 +23,7 @@ import { registerEnvHandlers } from './env-handlers';
 import { registerLinearHandlers } from './linear-handlers';
 import { registerGithubHandlers } from './github-handlers';
 import { registerGitlabHandlers } from './gitlab-handlers';
-import { registerAutobuildSourceHandlers } from './autobuild-source-handlers';
+import { registerADOHandlers } from './ado';
 import { registerIdeationHandlers } from './ideation-handlers';
 import { registerChangelogHandlers } from './changelog-handlers';
 import { registerInsightsHandlers } from './insights-handlers';
@@ -32,6 +32,8 @@ import { registerAppUpdateHandlers } from './app-update-handlers';
 import { registerDebugHandlers } from './debug-handlers';
 import { registerClaudeCodeHandlers } from './claude-code-handlers';
 import { registerMcpHandlers } from './mcp-handlers';
+import { registerProfileHandlers } from './profile-handlers';
+import { registerTerminalWorktreeIpcHandlers } from './terminal';
 import { notificationService } from '../notification-service';
 
 /**
@@ -60,6 +62,9 @@ export function setupIpcHandlers(
   // Terminal and Claude profile handlers
   registerTerminalHandlers(terminalManager, getMainWindow);
 
+  // Terminal worktree handlers (isolated development in worktrees)
+  registerTerminalWorktreeIpcHandlers();
+
   // Agent event handlers (event forwarding from agent manager to renderer)
   registerAgenteventsHandlers(agentManager, getMainWindow);
 
@@ -87,8 +92,8 @@ export function setupIpcHandlers(
   // GitLab integration handlers
   registerGitlabHandlers(agentManager, getMainWindow);
 
-  // Auto-build source update handlers
-  registerAutobuildSourceHandlers(getMainWindow);
+  // Azure DevOps integration handlers
+  registerADOHandlers();
 
   // Ideation handlers
   registerIdeationHandlers(agentManager, getMainWindow);
@@ -114,6 +119,9 @@ export function setupIpcHandlers(
   // MCP server health check handlers
   registerMcpHandlers();
 
+  // API Profile handlers (custom Anthropic-compatible endpoints)
+  registerProfileHandlers();
+
   console.warn('[IPC] All handler modules registered successfully');
 }
 
@@ -122,6 +130,7 @@ export {
   registerProjectHandlers,
   registerTaskHandlers,
   registerTerminalHandlers,
+  registerTerminalWorktreeIpcHandlers,
   registerAgenteventsHandlers,
   registerSettingsHandlers,
   registerFileHandlers,
@@ -131,7 +140,7 @@ export {
   registerLinearHandlers,
   registerGithubHandlers,
   registerGitlabHandlers,
-  registerAutobuildSourceHandlers,
+  registerADOHandlers,
   registerIdeationHandlers,
   registerChangelogHandlers,
   registerInsightsHandlers,
@@ -139,5 +148,6 @@ export {
   registerAppUpdateHandlers,
   registerDebugHandlers,
   registerClaudeCodeHandlers,
-  registerMcpHandlers
+  registerMcpHandlers,
+  registerProfileHandlers
 };
diff --git a/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts b/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts
index 0515529973..50e16973e4 100644
--- a/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts
@@ -28,6 +28,12 @@ const DANGEROUS_FLAGS = new Set([
   '--require', '-r'
 ]);
 
+/**
+ * Defense-in-depth: Shell metacharacters that could enable command injection
+ * when shell: true is used on Windows
+ */
+const SHELL_METACHARACTERS = ['&', '|', '>', '<', '^', '%', ';', '$', '`', '\n', '\r'];
+
 /**
  * Validate that a command is in the safe allowlist
  */
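Why the metacharacter screen matters: with shell: true on Windows, the assembled command line is handed to cmd.exe, so a metacharacter inside an argument can splice in a second command. A hedged illustration with hypothetical hostile input (the 'type' command and file names are examples only, not part of the diff):

  import { spawn } from 'child_process';

  // 'safe.txt & calc.exe' would make cmd.exe run calc.exe after type.
  const userArg = 'safe.txt & calc.exe'; // hypothetical hostile input
  const hasMeta = ['&', '|', '>', '<', '^', '%', ';', '$', '`', '\n', '\r']
    .some(ch => userArg.includes(ch));   // same screen as SHELL_METACHARACTERS above
  if (!hasMeta) {
    spawn('type', [userArg], { shell: true }); // only spawn when the args are inert
  }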
+
 /**
  * Validate that a command is in the safe allowlist
  */
@@ -39,11 +45,22 @@ function isCommandSafe(command: string | undefined): boolean {
 }
 
 /**
- * Validate that args don't contain dangerous interpreter flags
+ * Validate that args don't contain dangerous interpreter flags or shell metacharacters
  */
 function areArgsSafe(args: string[] | undefined): boolean {
   if (!args || args.length === 0) return true;
-  return !args.some(arg => DANGEROUS_FLAGS.has(arg));
+
+  // Check for dangerous interpreter flags
+  if (args.some(arg => DANGEROUS_FLAGS.has(arg))) return false;
+
+  // On Windows with shell: true, check for shell metacharacters that could enable injection
+  if (process.platform === 'win32') {
+    if (args.some(arg => SHELL_METACHARACTERS.some(char => arg.includes(char)))) {
+      return false;
+    }
+  }
+
+  return true;
 }
 
 /**
@@ -171,7 +188,7 @@ async function checkCommandHealth(server: CustomMcpServer, startTime: number): P
         return resolve({
           serverId: server.id,
           status: 'unhealthy',
-          message: 'Args contain dangerous interpreter flags',
+          message: 'Args contain dangerous flags or shell metacharacters',
           checkedAt: new Date().toISOString(),
         });
       }
@@ -394,14 +411,17 @@ async function testCommandConnection(server: CustomMcpServer, startTime: number)
       return resolve({
         serverId: server.id,
         success: false,
-        message: 'Args contain dangerous interpreter flags',
+        message: 'Args contain dangerous flags or shell metacharacters',
       });
     }
 
     const args = server.args || [];
+
+    // On Windows, use shell: true to properly handle .cmd/.bat scripts like npx
     const proc = spawn(server.command!, args, {
       stdio: ['pipe', 'pipe', 'pipe'],
       timeout: 15000, // OS-level timeout for reliable process termination
+      shell: process.platform === 'win32', // Required for Windows to run npx.cmd
    });

    let stdout = '';
diff --git a/apps/frontend/src/main/ipc-handlers/memory-handlers.ts b/apps/frontend/src/main/ipc-handlers/memory-handlers.ts
index 5b8c6d0504..9ea2b79ab4 100644
--- a/apps/frontend/src/main/ipc-handlers/memory-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/memory-handlers.ts
@@ -25,7 +25,7 @@ import {
 } from '../memory-service';
 import { validateOpenAIApiKey } from '../api-validation-service';
 import { parsePythonCommand } from '../python-detector';
-import { getConfiguredPythonPath } from '../python-env-manager';
+import { getConfiguredPythonPath, pythonEnvManager } from '../python-env-manager';
 import { openTerminalWithCommand } from './claude-code-handlers';
 
 /**
@@ -212,7 +212,11 @@ function checkOllamaInstalled(): OllamaInstallStatus {
  * - Official method per https://winstall.app/apps/Ollama.Ollama
  * - Winget is pre-installed on Windows 10 (1709+) and Windows 11
  *
- * macOS/Linux: Uses official install script from https://ollama.com/download
+ * macOS: Uses Homebrew (most common package manager on macOS)
+ * - Official method: brew install ollama
+ * - Reference: https://ollama.com/download/mac
+ *
+ * Linux: Uses official install script from https://ollama.com/download
  *
  * @returns {string} The install command to run in terminal
  */
@@ -222,8 +226,13 @@ function getOllamaInstallCommand(): string {
     // This is an official installation method for Ollama on Windows
     // Reference: https://winstall.app/apps/Ollama.Ollama
     return 'winget install --id Ollama.Ollama --accept-source-agreements';
+  } else if (process.platform === 'darwin') {
+    // macOS: Use Homebrew (most widely used package manager on macOS)
+    // Official Ollama installation method for macOS
+    // Reference: https://ollama.com/download/mac
+    return 'brew install ollama';
   } else {
-    // macOS/Linux: Use shell script from official Ollama
+    // Linux: Use shell script from official Ollama
     // Reference: https://ollama.com/download
     return 'curl -fsSL https://ollama.com/install.sh | sh';
   }
 }
@@ -296,6 +305,9 @@ async function executeOllamaDetector(
   let resolved = false;
   const proc = spawn(pythonExe, args, {
     stdio: ['ignore', 'pipe', 'pipe'],
+    // Use sanitized Python environment to prevent PYTHONHOME contamination
+    // Fixes "Could not find platform independent libraries" error on Windows
+    env: pythonEnvManager.getPythonEnv(),
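+    // Assumption for illustration: getPythonEnv() is expected to return a copy
+    // of process.env with interpreter-specific overrides (PYTHONHOME/PYTHONPATH)
+    // stripped or normalized, so the bundled interpreter resolves its own stdlib.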
   });
 
   let stdout = '';
@@ -769,6 +781,9 @@ export function registerMemoryHandlers(): void {
       const proc = spawn(pythonExe, args, {
         stdio: ['ignore', 'pipe', 'pipe'],
         timeout: 600000, // 10 minute timeout for large models
+        // Use sanitized Python environment to prevent PYTHONHOME contamination
+        // Fixes "Could not find platform independent libraries" error on Windows
+        env: pythonEnvManager.getPythonEnv(),
       });
 
       let stdout = '';
diff --git a/apps/frontend/src/main/ipc-handlers/profile-handlers.test.ts b/apps/frontend/src/main/ipc-handlers/profile-handlers.test.ts
new file mode 100644
index 0000000000..0e115e4647
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/profile-handlers.test.ts
@@ -0,0 +1,341 @@
+/**
+ * Tests for profile IPC handlers
+ *
+ * Tests profiles:set-active handler with support for:
+ * - Setting valid profile as active
+ * - Switching to OAuth (null profileId)
+ */
+
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+import type { APIProfile, ProfilesFile } from '@shared/types/profile';
+
+// Hoist mocked functions to avoid circular dependency in atomicModifyProfiles
+const { mockedLoadProfilesFile, mockedSaveProfilesFile } = vi.hoisted(() => ({
+  mockedLoadProfilesFile: vi.fn(),
+  mockedSaveProfilesFile: vi.fn()
+}));
+
+// Mock electron before importing
+vi.mock('electron', () => ({
+  ipcMain: {
+    handle: vi.fn(),
+    on: vi.fn()
+  }
+}));
+
+// Mock profile service
+vi.mock('../services/profile', () => ({
+  loadProfilesFile: mockedLoadProfilesFile,
+  saveProfilesFile: mockedSaveProfilesFile,
+  validateFilePermissions: vi.fn(),
+  getProfilesFilePath: vi.fn(() => '/test/profiles.json'),
+  createProfile: vi.fn(),
+  updateProfile: vi.fn(),
+  deleteProfile: vi.fn(),
+  testConnection: vi.fn(),
+  discoverModels: vi.fn(),
+  atomicModifyProfiles: vi.fn(async (modifier: (file: unknown) => unknown) => {
+    const file = await mockedLoadProfilesFile();
+    const modified = modifier(file);
+    await mockedSaveProfilesFile(modified as never);
+    return modified;
+  })
+}));
+
+import { registerProfileHandlers } from './profile-handlers';
+import { ipcMain } from 'electron';
+import { IPC_CHANNELS } from '../../shared/constants';
+import {
+  loadProfilesFile,
+  saveProfilesFile,
+  validateFilePermissions,
+  testConnection
+} from '../services/profile';
+import type { TestConnectionResult } from '@shared/types/profile';
+
+// Get the handler function for testing
+function getSetActiveHandler() {
+  const calls = (ipcMain.handle as unknown as ReturnType<typeof vi.fn>).mock.calls;
+  const setActiveCall = calls.find(
+    (call) => call[0] === IPC_CHANNELS.PROFILES_SET_ACTIVE
+  );
+  return setActiveCall?.[1];
+}
+
+// Get the testConnection handler function for testing
+function getTestConnectionHandler() {
+  const calls = (ipcMain.handle as unknown as ReturnType<typeof vi.fn>).mock.calls;
+  const testConnectionCall = calls.find(
+    (call) => call[0] === IPC_CHANNELS.PROFILES_TEST_CONNECTION
+  );
+  return testConnectionCall?.[1];
+}
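+// How the lookup works (illustrative): ipcMain.handle is a vi.fn() here, so each
+// registerProfileHandlers() call records [channel, handler] pairs in mock.calls;
+// the helpers above simply pick out the handler registered for a given channel.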
+
+describe('profile-handlers - setActiveProfile', () => {
+  beforeEach(() => {
+    vi.clearAllMocks();
+    registerProfileHandlers();
+  });
+
+  const mockProfiles: APIProfile[] = [
+    {
+      id: 'profile-1',
+      name: 'Test Profile 1',
+      baseUrl: 'https://api.anthropic.com',
+      apiKey: 'sk-ant-test-key-1',
+      createdAt: Date.now(),
+      updatedAt: Date.now()
+    },
+    {
+      id: 'profile-2',
+      name: 'Test Profile 2',
+      baseUrl: 'https://custom.api.com',
+      apiKey: 'sk-custom-key-2',
+      createdAt: Date.now(),
+      updatedAt: Date.now()
+    }
+  ];
+
+  describe('setting valid profile as active', () => {
+    it('should set active profile with valid profileId', async () => {
+      const mockFile: ProfilesFile = {
+        profiles: mockProfiles,
+        activeProfileId: null,
+        version: 1
+      };
+
+      vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+      vi.mocked(saveProfilesFile).mockResolvedValue(undefined);
+      vi.mocked(validateFilePermissions).mockResolvedValue(true);
+
+      const handler = getSetActiveHandler();
+      const result = await handler({}, 'profile-1');
+
+      expect(result).toEqual({ success: true });
+      expect(saveProfilesFile).toHaveBeenCalledWith(
+        expect.objectContaining({
+          activeProfileId: 'profile-1'
+        })
+      );
+    });
+
+    it('should return error for non-existent profile', async () => {
+      const mockFile: ProfilesFile = {
+        profiles: mockProfiles,
+        activeProfileId: null,
+        version: 1
+      };
+
+      vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+      const handler = getSetActiveHandler();
+      const result = await handler({}, 'non-existent-id');
+
+      expect(result).toEqual({
+        success: false,
+        error: 'Profile not found'
+      });
+    });
+  });
+
+  describe('switching to OAuth (null profileId)', () => {
+    it('should accept null profileId to switch to OAuth', async () => {
+      const mockFile: ProfilesFile = {
+        profiles: mockProfiles,
+        activeProfileId: 'profile-1',
+        version: 1
+      };
+
+      vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+      vi.mocked(saveProfilesFile).mockResolvedValue(undefined);
+      vi.mocked(validateFilePermissions).mockResolvedValue(true);
+
+      const handler = getSetActiveHandler();
+      const result = await handler({}, null);
+
+      // Should succeed and clear activeProfileId
+      expect(result).toEqual({ success: true });
+      expect(saveProfilesFile).toHaveBeenCalledWith(
+        expect.objectContaining({
+          activeProfileId: null
+        })
+      );
+    });
+
+    it('should handle null when no profile was active', async () => {
+      const mockFile: ProfilesFile = {
+        profiles: mockProfiles,
+        activeProfileId: null,
+        version: 1
+      };
+
+      vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+      vi.mocked(saveProfilesFile).mockResolvedValue(undefined);
+      vi.mocked(validateFilePermissions).mockResolvedValue(true);
+
+      const handler = getSetActiveHandler();
+      const result = await handler({}, null);
+
+      // Should succeed (idempotent operation)
+      expect(result).toEqual({ success: true });
+      expect(saveProfilesFile).toHaveBeenCalled();
+    });
+  });
+
+  describe('error handling', () => {
+    it('should handle loadProfilesFile errors', async () => {
+      vi.mocked(loadProfilesFile).mockRejectedValue(
+        new Error('Failed to load profiles')
+      );
+
+      const handler = getSetActiveHandler();
+      const result = await handler({}, 'profile-1');
+
+      expect(result).toEqual({
+        success: false,
+        error: 'Failed to load profiles'
+      });
+    });
+
+    it('should handle saveProfilesFile errors', async () => {
+      const mockFile: ProfilesFile = {
+        profiles: mockProfiles,
+        activeProfileId: null,
+        version: 1
+      };
+
+      vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+      vi.mocked(saveProfilesFile).mockRejectedValue(
+        new Error('Failed to save')
+      );
+
+      const handler = getSetActiveHandler();
+      const result = await handler({}, 'profile-1');
+
+      expect(result).toEqual({
+        success: false,
+        error: 'Failed to save'
+      });
+    });
+  });
+});
+
+describe('profile-handlers - testConnection', () => {
+  beforeEach(() => {
+    vi.clearAllMocks();
+    registerProfileHandlers();
+  });
+
+  describe('successful connection tests', () => {
+    it('should return success result for valid connection', async () => {
+      const mockResult: TestConnectionResult = {
+        success: true,
+        message: 'Connection successful'
+      };
+
+      vi.mocked(testConnection).mockResolvedValue(mockResult);
+
+      const handler = getTestConnectionHandler();
+      const result = await handler({}, 'https://api.anthropic.com', 'sk-test-key-12chars');
+
+      expect(result).toEqual({
+        success: true,
+        data: mockResult
+      });
+      expect(testConnection).toHaveBeenCalledWith(
+        'https://api.anthropic.com',
+        'sk-test-key-12chars',
+        expect.any(AbortSignal)
+      );
+    });
+  });
+
+  describe('input validation', () => {
+    it('should return error for empty baseUrl', async () => {
+      const handler = getTestConnectionHandler();
+      const result = await handler({}, '', 'sk-test-key-12chars');
+
+      expect(result).toEqual({
+        success: false,
+        error: 'Base URL is required'
+      });
+      expect(testConnection).not.toHaveBeenCalled();
+    });
+
+    it('should return error for whitespace-only baseUrl', async () => {
+      const handler = getTestConnectionHandler();
+      const result = await handler({}, ' ', 'sk-test-key-12chars');
+
+      expect(result).toEqual({
+        success: false,
+        error: 'Base URL is required'
+      });
+      expect(testConnection).not.toHaveBeenCalled();
+    });
+
+    it('should return error for empty apiKey', async () => {
+      const handler = getTestConnectionHandler();
+      const result = await handler({}, 'https://api.anthropic.com', '');
+
+      expect(result).toEqual({
+        success: false,
+        error: 'API key is required'
+      });
+      expect(testConnection).not.toHaveBeenCalled();
+    });
+
+    it('should return error for whitespace-only apiKey', async () => {
+      const handler = getTestConnectionHandler();
+      const result = await handler({}, 'https://api.anthropic.com', ' ');
+
+      expect(result).toEqual({
+        success: false,
+        error: 'API key is required'
+      });
+      expect(testConnection).not.toHaveBeenCalled();
+    });
+  });
+
+  describe('error handling', () => {
+    it('should return IPCResult with TestConnectionResult data for service errors', async () => {
+      const mockResult: TestConnectionResult = {
+        success: false,
+        errorType: 'auth',
+        message: 'Authentication failed. Please check your API key.'
+      };
+
+      vi.mocked(testConnection).mockResolvedValue(mockResult);
+
+      const handler = getTestConnectionHandler();
+      const result = await handler({}, 'https://api.anthropic.com', 'invalid-key');
+
+      expect(result).toEqual({
+        success: true,
+        data: mockResult
+      });
+    });
+
+    it('should return error for unexpected exceptions', async () => {
+      vi.mocked(testConnection).mockRejectedValue(new Error('Unexpected error'));
+
+      const handler = getTestConnectionHandler();
+      const result = await handler({}, 'https://api.anthropic.com', 'sk-test-key-12chars');
+
+      expect(result).toEqual({
+        success: false,
+        error: 'Unexpected error'
+      });
+    });
+
+    it('should return error for non-Error exceptions', async () => {
+      vi.mocked(testConnection).mockRejectedValue('String error');
+
+      const handler = getTestConnectionHandler();
+      const result = await handler({}, 'https://api.anthropic.com', 'sk-test-key-12chars');
+
+      expect(result).toEqual({
+        success: false,
+        error: 'Failed to test connection'
+      });
+    });
+  });
+});
diff --git a/apps/frontend/src/main/ipc-handlers/profile-handlers.ts b/apps/frontend/src/main/ipc-handlers/profile-handlers.ts
new file mode 100644
index 0000000000..6d4cfacbb7
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/profile-handlers.ts
@@ -0,0 +1,358 @@
+/**
+ * Profile IPC Handlers
+ *
+ * IPC handlers for API profile management:
+ * - profiles:get - Get all profiles
+ * - profiles:save - Save/create a profile
+ * - profiles:update - Update an existing profile
+ * - profiles:delete - Delete a profile
+ * - profiles:setActive - Set active profile
+ * - profiles:test-connection - Test API profile connection
+ */
+
+import { ipcMain } from 'electron';
+import { IPC_CHANNELS } from '../../shared/constants';
+import type { IPCResult } from '../../shared/types';
+import type { APIProfile, ProfileFormData, ProfilesFile, TestConnectionResult, DiscoverModelsResult } from '@shared/types/profile';
+import {
+  loadProfilesFile,
+  saveProfilesFile,
+  validateFilePermissions,
+  getProfilesFilePath,
+  atomicModifyProfiles,
+  createProfile,
+  updateProfile,
+  deleteProfile,
+  testConnection,
+  discoverModels
+} from '../services/profile';
+
+// Track active test connection requests for cancellation
+const activeTestConnections = new Map<number, AbortController>();
+
+// Track active discover models requests for cancellation
+const activeDiscoverModelsRequests = new Map<number, AbortController>();
+
+/**
+ * Register all profile-related IPC handlers
+ */
+export function registerProfileHandlers(): void {
+  /**
+   * Get all profiles
+   */
+  ipcMain.handle(
+    IPC_CHANNELS.PROFILES_GET,
+    async (): Promise<IPCResult<ProfilesFile>> => {
+      try {
+        const profiles = await loadProfilesFile();
+        return { success: true, data: profiles };
+      } catch (error) {
+        return {
+          success: false,
+          error: error instanceof Error ? error.message : 'Failed to load profiles'
+        };
+      }
+    }
+  );
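+  // Result envelope (illustrative): every profile channel resolves to an
+  // IPCResult, e.g. { success: true, data: profilesFile } on success or
+  // { success: false, error: 'Failed to load profiles' } on failure, so the
+  // renderer can branch on `success` instead of wrapping invoke() in try/catch.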
+
+  /**
+   * Save/create a profile
+   */
+  ipcMain.handle(
+    IPC_CHANNELS.PROFILES_SAVE,
+    async (
+      _,
+      profileData: ProfileFormData
+    ): Promise<IPCResult<APIProfile>> => {
+      try {
+        // Use createProfile from service layer (handles validation)
+        const newProfile = await createProfile(profileData);
+
+        // Set file permissions to user-readable only
+        await validateFilePermissions(getProfilesFilePath()).catch((err) => {
+          console.warn('[profile-handlers] Failed to set secure file permissions:', err);
+        });
+
+        return { success: true, data: newProfile };
+      } catch (error) {
+        return {
+          success: false,
+          error: error instanceof Error ? error.message : 'Failed to save profile'
+        };
+      }
+    }
+  );
+
+  /**
+   * Update an existing profile
+   */
+  ipcMain.handle(
+    IPC_CHANNELS.PROFILES_UPDATE,
+    async (_, profileData: APIProfile): Promise<IPCResult<APIProfile>> => {
+      try {
+        // Use updateProfile from service layer (handles validation)
+        const updatedProfile = await updateProfile({
+          id: profileData.id,
+          name: profileData.name,
+          baseUrl: profileData.baseUrl,
+          apiKey: profileData.apiKey,
+          models: profileData.models
+        });
+
+        // Set file permissions to user-readable only
+        await validateFilePermissions(getProfilesFilePath()).catch((err) => {
+          console.warn('[profile-handlers] Failed to set secure file permissions:', err);
+        });
+
+        return { success: true, data: updatedProfile };
+      } catch (error) {
+        return {
+          success: false,
+          error: error instanceof Error ? error.message : 'Failed to update profile'
+        };
+      }
+    }
+  );
+
+  /**
+   * Delete a profile
+   */
+  ipcMain.handle(
+    IPC_CHANNELS.PROFILES_DELETE,
+    async (_, profileId: string): Promise<IPCResult> => {
+      try {
+        // Use deleteProfile from service layer (handles validation)
+        await deleteProfile(profileId);
+
+        return { success: true };
+      } catch (error) {
+        return {
+          success: false,
+          error: error instanceof Error ? error.message : 'Failed to delete profile'
+        };
+      }
+    }
+  );
+
+  /**
+   * Set active profile
+   * - If profileId is provided, set that profile as active
+   * - If profileId is null, clear active profile (switch to OAuth)
+   * Uses atomic operation to prevent race conditions
+   */
+  ipcMain.handle(
+    IPC_CHANNELS.PROFILES_SET_ACTIVE,
+    async (_, profileId: string | null): Promise<IPCResult> => {
+      try {
+        await atomicModifyProfiles((file) => {
+          // If switching to OAuth (null), clear active profile
+          if (profileId === null) {
+            file.activeProfileId = null;
+            return file;
+          }
+
+          // Check if profile exists
+          const profileExists = file.profiles.some((p) => p.id === profileId);
+          if (!profileExists) {
+            throw new Error('Profile not found');
+          }
+
+          // Set active profile
+          file.activeProfileId = profileId;
+          return file;
+        });
+
+        return { success: true };
+      } catch (error) {
+        return {
+          success: false,
+          error: error instanceof Error ? error.message : 'Failed to set active profile'
+        };
+      }
+    }
+  );
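+  // Concurrency note: atomicModifyProfiles serializes the load-modify-save
+  // cycle, so two concurrent setActive calls cannot interleave between read and
+  // write; throwing inside the modifier (as with 'Profile not found' above)
+  // aborts the operation without persisting anything.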
+
+  /**
+   * Test API profile connection
+   * - Tests credentials by making a minimal API request
+   * - Returns detailed error information for different failure types
+   * - Includes configurable timeout (defaults to 15 seconds)
+   * - Supports cancellation via PROFILES_TEST_CONNECTION_CANCEL
+   */
+  ipcMain.handle(
+    IPC_CHANNELS.PROFILES_TEST_CONNECTION,
+    async (_event, baseUrl: string, apiKey: string, requestId: number): Promise<IPCResult<TestConnectionResult>> => {
+      // Create AbortController for timeout and cancellation
+      const controller = new AbortController();
+      const timeoutMs = 15000; // 15 seconds
+
+      // Track this request for cancellation
+      activeTestConnections.set(requestId, controller);
+
+      // Set timeout to abort the request
+      const timeoutId = setTimeout(() => {
+        controller.abort();
+      }, timeoutMs);
+
+      try {
+        // Validate inputs (null/empty checks)
+        if (!baseUrl || baseUrl.trim() === '') {
+          clearTimeout(timeoutId);
+          activeTestConnections.delete(requestId);
+          return {
+            success: false,
+            error: 'Base URL is required'
+          };
+        }
+
+        if (!apiKey || apiKey.trim() === '') {
+          clearTimeout(timeoutId);
+          activeTestConnections.delete(requestId);
+          return {
+            success: false,
+            error: 'API key is required'
+          };
+        }
+
+        // Call testConnection from service layer with abort signal
+        const result = await testConnection(baseUrl, apiKey, controller.signal);
+
+        // Clear timeout on success
+        clearTimeout(timeoutId);
+        activeTestConnections.delete(requestId);
+
+        return { success: true, data: result };
+      } catch (error) {
+        // Clear timeout on error
+        clearTimeout(timeoutId);
+        activeTestConnections.delete(requestId);
+
+        // Handle abort errors (timeout or explicit cancellation)
+        if (error instanceof Error && error.name === 'AbortError') {
+          return {
+            success: false,
+            error: 'Connection timeout. The request took too long to complete.'
+          };
+        }
+
+        return {
+          success: false,
+          error: error instanceof Error ? error.message : 'Failed to test connection'
+        };
+      }
+    }
+  );
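+  // Renderer-side flow (illustrative sketch; assumes a preload bridge exposing
+  // ipcRenderer and a caller-generated unique id):
+  //   const requestId = Date.now();
+  //   const res = await ipcRenderer.invoke(IPC_CHANNELS.PROFILES_TEST_CONNECTION, baseUrl, apiKey, requestId);
+  //   // to cancel from the UI:
+  //   ipcRenderer.send(IPC_CHANNELS.PROFILES_TEST_CONNECTION_CANCEL, requestId);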
+
+  /**
+   * Cancel an active test connection request
+   */
+  ipcMain.on(
+    IPC_CHANNELS.PROFILES_TEST_CONNECTION_CANCEL,
+    (_event, requestId: number) => {
+      const controller = activeTestConnections.get(requestId);
+      if (controller) {
+        controller.abort();
+        activeTestConnections.delete(requestId);
+      }
+    }
+  );
+
+  /**
+   * Discover available models from API endpoint
+   * - Fetches list of models from /v1/models endpoint
+   * - Returns model IDs and display names for dropdown selection
+   * - Supports cancellation via PROFILES_DISCOVER_MODELS_CANCEL
+   */
+  ipcMain.handle(
+    IPC_CHANNELS.PROFILES_DISCOVER_MODELS,
+    async (_event, baseUrl: string, apiKey: string, requestId: number): Promise<IPCResult<DiscoverModelsResult>> => {
+      console.log('[discoverModels] Called with:', { baseUrl, requestId });
+
+      // Create AbortController for timeout and cancellation
+      const controller = new AbortController();
+      const timeoutMs = 15000; // 15 seconds
+
+      // Track this request for cancellation
+      activeDiscoverModelsRequests.set(requestId, controller);
+
+      // Set timeout to abort the request
+      const timeoutId = setTimeout(() => {
+        controller.abort();
+      }, timeoutMs);
+
+      try {
+        // Validate inputs (null/empty checks)
+        if (!baseUrl || baseUrl.trim() === '') {
+          clearTimeout(timeoutId);
+          activeDiscoverModelsRequests.delete(requestId);
+          return {
+            success: false,
+            error: 'Base URL is required'
+          };
+        }
+
+        if (!apiKey || apiKey.trim() === '') {
+          clearTimeout(timeoutId);
+          activeDiscoverModelsRequests.delete(requestId);
+          return {
+            success: false,
+            error: 'API key is required'
+          };
+        }
+
+        // Call discoverModels from service layer with abort signal
+        const result = await discoverModels(baseUrl, apiKey, controller.signal);
+
+        // Clear timeout on success
+        clearTimeout(timeoutId);
+        activeDiscoverModelsRequests.delete(requestId);
+
+        return { success: true, data: result };
+      } catch (error) {
+        // Clear timeout on error
+        clearTimeout(timeoutId);
+        activeDiscoverModelsRequests.delete(requestId);
+
+        // Handle abort errors (timeout or explicit cancellation)
+        if (error instanceof Error && error.name === 'AbortError') {
+          return {
+            success: false,
+            error: 'Connection timeout. The request took too long to complete.'
+          };
+        }
+
+        // Extract error type if available
+        const errorType = (error as any).errorType;
+        const errorMessage = error instanceof Error ? error.message : 'Failed to discover models';
+
+        // Log for debugging
+        console.error('[discoverModels] Error:', {
+          name: error instanceof Error ? error.name : 'unknown',
+          message: errorMessage,
+          errorType,
+          originalError: error
+        });
+
+        // Include error type in error message for UI to handle appropriately
+        return {
+          success: false,
+          error: errorMessage
+        };
+      }
+    }
+  );
+
+  /**
+   * Cancel an active discover models request
+   */
+  ipcMain.on(
+    IPC_CHANNELS.PROFILES_DISCOVER_MODELS_CANCEL,
+    (_event, requestId: number) => {
+      const controller = activeDiscoverModelsRequests.get(requestId);
+      if (controller) {
+        controller.abort();
+        activeDiscoverModelsRequests.delete(requestId);
+      }
+    }
+  );
+}
diff --git a/apps/frontend/src/main/ipc-handlers/project-handlers.ts b/apps/frontend/src/main/ipc-handlers/project-handlers.ts
index 4ca0eb726b..d752be8d7f 100644
--- a/apps/frontend/src/main/ipc-handlers/project-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/project-handlers.ts
@@ -34,16 +34,56 @@ import { getEffectiveSourcePath } from '../updater/path-resolver';
 // ============================================
 
 /**
- * Get list of git branches for a directory
+ * Get list of git branches for a directory (both local and remote)
  */
 function getGitBranches(projectPath: string): string[] {
   try {
-    const result = execFileSync(getToolPath('git'), ['branch', '--list', '--format=%(refname:short)'], {
+    // First fetch to ensure we have latest remote refs
+    try {
+      execFileSync(getToolPath('git'), ['fetch', '--prune'], {
+        cwd: projectPath,
+        encoding: 'utf-8',
+        stdio: ['pipe', 'pipe', 'pipe'],
+        timeout: 10000 // 10 second timeout for fetch
+      });
+    } catch {
+      // Fetch may fail if offline or no remote, continue with local refs
+    }
+
+    // Get all branches (local + remote) using --all flag
+    const result = execFileSync(getToolPath('git'), ['branch', '--all', '--format=%(refname:short)'], {
       cwd: projectPath,
       encoding: 'utf-8',
       stdio: ['pipe', 'pipe', 'pipe']
     });
-    return result.trim().split('\n').filter(b => b.trim());
+
+    const branches = result.trim().split('\n')
+      .filter(b => b.trim())
+      .map(b => {
+        // Remote branches come as "origin/branch-name", keep the full name
+        // but remove the "origin/" prefix for display while keeping it usable
+        return b.trim();
+      })
+      // Remove HEAD pointer entries like "origin/HEAD"
+      .filter(b => !b.endsWith('/HEAD'))
+      // Remove duplicates (local branch may exist alongside remote)
+      .filter((branch, index, self) => {
+        // If it's a remote branch (origin/x) and local version exists, keep local
+        if (branch.startsWith('origin/')) {
+          const localName = branch.replace('origin/', '');
+          return !self.includes(localName);
+        }
+        return self.indexOf(branch) === index;
+      });
+
+    // Sort: local branches first, then remote branches
+    return branches.sort((a, b) => {
+      const aIsRemote = a.startsWith('origin/');
+      const bIsRemote = b.startsWith('origin/');
+      if (aIsRemote && !bIsRemote) return 1;
+      if (!aIsRemote && bIsRemote) return -1;
+      return a.localeCompare(b);
+    });
   } catch {
     return [];
   }
 }
diff --git a/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts b/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts
index 0eb8b3aa13..62f9faee98 100644
--- a/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts
+++ b/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts
@@ -96,6 +96,57 @@ function transformPhase(raw: RawRoadmapPhase): RoadmapPhase {
   };
 }
 
+/**
+ * Maps all known backend status values to canonical Kanban column statuses.
+ * Includes valid statuses as identity mappings for consistent lookup.
+ * Module-level constant for efficiency (not recreated on each call).
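+ *
+ * Illustrative lookups (keys are matched after toLowerCase()):
+ *   'Backlog' -> 'under_review', 'SHIPPED' -> 'done', 'building' -> 'in_progress'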
+ */
+const STATUS_MAP: Record<string, RoadmapFeature['status']> = {
+  // Canonical Kanban statuses (identity mappings)
+  'under_review': 'under_review',
+  'planned': 'planned',
+  'in_progress': 'in_progress',
+  'done': 'done',
+  // Early-stage / ideation statuses → under_review
+  'idea': 'under_review',
+  'backlog': 'under_review',
+  'proposed': 'under_review',
+  'pending': 'under_review',
+  // Approved / scheduled statuses → planned
+  'approved': 'planned',
+  'scheduled': 'planned',
+  // Active development statuses → in_progress
+  'active': 'in_progress',
+  'building': 'in_progress',
+  // Completed statuses → done
+  'complete': 'done',
+  'completed': 'done',
+  'shipped': 'done'
+};
+
+/**
+ * Normalizes a feature status string to a valid Kanban column status.
+ * Handles case-insensitive matching and maps backend values to canonical statuses.
+ *
+ * @param status - The raw status string from the backend
+ * @returns A valid RoadmapFeature status for Kanban display
+ */
+function normalizeFeatureStatus(status: string | undefined): RoadmapFeature['status'] {
+  if (!status) return 'under_review';
+
+  const normalized = STATUS_MAP[status.toLowerCase()];
+
+  if (!normalized) {
+    // Debug log for unmapped statuses to aid future mapping additions
+    if (process.env.NODE_ENV === 'development') {
+      console.debug(`[Roadmap] normalizeFeatureStatus: unmapped status "${status}", defaulting to "under_review"`);
+    }
+    return 'under_review';
+  }
+
+  return normalized;
+}
+
 function transformFeature(raw: RawRoadmapFeature): RoadmapFeature {
   return {
     id: raw.id,
@@ -107,7 +158,7 @@ function transformFeature(raw: RawRoadmapFeature): RoadmapFeature {
     impact: (raw.impact as RoadmapFeature['impact']) || 'medium',
     phaseId: raw.phase_id || raw.phaseId || '',
     dependencies: raw.dependencies || [],
-    status: (raw.status as RoadmapFeature['status']) || 'under_review',
+    status: normalizeFeatureStatus(raw.status),
     acceptanceCriteria: raw.acceptance_criteria || raw.acceptanceCriteria || [],
     userStories: raw.user_stories || raw.userStories || [],
     linkedSpecId: raw.linked_spec_id || raw.linkedSpecId,
@@ -115,6 +166,7 @@ function transformFeature(raw: RawRoadmapFeature): RoadmapFeature {
   };
 }
 
+
 export function transformRoadmapFromSnakeCase(
   raw: RawRoadmap,
   projectId: string,
diff --git a/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt b/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt
index 5432d01173..ff5bb4bd42 100644
--- a/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt
+++ b/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt
@@ -304,9 +304,10 @@ ${existingVars['GRAPHITI_DATABASE'] ? `GRAPHITI_DATABASE=${existingVars['GRAPHIT
   try {
     // Check if Claude CLI is available and authenticated
     const result = await new Promise((resolve) => {
-      const proc = spawn('claude', ['--version'], {
+      const { command: claudeCmd, env: claudeEnv } = getClaudeCliInvocation();
+      const proc = spawn(claudeCmd, ['--version'], {
         cwd: project.path,
-        env: { ...process.env },
+        env: claudeEnv,
         shell: true
       });
 
@@ -325,9 +326,9 @@ ${existingVars['GRAPHITI_DATABASE'] ? `GRAPHITI_DATABASE=${existingVars['GRAPHIT
       if (code === 0) {
         // Claude CLI is available, check if authenticated
         // Run a simple command that requires auth
-        const authCheck = spawn('claude', ['api', '--help'], {
+        const authCheck = spawn(claudeCmd, ['api', '--help'], {
           cwd: project.path,
-          env: { ...process.env },
+          env: claudeEnv,
           shell: true
         });
 
@@ -384,9 +385,10 @@ ${existingVars['GRAPHITI_DATABASE'] ? `GRAPHITI_DATABASE=${existingVars['GRAPHIT
   try {
     // Run claude setup-token which will open browser for OAuth
     const result = await new Promise((resolve) => {
-      const proc = spawn('claude', ['setup-token'], {
+      const { command: claudeCmd, env: claudeEnv } = getClaudeCliInvocation();
+      const proc = spawn(claudeCmd, ['setup-token'], {
        cwd: project.path,
-        env: { ...process.env },
+        env: claudeEnv,
         shell: true,
         stdio: 'inherit' // This allows the terminal to handle the interactive auth
       });
diff --git a/apps/frontend/src/main/ipc-handlers/settings-handlers.ts b/apps/frontend/src/main/ipc-handlers/settings-handlers.ts
index d6e7b94ff4..9aecfca97d 100644
--- a/apps/frontend/src/main/ipc-handlers/settings-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/settings-handlers.ts
@@ -1,19 +1,21 @@
 import { ipcMain, dialog, app, shell } from 'electron';
-import { existsSync, writeFileSync, mkdirSync, statSync } from 'fs';
+import { existsSync, writeFileSync, mkdirSync, statSync, readFileSync } from 'fs';
 import { execFileSync } from 'node:child_process';
 import path from 'path';
 import { is } from '@electron-toolkit/utils';
 import { IPC_CHANNELS, DEFAULT_APP_SETTINGS, DEFAULT_AGENT_PROFILES } from '../../shared/constants';
 import type {
   AppSettings,
-  IPCResult
+  IPCResult,
+  SourceEnvConfig,
+  SourceEnvCheckResult
 } from '../../shared/types';
 import { AgentManager } from '../agent';
 import type { BrowserWindow } from 'electron';
-import { getEffectiveVersion } from '../auto-claude-updater';
-import { setUpdateChannel } from '../app-updater';
+import { setUpdateChannel, setUpdateChannelWithDowngradeCheck } from '../app-updater';
 import { getSettingsPath, readSettingsFile } from '../settings-utils';
-import { configureTools, getToolPath, getToolInfo, isPathFromWrongPlatform } from '../cli-tool-manager';
+import { configureTools, getToolPath, getToolInfo, isPathFromWrongPlatform, preWarmToolCache } from '../cli-tool-manager';
+import { parseEnvFile } from './utils';
 
 const settingsPath = getSettingsPath();
 
@@ -34,13 +36,16 @@ const detectAutoBuildSourcePath = (): string | null => {
     );
   } else {
     // Production mode paths (packaged app)
-    // On Windows/Linux/macOS, the app might be installed anywhere
-    // We check common locations relative to the app bundle
+    // The backend is bundled as extraResources/backend
+    // On all platforms, it should be at process.resourcesPath/backend
+    possiblePaths.push(
+      path.resolve(process.resourcesPath, 'backend') // Primary: extraResources/backend
+    );
+    // Fallback paths for different app structures
     const appPath = app.getAppPath();
     possiblePaths.push(
-      path.resolve(appPath, '..', 'backend'), // Sibling to app
-      path.resolve(appPath, '..', '..', 'backend'), // Up 2 from app
-      path.resolve(process.resourcesPath, '..', 'backend') // Relative to resources
+      path.resolve(appPath, '..', 'backend'), // Sibling to asar
+      path.resolve(appPath, '..', '..', 'Resources', 'backend') // macOS bundle structure
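+      // Illustrative: on macOS, process.resourcesPath typically resolves to
+      // MyApp.app/Contents/Resources, so the primary path above becomes
+      // MyApp.app/Contents/Resources/backend.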
     );
   }
 
@@ -166,6 +171,11 @@ export function registerSettingsHandlers(
         claudePath: settings.claudePath,
       });
 
+      // Re-warm cache asynchronously after configuring (non-blocking)
+      preWarmToolCache(['claude']).catch((error) => {
+        console.warn('[SETTINGS_GET] Failed to re-warm CLI cache:', error);
+      });
+
       return { success: true, data: settings as AppSettings };
     }
   );
@@ -207,12 +217,25 @@ export function registerSettingsHandlers(
         githubCLIPath: newSettings.githubCLIPath,
         claudePath: newSettings.claudePath,
       });
+
+      // Re-warm cache asynchronously after configuring (non-blocking)
+      preWarmToolCache(['claude']).catch((error) => {
+        console.warn('[SETTINGS_SAVE] Failed to re-warm CLI cache:', error);
+      });
     }
 
     // Update auto-updater channel if betaUpdates setting changed
     if (settings.betaUpdates !== undefined) {
-      const channel = settings.betaUpdates ? 'beta' : 'latest';
-      setUpdateChannel(channel);
+      if (settings.betaUpdates) {
+        // Enabling beta updates - just switch channel
+        setUpdateChannel('beta');
+      } else {
+        // Disabling beta updates - switch to stable and check if downgrade is available
+        // This will notify the renderer if user is on a prerelease and stable version exists
+        setUpdateChannelWithDowngradeCheck('latest', true).catch((err) => {
+          console.error('[settings-handlers] Failed to check for stable downgrade:', err);
+        });
+      }
     }
 
     return { success: true };
@@ -372,8 +395,8 @@ export function registerSettingsHandlers(
   // ============================================
 
   ipcMain.handle(IPC_CHANNELS.APP_VERSION, async (): Promise<string> => {
-    // Use effective version which accounts for source updates
-    const version = getEffectiveVersion();
+    // Return the actual bundled version from package.json
+    const version = app.getVersion();
     console.log('[settings-handlers] APP_VERSION returning:', version);
     return version;
   });
@@ -499,4 +522,238 @@ export function registerSettingsHandlers(
       }
     }
   );
+
+  // ============================================
+  // Auto-Build Source Environment Operations
+  // ============================================
+
+  /**
+   * Helper to get source .env path from settings
+   *
+   * In production mode, the .env file is NOT bundled (excluded in electron-builder config).
+   * We store the source .env in app userData directory instead, which is writable.
+   * The sourcePath points to the bundled backend for reference, but envPath is in userData.
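+   *
+   * Illustrative resolved locations (actual names depend on the install):
+   *   dev:  <sourcePath>/.env
+   *   prod: app.getPath('userData') + '/backend/.env'
+   *         (e.g. ~/Library/Application Support/<AppName>/backend/.env on macOS)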
+   */
+  const getSourceEnvPath = (): {
+    sourcePath: string | null;
+    envPath: string | null;
+    isProduction: boolean;
+  } => {
+    const savedSettings = readSettingsFile();
+    const settings = { ...DEFAULT_APP_SETTINGS, ...savedSettings };
+
+    // Get autoBuildPath from settings or try to auto-detect
+    let sourcePath: string | null = settings.autoBuildPath || null;
+    if (!sourcePath) {
+      sourcePath = detectAutoBuildSourcePath();
+    }
+
+    if (!sourcePath) {
+      return { sourcePath: null, envPath: null, isProduction: !is.dev };
+    }
+
+    // In production, use userData directory for .env since resources may be read-only
+    // In development, use the actual source path
+    let envPath: string;
+    if (is.dev) {
+      envPath = path.join(sourcePath, '.env');
+    } else {
+      // Production: store .env in userData/backend/.env
+      const userDataBackendDir = path.join(app.getPath('userData'), 'backend');
+      if (!existsSync(userDataBackendDir)) {
+        mkdirSync(userDataBackendDir, { recursive: true });
+      }
+      envPath = path.join(userDataBackendDir, '.env');
+    }
+
+    return {
+      sourcePath,
+      envPath,
+      isProduction: !is.dev
+    };
+  };
+
+  ipcMain.handle(
+    IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_GET,
+    async (): Promise<IPCResult<SourceEnvConfig>> => {
+      try {
+        const { sourcePath, envPath } = getSourceEnvPath();
+
+        // Load global settings to check for global token fallback
+        const savedSettings = readSettingsFile();
+        const globalSettings = { ...DEFAULT_APP_SETTINGS, ...savedSettings };
+
+        if (!sourcePath) {
+          // Even without source path, check global token
+          const globalToken = globalSettings.globalClaudeOAuthToken;
+          return {
+            success: true,
+            data: {
+              hasClaudeToken: !!globalToken && globalToken.length > 0,
+              claudeOAuthToken: globalToken,
+              envExists: false
+            }
+          };
+        }
+
+        const envExists = envPath ? existsSync(envPath) : false;
+        let hasClaudeToken = false;
+        let claudeOAuthToken: string | undefined;
+
+        // First, check source .env file
+        if (envExists && envPath) {
+          const content = readFileSync(envPath, 'utf-8');
+          const vars = parseEnvFile(content);
+          claudeOAuthToken = vars['CLAUDE_CODE_OAUTH_TOKEN'];
+          hasClaudeToken = !!claudeOAuthToken && claudeOAuthToken.length > 0;
+        }
+
+        // Fallback to global settings if no token in source .env
+        if (!hasClaudeToken && globalSettings.globalClaudeOAuthToken) {
+          claudeOAuthToken = globalSettings.globalClaudeOAuthToken;
+          hasClaudeToken = true;
+        }
+
+        return {
+          success: true,
+          data: {
+            hasClaudeToken,
+            claudeOAuthToken,
+            sourcePath,
+            envExists
+          }
+        };
+      } catch (error) {
+        // Log the error for debugging in production
+        console.error('[AUTOBUILD_SOURCE_ENV_GET] Error:', error);
+        return {
+          success: false,
+          error: error instanceof Error ? error.message : 'Failed to get source env'
+        };
+      }
+    }
+  );
+
+  ipcMain.handle(
+    IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_UPDATE,
+    async (_, config: { claudeOAuthToken?: string }): Promise<IPCResult> => {
+      try {
+        const { sourcePath, envPath } = getSourceEnvPath();
+
+        if (!sourcePath || !envPath) {
+          return {
+            success: false,
+            error: 'Auto-build source path not configured. Please set it in Settings.'
+          };
+        }
+
+        // Read existing content or start fresh (avoiding TOCTOU race condition)
+        let existingVars: Record<string, string> = {};
+        try {
+          const content = readFileSync(envPath, 'utf-8');
+          existingVars = parseEnvFile(content);
+        } catch (_readError) {
+          // File doesn't exist or can't be read - start with empty vars
+          // This is expected for first-time setup
+        }
+
+        // Update with new values
+        if (config.claudeOAuthToken !== undefined) {
+          existingVars['CLAUDE_CODE_OAUTH_TOKEN'] = config.claudeOAuthToken;
+        }
+
+        // Generate content
+        const lines: string[] = [
+          '# Auto Claude Framework Environment Variables',
+          '# Managed by Auto Claude UI',
+          '',
+          '# Claude Code OAuth Token (REQUIRED)',
+          `CLAUDE_CODE_OAUTH_TOKEN=${existingVars['CLAUDE_CODE_OAUTH_TOKEN'] || ''}`,
+          ''
+        ];
+
+        // Preserve other existing variables
+        for (const [key, value] of Object.entries(existingVars)) {
+          if (key !== 'CLAUDE_CODE_OAUTH_TOKEN') {
+            lines.push(`${key}=${value}`);
+          }
+        }
+
+        writeFileSync(envPath, lines.join('\n'));
+
+        return { success: true };
+      } catch (error) {
+        return {
+          success: false,
+          error: error instanceof Error ? error.message : 'Failed to update source env'
+        };
+      }
+    }
+  );
+
+  ipcMain.handle(
+    IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_CHECK_TOKEN,
+    async (): Promise<IPCResult<SourceEnvCheckResult>> => {
+      try {
+        const { sourcePath, envPath, isProduction } = getSourceEnvPath();
+
+        // Load global settings to check for global token fallback
+        const savedSettings = readSettingsFile();
+        const globalSettings = { ...DEFAULT_APP_SETTINGS, ...savedSettings };
+
+        // Check global token first as it's the primary method
+        const globalToken = globalSettings.globalClaudeOAuthToken;
+        const hasGlobalToken = !!globalToken && globalToken.length > 0;
+
+        if (!sourcePath) {
+          // In production, no source path is acceptable if global token exists
+          if (hasGlobalToken) {
+            return {
+              success: true,
+              data: {
+                hasToken: true,
+                sourcePath: isProduction ? app.getPath('userData') : undefined
+              }
+            };
+          }
+          return {
+            success: true,
+            data: {
+              hasToken: false,
+              error: isProduction
+                ? 'Please configure Claude OAuth token in Settings > API Configuration'
+                : 'Auto-build source path not configured'
+            }
+          };
+        }
+
+        // Check source .env file
+        let hasEnvToken = false;
+        if (envPath && existsSync(envPath)) {
+          const content = readFileSync(envPath, 'utf-8');
+          const vars = parseEnvFile(content);
+          const token = vars['CLAUDE_CODE_OAUTH_TOKEN'];
+          hasEnvToken = !!token && token.length > 0;
+        }
+
+        // Token exists if either source .env has it OR global settings has it
+        const hasToken = hasEnvToken || hasGlobalToken;
+
+        return {
+          success: true,
+          data: {
+            hasToken,
+            sourcePath
+          }
+        };
+      } catch (error) {
+        // Log the error for debugging in production
+        console.error('[AUTOBUILD_SOURCE_ENV_CHECK_TOKEN] Error:', error);
+        return {
+          success: false,
+          error: error instanceof Error ? error.message : 'Failed to check source token'
+        };
+      }
+    }
+  );
 }
diff --git a/apps/frontend/src/main/ipc-handlers/shared/label-utils.ts b/apps/frontend/src/main/ipc-handlers/shared/label-utils.ts
new file mode 100644
index 0000000000..d51ee6fbdd
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/shared/label-utils.ts
@@ -0,0 +1,34 @@
+/**
+ * Shared label matching utilities
+ * Used by both GitHub and GitLab spec-utils for category detection
+ */
+
+/**
+ * Escape special regex characters in a string.
+ * This ensures that terms like "c++" or "c#" are matched literally.
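+ * For example, escapeRegExp('c++') yields the string 'c\\+\\+', which the
+ * regex engine then treats as the literal text "c++".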
+ *
+ * @param str - The string to escape
+ * @returns The escaped string safe for use in a RegExp
+ */
+function escapeRegExp(str: string): string {
+  return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
+}
+
+/**
+ * Check if a label contains a whole-word match for a term.
+ * Uses word boundaries to prevent false positives (e.g., 'acid' matching 'ci').
+ *
+ * The term is escaped to handle regex metacharacters safely, so terms like
+ * "c++" or "c#" are matched literally rather than being interpreted as regex.
+ *
+ * @param label - The label to check (already lowercased)
+ * @param term - The term to search for (will be escaped for regex safety)
+ * @returns true if the label contains the term as a whole word
+ */
+export function labelMatchesWholeWord(label: string, term: string): boolean {
+  // Escape regex metacharacters in the term to match literally
+  const escapedTerm = escapeRegExp(term);
+  // Use word boundary regex to match whole words only
+  const regex = new RegExp(`\\b${escapedTerm}\\b`);
+  return regex.test(label);
+}
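+
+// Quick sanity checks (illustrative):
+//   labelMatchesWholeWord('ci/cd pipeline', 'ci')  -> true  ('/' is a word boundary)
+//   labelMatchesWholeWord('acid tests', 'ci')      -> false (no boundary inside 'acid')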
diff --git a/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts b/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts
index 232f54bedf..50049f06e8 100644
--- a/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts
@@ -194,6 +194,9 @@ export function registerTaskCRUDHandlers(agentManager: AgentManager): void {
         updatedAt: new Date()
       };
 
+      // Invalidate cache since a new task was created
+      projectStore.invalidateTasksCache(projectId);
+
       return { success: true, data: task };
     }
   );
@@ -230,6 +233,10 @@ export function registerTaskCRUDHandlers(agentManager: AgentManager): void {
         } else {
           console.warn(`[TASK_DELETE] Spec directory not found: ${specDir}`);
         }
+
+        // Invalidate cache since a task was deleted
+        projectStore.invalidateTasksCache(project.id);
+
         return { success: true };
       } catch (error) {
         console.error('[TASK_DELETE] Error deleting spec directory:', error);
@@ -418,6 +425,9 @@ export function registerTaskCRUDHandlers(agentManager: AgentManager): void {
         updatedAt: new Date()
       };
 
+      // Invalidate cache since a task was updated
+      projectStore.invalidateTasksCache(project.id);
+
       return { success: true, data: updatedTask };
     } catch (error) {
       return {
diff --git a/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts b/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts
index 1e0ce9ba52..f9ac58fd2a 100644
--- a/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts
@@ -2,7 +2,7 @@ import { ipcMain, BrowserWindow } from 'electron';
 import { IPC_CHANNELS, AUTO_BUILD_PATHS, getSpecsDir } from '../../../shared/constants';
 import type { IPCResult, TaskStartOptions, TaskStatus } from '../../../shared/types';
 import path from 'path';
-import { existsSync, readFileSync, writeFileSync, mkdirSync, renameSync, unlinkSync } from 'fs';
+import { existsSync, readFileSync, writeFileSync, renameSync, unlinkSync } from 'fs';
 import { spawnSync } from 'child_process';
 import { AgentManager } from '../../agent';
 import { fileWatcher } from '../../file-watcher';
@@ -12,9 +12,10 @@ import { getClaudeProfileManager } from '../../claude-profile-manager';
 import {
   getPlanPath,
   persistPlanStatus,
-  persistPlanStatusSync,
   createPlanIfNotExists
 } from './plan-file-utils';
+import { findTaskWorktree } from '../../worktree-paths';
+import { projectStore } from '../../project-store';
 
 /**
  * Atomic file write to prevent TOCTOU race conditions.
@@ -236,7 +237,7 @@ export function registerTaskExecutionHandlers(
       setImmediate(async () => {
         const persistStart = Date.now();
         try {
-          const persisted = await persistPlanStatus(planPath, 'in_progress');
+          const persisted = await persistPlanStatus(planPath, 'in_progress', project.id);
           if (persisted) {
             console.warn('[TASK_START] Updated plan status to: in_progress');
           }
@@ -288,7 +289,7 @@ export function registerTaskExecutionHandlers(
       setImmediate(async () => {
         const persistStart = Date.now();
         try {
-          const persisted = await persistPlanStatus(planPath, 'backlog');
+          const persisted = await persistPlanStatus(planPath, 'backlog', project.id);
           if (persisted) {
             console.warn('[TASK_STOP] Updated plan status to backlog');
           }
@@ -332,9 +333,9 @@ export function registerTaskExecutionHandlers(
       );
 
       // Check if worktree exists - QA needs to run in the worktree where the build happened
-      const worktreePath = path.join(project.path, '.worktrees', task.specId);
-      const worktreeSpecDir = path.join(worktreePath, specsBaseDir, task.specId);
-      const hasWorktree = existsSync(worktreePath);
+      const worktreePath = findTaskWorktree(project.path, task.specId);
+      const worktreeSpecDir = worktreePath ? path.join(worktreePath, specsBaseDir, task.specId) : null;
+      const hasWorktree = worktreePath !== null;
 
       if (approved) {
         // Write approval to QA report
@@ -382,14 +383,14 @@ export function registerTaskExecutionHandlers(
         }
 
         // Step 3: Clean untracked files that came from the merge
-        // IMPORTANT: Exclude .auto-claude and .worktrees directories to preserve specs and worktree data
-        const cleanResult = spawnSync('git', ['clean', '-fd', '-e', '.auto-claude', '-e', '.worktrees'], {
+        // IMPORTANT: Exclude .auto-claude directory to preserve specs and worktree data
+        const cleanResult = spawnSync('git', ['clean', '-fd', '-e', '.auto-claude'], {
           cwd: project.path,
           encoding: 'utf-8',
           stdio: 'pipe'
         });
         if (cleanResult.status === 0) {
-          console.log('[TASK_REVIEW] Cleaned untracked files in main (excluding .auto-claude and .worktrees)');
+          console.log('[TASK_REVIEW] Cleaned untracked files in main (excluding .auto-claude)');
         }
 
         console.log('[TASK_REVIEW] Main branch restored to pre-merge state');
@@ -397,7 +398,7 @@ export function registerTaskExecutionHandlers(
 
       // Write feedback for QA fixer - write to WORKTREE spec dir if it exists
      // The QA process runs in the worktree where the build and implementation_plan.json are
-      const targetSpecDir = hasWorktree ? worktreeSpecDir : specDir;
+      const targetSpecDir = hasWorktree && worktreeSpecDir ? worktreeSpecDir : specDir;
       const fixRequestPath = path.join(targetSpecDir, 'QA_FIX_REQUEST.md');
 
       console.warn('[TASK_REVIEW] Writing QA fix request to:', fixRequestPath);
@@ -453,9 +454,9 @@ export function registerTaskExecutionHandlers(
       // Validate status transition - 'done' can only be set through merge handler
       // UNLESS there's no worktree (limbo state - already merged/discarded or failed)
       if (status === 'done') {
-        // Check if worktree exists
-        const worktreePath = path.join(project.path, '.worktrees', taskId);
-        const hasWorktree = existsSync(worktreePath);
+        // Check if worktree exists (task.specId matches worktree folder name)
+        const worktreePath = findTaskWorktree(project.path, task.specId);
+        const hasWorktree = worktreePath !== null;
 
         if (hasWorktree) {
           // Worktree exists - must use merge workflow
@@ -508,11 +509,13 @@ export function registerTaskExecutionHandlers(
 
       try {
         // Use shared utility for thread-safe plan file updates
-        const persisted = await persistPlanStatus(planPath, status);
+        const persisted = await persistPlanStatus(planPath, status, project.id);
 
         if (!persisted) {
           // If no implementation plan exists yet, create a basic one
           await createPlanIfNotExists(planPath, task, status);
+          // Invalidate cache after creating new plan
+          projectStore.invalidateTasksCache(project.id);
         }
 
         // Auto-stop task when status changes AWAY from 'in_progress' and process IS running
@@ -671,17 +674,35 @@ export function registerTaskExecutionHandlers(
         return { success: false, error: 'Task not found' };
       }
 
-      // Get the spec directory
-      const autoBuildDir = project.autoBuildPath || '.auto-claude';
-      const specDir = path.join(
+      // Get the spec directory - use task.specsPath if available (handles worktree vs main)
+      // This is critical: task might exist in worktree, and getTasks() prefers worktree version.
+      // If we write to main project but task is in worktree, the worktree's old status takes precedence on refresh.
+      const specDir = task.specsPath || path.join(
         project.path,
-        autoBuildDir,
-        'specs',
+        getSpecsDir(project.autoBuildPath),
         task.specId
       );
 
       // Update implementation_plan.json
       const planPath = path.join(specDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN);
+      console.log(`[Recovery] Writing to plan file at: ${planPath} (task location: ${task.location || 'main'})`);
+
+      // Also update the OTHER location if task exists in both main and worktree
+      // This ensures consistency regardless of which version getTasks() prefers
+      const specsBaseDir = getSpecsDir(project.autoBuildPath);
+      const mainSpecDir = path.join(project.path, specsBaseDir, task.specId);
+      const worktreePath = findTaskWorktree(project.path, task.specId);
+      const worktreeSpecDir = worktreePath ? path.join(worktreePath, specsBaseDir, task.specId) : null;
+
+      // Collect all plan file paths that need updating
+      const planPathsToUpdate: string[] = [planPath];
+      if (mainSpecDir !== specDir && existsSync(path.join(mainSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN))) {
+        planPathsToUpdate.push(path.join(mainSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN));
+      }
+      if (worktreeSpecDir && worktreeSpecDir !== specDir && existsSync(path.join(worktreeSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN))) {
+        planPathsToUpdate.push(path.join(worktreeSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN));
+      }
+      console.log(`[Recovery] Will update ${planPathsToUpdate.length} plan file(s):`, planPathsToUpdate);
 
       try {
         // Read the plan to analyze subtask progress
@@ -743,14 +764,25 @@ export function registerTaskExecutionHandlers(
         // Just update status in plan file (project store reads from file, no separate update needed)
         plan.status = 'human_review';
         plan.planStatus = 'review';
-        try {
-          // Use atomic write to prevent TOCTOU race conditions
-          atomicWriteFileSync(planPath, JSON.stringify(plan, null, 2));
-        } catch (writeError) {
-          console.error('[Recovery] Failed to write plan file:', writeError);
+
+        // Write to ALL plan file locations to ensure consistency
+        const planContent = JSON.stringify(plan, null, 2);
+        let writeSucceededForComplete = false;
+        for (const pathToUpdate of planPathsToUpdate) {
+          try {
+            atomicWriteFileSync(pathToUpdate, planContent);
+            console.log(`[Recovery] Successfully wrote to: ${pathToUpdate}`);
+            writeSucceededForComplete = true;
+          } catch (writeError) {
+            console.error(`[Recovery] Failed to write plan file at ${pathToUpdate}:`, writeError);
+            // Continue trying other paths
+          }
+        }
+
+        if (!writeSucceededForComplete) {
           return {
             success: false,
-            error: 'Failed to write plan file'
+            error: 'Failed to write plan file during recovery (all locations failed)'
           };
         }
 
@@ -797,11 +829,19 @@ export function registerTaskExecutionHandlers(
           }
         }
 
-        try {
-          // Use atomic write to prevent TOCTOU race conditions
-          atomicWriteFileSync(planPath, JSON.stringify(plan, null, 2));
-        } catch (writeError) {
-          console.error('[Recovery] Failed to write plan file:', writeError);
+        // Write to ALL plan file locations to ensure consistency
+        const planContent = JSON.stringify(plan, null, 2);
+        let writeSucceeded = false;
+        for (const pathToUpdate of planPathsToUpdate) {
+          try {
+            atomicWriteFileSync(pathToUpdate, planContent);
+            console.log(`[Recovery] Successfully wrote to: ${pathToUpdate}`);
+            writeSucceeded = true;
+          } catch (writeError) {
+            console.error(`[Recovery] Failed to write plan file at ${pathToUpdate}:`, writeError);
+          }
+        }
+        if (!writeSucceeded) {
           return {
             success: false,
             error: 'Failed to write plan file during recovery'
@@ -853,17 +893,20 @@ export function registerTaskExecutionHandlers(
         // Set status to in_progress for the restart
         newStatus = 'in_progress';
 
-        // Update plan status for restart
+        // Update plan status for restart - write to ALL locations
         if (plan) {
           plan.status = 'in_progress';
           plan.planStatus = 'in_progress';
-          try {
-            // Use atomic write to prevent TOCTOU race conditions
-            atomicWriteFileSync(planPath, JSON.stringify(plan, null, 2));
-          } catch (writeError) {
-            console.error('[Recovery] Failed to write plan file for restart:', writeError);
-            // Continue with restart attempt even if file write fails
-            // The plan status will be updated by the agent when it starts
+          const restartPlanContent = JSON.stringify(plan, null, 2);
+          for (const pathToUpdate of planPathsToUpdate) {
+            try {
+              atomicWriteFileSync(pathToUpdate, restartPlanContent);
+              console.log(`[Recovery] Wrote restart status to: ${pathToUpdate}`);
+            } catch (writeError) {
+              console.error(`[Recovery] Failed to write plan file for restart at ${pathToUpdate}:`, writeError);
+              // Continue with restart attempt even if file write fails
+              // The plan status will be updated by the agent when it starts
+            }
           }
         }
 
diff --git a/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts b/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts
index 6d810f3aea..933d0c5a00 100644
--- a/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts
+++ b/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts
@@ -21,6 +21,7 @@ import path from 'path';
 import { readFileSync, writeFileSync, mkdirSync } from 'fs';
 import { AUTO_BUILD_PATHS, getSpecsDir } from '../../../shared/constants';
 import type { TaskStatus, Project, Task } from '../../../shared/types';
+import { projectStore } from '../../project-store';
 
 // In-memory locks for plan file operations
 // Key: plan file path, Value: Promise chain for serializing operations
@@ -93,9 +94,10 @@ export function mapStatusToPlanStatus(status: TaskStatus): string {
  *
  * @param planPath - Path to the implementation_plan.json file
  * @param status - The TaskStatus to persist
+ * @param projectId - Optional project ID to invalidate cache (recommended for performance)
  * @returns true if status was persisted, false if plan file doesn't exist
  */
-export async function persistPlanStatus(planPath: string, status: TaskStatus): Promise<boolean> {
+export async function persistPlanStatus(planPath: string, status: TaskStatus, projectId?: string): Promise<boolean> {
   return withPlanLock(planPath, async () => {
     try {
       // Read file directly without existence check to avoid TOCTOU race condition
@@ -107,6 +109,12 @@ export async function persistPlanStatus(planPath: string, status: TaskStatus): P
       plan.updated_at = new Date().toISOString();
 
       writeFileSync(planPath, JSON.stringify(plan, null, 2));
+
+      // Invalidate tasks cache since status changed
+      if (projectId) {
+        projectStore.invalidateTasksCache(projectId);
+      }
+
       return true;
     } catch (err) {
       // File not found is expected - return false
@@ -141,9 +149,10 @@ export async function persistPlanStatus(planPath: string, status: TaskStatus): P
  *
  * @param planPath - Path to the implementation_plan.json file
  * @param status - The TaskStatus to persist
+ * @param projectId - Optional project ID to invalidate cache (recommended for performance)
  * @returns true if status was persisted, false otherwise
  */
-export function persistPlanStatusSync(planPath: string, status: TaskStatus): boolean {
+export function persistPlanStatusSync(planPath: string, status: TaskStatus, projectId?: string): boolean {
   try {
     // Read file directly without existence check to avoid TOCTOU race condition
     const planContent = readFileSync(planPath, 'utf-8');
@@ -154,6 +163,12 @@ export function persistPlanStatusSync(planPath: string, status: TaskStatus): boo
     plan.updated_at = new Date().toISOString();
 
     writeFileSync(planPath, JSON.stringify(plan, null, 2));
+
+    // Invalidate tasks cache since status changed
+    if (projectId) {
+      projectStore.invalidateTasksCache(projectId);
+    }
+
     return true;
   } catch (err) {
     // File not found is expected - return false
diff --git a/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts b/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts
index a9edf89c6f..c00ee1f94d 100644
--- a/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts
b/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts @@ -4,14 +4,19 @@ import type { IPCResult, WorktreeStatus, WorktreeDiff, WorktreeDiffFile, Worktre import path from 'path'; import { existsSync, readdirSync, statSync, readFileSync } from 'fs'; import { execSync, execFileSync, spawn, spawnSync, exec, execFile } from 'child_process'; +import { minimatch } from 'minimatch'; import { projectStore } from '../../project-store'; import { getConfiguredPythonPath, PythonEnvManager, pythonEnvManager as pythonEnvManagerSingleton } from '../../python-env-manager'; -import { getEffectiveSourcePath } from '../../auto-claude-updater'; +import { getEffectiveSourcePath } from '../../updater/path-resolver'; import { getProfileEnv } from '../../rate-limit-detector'; import { findTaskAndProject } from './shared'; import { parsePythonCommand } from '../../python-detector'; import { getToolPath } from '../../cli-tool-manager'; import { promisify } from 'util'; +import { + getTaskWorktreeDir, + findTaskWorktree, +} from '../../worktree-paths'; /** * Read utility feature settings (for commit message, merge resolver) from settings file @@ -55,6 +60,145 @@ function getUtilitySettings(): { model: string; modelId: string; thinkingLevel: const execAsync = promisify(exec); const execFileAsync = promisify(execFile); +/** + * Check if a repository is misconfigured as bare but has source files. + * If so, automatically fix the configuration by unsetting core.bare. + * + * This can happen when git worktree operations incorrectly set bare=true, + * or when users manually misconfigure the repository. + * + * @param projectPath - Path to check and potentially fix + * @returns true if fixed, false if no fix needed or not fixable + */ +function fixMisconfiguredBareRepo(projectPath: string): boolean { + try { + // Check if bare=true is set + const bareConfig = execFileSync( + getToolPath('git'), + ['config', '--get', 'core.bare'], + { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] } + ).trim().toLowerCase(); + + if (bareConfig !== 'true') { + return false; // Not marked as bare, nothing to fix + } + + // Check if there are source files (indicating misconfiguration) + // A truly bare repo would only have git internals, not source code + // This covers multiple ecosystems: JS/TS, Python, Rust, Go, Java, C#, etc. + // + // Markers are separated into exact matches and glob patterns for efficiency. + // Exact matches use existsSync() directly, while glob patterns use minimatch + // against a cached directory listing. 
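+  //
+  // Illustrative example of the two matching paths (hypothetical file names,
+  // not part of the marker lists below):
+  //   existsSync(path.join(projectPath, 'package.json'))       // exact match
+  //   minimatch('MyApp.csproj', '*.csproj', { nocase: true })  // glob match => true
+  //   minimatch('notes.txt', '*.csproj', { nocase: true })     // glob match => false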
+ const EXACT_MARKERS = [ + // JavaScript/TypeScript ecosystem + 'package.json', 'apps', 'src', + // Python ecosystem + 'pyproject.toml', 'setup.py', 'requirements.txt', 'Pipfile', + // Rust ecosystem + 'Cargo.toml', + // Go ecosystem + 'go.mod', 'go.sum', 'cmd', 'main.go', + // Java/JVM ecosystem + 'pom.xml', 'build.gradle', 'build.gradle.kts', + // Ruby ecosystem + 'Gemfile', 'Rakefile', + // PHP ecosystem + 'composer.json', + // General project markers + 'Makefile', 'CMakeLists.txt', 'README.md', 'LICENSE' + ]; + + const GLOB_MARKERS = [ + // .NET/C# ecosystem - patterns that need glob matching + '*.csproj', '*.sln', '*.fsproj' + ]; + + // Check exact matches first (fast path) + const hasExactMatch = EXACT_MARKERS.some(marker => + existsSync(path.join(projectPath, marker)) + ); + + if (hasExactMatch) { + // Found a project marker, proceed to fix + } else { + // Check glob patterns - read directory once and cache for all patterns + let directoryFiles: string[] | null = null; + const MAX_FILES_TO_CHECK = 500; // Limit to avoid reading huge directories + + const hasGlobMatch = GLOB_MARKERS.some(pattern => { + // Validate pattern - only support simple glob patterns for security + if (pattern.includes('..') || pattern.includes('/')) { + console.warn(`[GIT] Unsupported glob pattern ignored: ${pattern}`); + return false; + } + + // Lazy-load directory listing, cached across patterns + if (directoryFiles === null) { + try { + const allFiles = readdirSync(projectPath); + // Limit to first N entries to avoid performance issues + directoryFiles = allFiles.slice(0, MAX_FILES_TO_CHECK); + if (allFiles.length > MAX_FILES_TO_CHECK) { + console.warn(`[GIT] Directory has ${allFiles.length} entries, checking only first ${MAX_FILES_TO_CHECK}`); + } + } catch (error) { + // Log the error for debugging instead of silently swallowing + console.warn(`[GIT] Failed to read directory ${projectPath}:`, error instanceof Error ? error.message : String(error)); + directoryFiles = []; + } + } + + // Use minimatch for proper glob pattern matching + return directoryFiles.some(file => minimatch(file, pattern, { nocase: true })); + }); + + if (!hasGlobMatch) { + return false; // Legitimately bare repo + } + } + + // Fix the misconfiguration + console.warn('[GIT] Detected misconfigured bare repository with source files. Auto-fixing by unsetting core.bare...'); + execFileSync( + getToolPath('git'), + ['config', '--unset', 'core.bare'], + { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] } + ); + console.warn('[GIT] Fixed: core.bare has been unset. Git operations should now work correctly.'); + return true; + } catch { + return false; + } +} + +/** + * Check if a path is a valid git working tree (not a bare repository). + * Returns true if the path is inside a git repository with a working tree. + * + * NOTE: This is a pure check with no side-effects. If you need to fix + * misconfigured bare repos before an operation, call fixMisconfiguredBareRepo() + * explicitly before calling this function. 
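+ *
+ * Illustrative behavior (standard git): `git rev-parse --is-inside-work-tree`
+ * prints "true" in a normal checkout, prints "false" inside a bare repository
+ * (e.g. one created with `git init --bare`), and exits non-zero outside any
+ * repository; only the literal "true" result passes this check.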
+ *
+ * @param projectPath - Path to check
+ * @returns true if it's a valid working tree, false if bare or not a git repo
+ */
+function isGitWorkTree(projectPath: string): boolean {
+  try {
+    // Use git rev-parse --is-inside-work-tree which returns "true" for working trees
+    // and fails for bare repos or non-git directories
+    const result = execFileSync(
+      getToolPath('git'),
+      ['rev-parse', '--is-inside-work-tree'],
+      { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
+    );
+    return result.trim() === 'true';
+  } catch {
+    // Not a working tree (could be bare repo or not a git repo at all)
+    return false;
+  }
+}
+
 /**
  * IDE and Terminal detection and launching utilities
  */
@@ -674,12 +818,14 @@ const TERMINAL_DETECTION: Partial
-// Escape single quotes for AppleScript: ' -> '\''
-function escapeAppleScriptPath(dirPath: string): string {
+function escapeSingleQuotedPath(dirPath: string): string {
+  // Single quotes are escaped by ending the string, adding an escaped quote,
+  // and starting a new string: ' -> '\''
+  // This pattern works in both AppleScript and POSIX shells (bash, sh, zsh)
   return dirPath.replace(/'/g, "'\\''");
 }
 
@@ -1069,8 +1215,8 @@ async function openInTerminal(dirPath: string, terminal: SupportedTerminal, cust
   if (platform === 'darwin') {
     // macOS: Use open command with the directory
-    // Escape single quotes in dirPath to prevent AppleScript injection
-    const escapedPath = escapeAppleScriptPath(dirPath);
+    // Escape single quotes in dirPath to prevent script injection
+    const escapedPath = escapeSingleQuotedPath(dirPath);
 
     if (terminal === 'system') {
       // Use AppleScript to open Terminal.app at the directory
@@ -1112,7 +1258,7 @@
       } catch {
         // xterm doesn't have --working-directory, use -e with a script
         // Escape the path for shell use within the xterm command
-        const escapedPath = escapeAppleScriptPath(dirPath);
+        const escapedPath = escapeSingleQuotedPath(dirPath);
         await execFileAsync('xterm', ['-e', `cd '${escapedPath}' && bash`]);
       }
     }
@@ -1158,7 +1304,7 @@ export function registerWorktreeHandlers(
 ): void {
   /**
    * Get the worktree status for a task
-   * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/
+   * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/
    */
   ipcMain.handle(
     IPC_CHANNELS.TASK_WORKTREE_STATUS,
@@ -1169,10 +1315,10 @@
         return { success: false, error: 'Task not found' };
       }
 
-      // Per-spec worktree path: .worktrees/{spec-name}/
-      const worktreePath = path.join(project.path, '.worktrees', task.specId);
+      // Find worktree at .auto-claude/worktrees/tasks/{spec-name}/
+      const worktreePath = findTaskWorktree(project.path, task.specId);
 
-      if (!existsSync(worktreePath)) {
+      if (!worktreePath) {
         return {
           success: true,
           data: { exists: false }
@@ -1268,7 +1414,7 @@
   /**
    * Get the diff for a task's worktree
-   * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/
+   * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/
    */
   ipcMain.handle(
     IPC_CHANNELS.TASK_WORKTREE_DIFF,
@@ -1279,10 +1425,10 @@
         return { success: false, error: 'Task not found' };
       }
 
-      // Per-spec worktree path: .worktrees/{spec-name}/
-      const worktreePath = path.join(project.path, '.worktrees', task.specId);
+      // Find worktree at .auto-claude/worktrees/tasks/{spec-name}/
+      const worktreePath = findTaskWorktree(project.path, 
task.specId); - if (!existsSync(worktreePath)) { + if (!worktreePath) { return { success: false, error: 'No worktree found for this task' }; } @@ -1400,6 +1546,12 @@ export function registerWorktreeHandlers( debug('Found task:', task.specId, 'project:', project.path); + // Auto-fix any misconfigured bare repo before merge operation + // This prevents issues where git operations fail due to incorrect bare=true config + if (fixMisconfiguredBareRepo(project.path)) { + debug('Fixed misconfigured bare repository at:', project.path); + } + // Use run.py --merge to handle the merge const sourcePath = getEffectiveSourcePath(); if (!sourcePath) { @@ -1415,8 +1567,8 @@ export function registerWorktreeHandlers( } // Check worktree exists before merge - const worktreePath = path.join(project.path, '.worktrees', task.specId); - debug('Worktree path:', worktreePath, 'exists:', existsSync(worktreePath)); + const worktreePath = findTaskWorktree(project.path, task.specId); + debug('Worktree path:', worktreePath, 'exists:', !!worktreePath); // Check if changes are already staged (for stage-only mode) if (options?.noCommit) { @@ -1443,14 +1595,18 @@ export function registerWorktreeHandlers( } } - // Get git status before merge - try { - const gitStatusBefore = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' }); - debug('Git status BEFORE merge in main project:\n', gitStatusBefore || '(clean)'); - const gitBranch = execFileSync(getToolPath('git'), ['branch', '--show-current'], { cwd: project.path, encoding: 'utf-8' }).trim(); - debug('Current branch:', gitBranch); - } catch (e) { - debug('Failed to get git status before:', e); + // Get git status before merge (only if project is a working tree, not a bare repo) + if (isGitWorkTree(project.path)) { + try { + const gitStatusBefore = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' }); + debug('Git status BEFORE merge in main project:\n', gitStatusBefore || '(clean)'); + const gitBranch = execFileSync(getToolPath('git'), ['branch', '--show-current'], { cwd: project.path, encoding: 'utf-8' }).trim(); + debug('Current branch:', gitBranch); + } catch (e) { + debug('Failed to get git status before:', e); + } + } else { + debug('Project is a bare repository - skipping pre-merge git status check'); } const args = [ @@ -1594,14 +1750,18 @@ export function registerWorktreeHandlers( debug('Full stdout:', stdout); debug('Full stderr:', stderr); - // Get git status after merge - try { - const gitStatusAfter = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' }); - debug('Git status AFTER merge in main project:\n', gitStatusAfter || '(clean)'); - const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' }); - debug('Staged changes:\n', gitDiffStaged || '(none)'); - } catch (e) { - debug('Failed to get git status after:', e); + // Get git status after merge (only if project is a working tree, not a bare repo) + if (isGitWorkTree(project.path)) { + try { + const gitStatusAfter = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' }); + debug('Git status AFTER merge in main project:\n', gitStatusAfter || '(clean)'); + const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' }); + debug('Staged changes:\n', gitDiffStaged || '(none)'); + } catch (e) { + debug('Failed 
to get git status after:', e); + } + } else { + debug('Project is a bare repository - skipping git status check (this is normal for worktree-based projects)'); } if (code === 0) { @@ -1613,33 +1773,39 @@ export function registerWorktreeHandlers( let mergeAlreadyCommitted = false; if (isStageOnly) { - try { - const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' }); - hasActualStagedChanges = gitDiffStaged.trim().length > 0; - debug('Stage-only verification: hasActualStagedChanges:', hasActualStagedChanges); - - if (!hasActualStagedChanges) { - // Check if worktree branch was already merged (merge commit exists) - const specBranch = `auto-claude/${task.specId}`; - try { - // Check if current branch contains all commits from spec branch - // git merge-base --is-ancestor returns exit code 0 if true, 1 if false - execFileSync( - 'git', - ['merge-base', '--is-ancestor', specBranch, 'HEAD'], - { cwd: project.path, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] } - ); - // If we reach here, the command succeeded (exit code 0) - branch is merged - mergeAlreadyCommitted = true; - debug('Merge already committed check:', mergeAlreadyCommitted); - } catch { - // Exit code 1 means not merged, or branch may not exist - mergeAlreadyCommitted = false; - debug('Could not check merge status, assuming not merged'); + // Only check staged changes if project is a working tree (not bare repo) + if (isGitWorkTree(project.path)) { + try { + const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' }); + hasActualStagedChanges = gitDiffStaged.trim().length > 0; + debug('Stage-only verification: hasActualStagedChanges:', hasActualStagedChanges); + + if (!hasActualStagedChanges) { + // Check if worktree branch was already merged (merge commit exists) + const specBranch = `auto-claude/${task.specId}`; + try { + // Check if current branch contains all commits from spec branch + // git merge-base --is-ancestor returns exit code 0 if true, 1 if false + execFileSync( + getToolPath('git'), + ['merge-base', '--is-ancestor', specBranch, 'HEAD'], + { cwd: project.path, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] } + ); + // If we reach here, the command succeeded (exit code 0) - branch is merged + mergeAlreadyCommitted = true; + debug('Merge already committed check:', mergeAlreadyCommitted); + } catch { + // Exit code 1 means not merged, or branch may not exist + mergeAlreadyCommitted = false; + debug('Could not check merge status, assuming not merged'); + } } + } catch (e) { + debug('Failed to verify staged changes:', e); } - } catch (e) { - debug('Failed to verify staged changes:', e); + } else { + // For bare repos, skip staging verification - merge happens in worktree + debug('Project is a bare repository - skipping staged changes verification'); } } @@ -1657,6 +1823,33 @@ export function registerWorktreeHandlers( message = 'Changes were already merged and committed. Task marked as done.'; staged = false; debug('Stage-only requested but merge already committed. 
Marking as done.'); + + // Clean up worktree since merge is complete (fixes #243) + // This is the same cleanup as the full merge path, needed because + // stageOnly defaults to true for human_review tasks + try { + if (worktreePath && existsSync(worktreePath)) { + execFileSync(getToolPath('git'), ['worktree', 'remove', '--force', worktreePath], { + cwd: project.path, + encoding: 'utf-8' + }); + debug('Worktree cleaned up (already merged):', worktreePath); + + // Also delete the task branch + const taskBranch = `auto-claude/${task.specId}`; + try { + execFileSync(getToolPath('git'), ['branch', '-D', taskBranch], { + cwd: project.path, + encoding: 'utf-8' + }); + debug('Task branch deleted:', taskBranch); + } catch { + // Branch might not exist or already deleted + } + } + } catch (cleanupErr) { + debug('Worktree cleanup failed (non-fatal):', cleanupErr); + } } else if (isStageOnly && !hasActualStagedChanges) { // Stage-only was requested but no changes to stage (and not committed) // This could mean nothing to merge or an error - keep in human_review for investigation @@ -1677,6 +1870,33 @@ export function registerWorktreeHandlers( planStatus = 'completed'; message = 'Changes merged successfully'; staged = false; + + // Clean up worktree after successful full merge (fixes #243) + // This allows drag-to-Done workflow since TASK_UPDATE_STATUS blocks 'done' when worktree exists + try { + if (worktreePath && existsSync(worktreePath)) { + execFileSync(getToolPath('git'), ['worktree', 'remove', '--force', worktreePath], { + cwd: project.path, + encoding: 'utf-8' + }); + debug('Worktree cleaned up after full merge:', worktreePath); + + // Also delete the task branch since we merged successfully + const taskBranch = `auto-claude/${task.specId}`; + try { + execFileSync(getToolPath('git'), ['branch', '-D', taskBranch], { + cwd: project.path, + encoding: 'utf-8' + }); + debug('Task branch deleted:', taskBranch); + } catch { + // Branch might not exist or already deleted + } + } + } catch (cleanupErr) { + debug('Worktree cleanup failed (non-fatal):', cleanupErr); + // Non-fatal - merge succeeded, cleanup can be done manually + } } debug('Merge result. isStageOnly:', isStageOnly, 'newStatus:', newStatus, 'staged:', staged); @@ -1701,10 +1921,15 @@ export function registerWorktreeHandlers( // Issue #243: We must update BOTH the main project's plan AND the worktree's plan (if it exists) // because ProjectStore prefers the worktree version when deduplicating tasks. 
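+      // Hypothetical example for a spec id "012-fix-login" - both copies receive the new status:
+      //   <project>/.auto-claude/specs/012-fix-login/implementation_plan.json
+      //   <project>/.auto-claude/worktrees/tasks/012-fix-login/.auto-claude/specs/012-fix-login/implementation_plan.json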
// OPTIMIZATION: Use async I/O and parallel updates to prevent UI blocking - const planPaths = [ + // NOTE: The worktree has the same directory structure as main project + const planPaths: { path: string; isMain: boolean }[] = [ { path: path.join(specDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN), isMain: true }, - { path: path.join(worktreePath, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN), isMain: false } ]; + // Add worktree plan path if worktree exists + if (worktreePath) { + const worktreeSpecDir = path.join(worktreePath, project.autoBuildPath || '.auto-claude', 'specs', task.specId); + planPaths.push({ path: path.join(worktreeSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN), isMain: false }); + } const { promises: fsPromises } = require('fs'); @@ -1766,8 +1991,15 @@ export function registerWorktreeHandlers( } }; - // Run async updates without blocking the response - updatePlans().catch(err => debug('Background plan update failed:', err)); + // IMPORTANT: Wait for plan updates to complete before responding (fixes #243) + // Previously this was "fire and forget" which caused a race condition: + // resolve() would return before files were written, and UI refresh would read old status + try { + await updatePlans(); + } catch (err) { + debug('Plan update failed:', err); + // Non-fatal: UI will still update, but status may not persist across refresh + } const mainWindow = getMainWindow(); if (mainWindow) { @@ -1785,8 +2017,17 @@ export function registerWorktreeHandlers( } }); } else { - // Check if there were conflicts - const hasConflicts = stdout.includes('conflict') || stderr.includes('conflict'); + // Check if there were actual merge conflicts + // More specific patterns to avoid false positives from debug output like "files_with_conflicts: 0" + const conflictPatterns = [ + /CONFLICT \(/i, // Git merge conflict marker + /merge conflict/i, // Explicit merge conflict message + /\bconflict detected\b/i, // Our own conflict detection message + /\bconflicts? found\b/i, // "conflicts found" or "conflict found" + /Automatic merge failed/i, // Git's automatic merge failure message + ]; + const combinedOutput = stdout + stderr; + const hasConflicts = conflictPatterns.some(pattern => pattern.test(combinedOutput)); debug('Merge failed. 
hasConflicts:', hasConflicts); resolve({ @@ -1863,27 +2104,31 @@ export function registerWorktreeHandlers( } console.warn('[IPC] Found task:', task.specId, 'project:', project.name); - // Check for uncommitted changes in the main project + // Check for uncommitted changes in the main project (only if not a bare repo) let hasUncommittedChanges = false; let uncommittedFiles: string[] = []; - try { - const gitStatus = execFileSync(getToolPath('git'), ['status', '--porcelain'], { - cwd: project.path, - encoding: 'utf-8' - }); + if (isGitWorkTree(project.path)) { + try { + const gitStatus = execFileSync(getToolPath('git'), ['status', '--porcelain'], { + cwd: project.path, + encoding: 'utf-8' + }); - if (gitStatus && gitStatus.trim()) { - // Parse the status output to get file names - // Format: XY filename (where X and Y are status chars, then space, then filename) - uncommittedFiles = gitStatus - .split('\n') - .filter(line => line.trim()) - .map(line => line.substring(3).trim()); // Skip 2 status chars + 1 space, trim any trailing whitespace + if (gitStatus && gitStatus.trim()) { + // Parse the status output to get file names + // Format: XY filename (where X and Y are status chars, then space, then filename) + uncommittedFiles = gitStatus + .split('\n') + .filter(line => line.trim()) + .map(line => line.substring(3).trim()); // Skip 2 status chars + 1 space, trim any trailing whitespace - hasUncommittedChanges = uncommittedFiles.length > 0; + hasUncommittedChanges = uncommittedFiles.length > 0; + } + } catch (e) { + console.error('[IPC] Failed to check git status:', e); } - } catch (e) { - console.error('[IPC] Failed to check git status:', e); + } else { + console.warn('[IPC] Project is a bare repository - skipping uncommitted changes check'); } const sourcePath = getEffectiveSourcePath(); @@ -2012,7 +2257,7 @@ export function registerWorktreeHandlers( /** * Discard the worktree changes - * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/ + * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/ */ ipcMain.handle( IPC_CHANNELS.TASK_WORKTREE_DISCARD, @@ -2023,10 +2268,10 @@ export function registerWorktreeHandlers( return { success: false, error: 'Task not found' }; } - // Per-spec worktree path: .worktrees/{spec-name}/ - const worktreePath = path.join(project.path, '.worktrees', task.specId); + // Find worktree at .auto-claude/worktrees/tasks/{spec-name}/ + const worktreePath = findTaskWorktree(project.path, task.specId); - if (!existsSync(worktreePath)) { + if (!worktreePath) { return { success: true, data: { @@ -2090,7 +2335,7 @@ export function registerWorktreeHandlers( /** * List all spec worktrees for a project - * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/ + * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/ */ ipcMain.handle( IPC_CHANNELS.TASK_LIST_WORKTREES, @@ -2101,23 +2346,11 @@ export function registerWorktreeHandlers( return { success: false, error: 'Project not found' }; } - const worktreesDir = path.join(project.path, '.worktrees'); const worktrees: WorktreeListItem[] = []; + const worktreesDir = getTaskWorktreeDir(project.path); - if (!existsSync(worktreesDir)) { - return { success: true, data: { worktrees } }; - } - - // Get all directories in .worktrees - const entries = readdirSync(worktreesDir); - for (const entry of entries) { - const entryPath = path.join(worktreesDir, entry); - const stat = 
statSync(entryPath); - - // Skip worker directories and non-directories - if (!stat.isDirectory() || entry.startsWith('worker-')) { - continue; - } + // Helper to process a single worktree entry + const processWorktreeEntry = (entry: string, entryPath: string) => { try { // Get branch info @@ -2188,6 +2421,22 @@ export function registerWorktreeHandlers( console.error(`Error getting info for worktree ${entry}:`, gitError); // Skip this worktree if we can't get git info } + }; + + // Scan worktrees directory + if (existsSync(worktreesDir)) { + const entries = readdirSync(worktreesDir); + for (const entry of entries) { + const entryPath = path.join(worktreesDir, entry); + try { + const stat = statSync(entryPath); + if (stat.isDirectory()) { + processWorktreeEntry(entry, entryPath); + } + } catch { + // Skip entries that can't be stat'd + } + } } return { success: true, data: { worktrees } }; diff --git a/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts b/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts index b76d136314..d68e8ab92f 100644 --- a/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts @@ -9,6 +9,7 @@ import { projectStore } from '../project-store'; import { terminalNameGenerator } from '../terminal-name-generator'; import { debugLog, debugError } from '../../shared/utils/debug-logger'; import { escapeShellArg, escapeShellArgWindows } from '../../shared/utils/shell-escape'; +import { getClaudeCliInvocationAsync } from '../claude-cli-utils'; /** @@ -53,7 +54,10 @@ export function registerTerminalHandlers( ipcMain.on( IPC_CHANNELS.TERMINAL_INVOKE_CLAUDE, (_, id: string, cwd?: string) => { - terminalManager.invokeClaude(id, cwd); + // Use async version to avoid blocking main process during CLI detection + terminalManager.invokeClaudeAsync(id, cwd).catch((error) => { + console.error('[terminal-handlers] Failed to invoke Claude:', error); + }); } ); @@ -76,6 +80,22 @@ export function registerTerminalHandlers( } ); + // Set terminal title (user renamed terminal in renderer) + ipcMain.on( + IPC_CHANNELS.TERMINAL_SET_TITLE, + (_, id: string, title: string) => { + terminalManager.setTitle(id, title); + } + ); + + // Set terminal worktree config (user changed worktree association in renderer) + ipcMain.on( + IPC_CHANNELS.TERMINAL_SET_WORKTREE_CONFIG, + (_, id: string, config: import('../../shared/types').TerminalWorktreeConfig | undefined) => { + terminalManager.setWorktreeConfig(id, config); + } + ); + // Claude profile management (multi-account support) ipcMain.handle( IPC_CHANNELS.CLAUDE_PROFILES_GET, @@ -321,7 +341,15 @@ export function registerTerminalHandlers( }); // Create a new terminal for the login process - await terminalManager.create({ id: terminalId, cwd: homeDir }); + const createResult = await terminalManager.create({ id: terminalId, cwd: homeDir }); + + // If terminal creation failed, return the error + if (!createResult.success) { + return { + success: false, + error: createResult.error || 'Failed to create terminal for authentication' + }; + } // Wait a moment for the terminal to initialize await new Promise(resolve => setTimeout(resolve, 500)); @@ -329,20 +357,30 @@ export function registerTerminalHandlers( // Build the login command with the profile's config dir // Use platform-specific syntax and escaping for environment variables let loginCommand: string; + const { command: claudeCmd, env: claudeEnv } = await getClaudeCliInvocationAsync(); + const pathPrefix = claudeEnv.PATH + ? 
(process.platform === 'win32' + ? `set "PATH=${escapeShellArgWindows(claudeEnv.PATH)}" && ` + : `export PATH=${escapeShellArg(claudeEnv.PATH)} && `) + : ''; + const shellClaudeCmd = process.platform === 'win32' + ? `"${escapeShellArgWindows(claudeCmd)}"` + : escapeShellArg(claudeCmd); + if (!profile.isDefault && profile.configDir) { if (process.platform === 'win32') { // SECURITY: Use Windows-specific escaping for cmd.exe const escapedConfigDir = escapeShellArgWindows(profile.configDir); // Windows cmd.exe syntax: set "VAR=value" with %VAR% for expansion - loginCommand = `set "CLAUDE_CONFIG_DIR=${escapedConfigDir}" && echo Config dir: %CLAUDE_CONFIG_DIR% && claude setup-token`; + loginCommand = `${pathPrefix}set "CLAUDE_CONFIG_DIR=${escapedConfigDir}" && echo Config dir: %CLAUDE_CONFIG_DIR% && ${shellClaudeCmd} setup-token`; } else { // SECURITY: Use POSIX escaping for bash/zsh const escapedConfigDir = escapeShellArg(profile.configDir); // Unix/Mac bash/zsh syntax: export VAR=value with $VAR for expansion - loginCommand = `export CLAUDE_CONFIG_DIR=${escapedConfigDir} && echo "Config dir: $CLAUDE_CONFIG_DIR" && claude setup-token`; + loginCommand = `${pathPrefix}export CLAUDE_CONFIG_DIR=${escapedConfigDir} && echo "Config dir: $CLAUDE_CONFIG_DIR" && ${shellClaudeCmd} setup-token`; } } else { - loginCommand = 'claude setup-token'; + loginCommand = `${pathPrefix}${shellClaudeCmd} setup-token`; } debugLog('[IPC] Sending login command to terminal:', loginCommand); @@ -350,10 +388,11 @@ export function registerTerminalHandlers( // Write the login command to the terminal terminalManager.write(terminalId, `${loginCommand}\r`); - // Notify the renderer that a login terminal was created + // Notify the renderer that an auth terminal was created + // This allows the UI to display the terminal so users can see the OAuth flow const mainWindow = getMainWindow(); if (mainWindow) { - mainWindow.webContents.send('claude-profile-login-terminal', { + mainWindow.webContents.send(IPC_CHANNELS.TERMINAL_AUTH_CREATED, { terminalId, profileId, profileName: profile.name @@ -599,7 +638,10 @@ export function registerTerminalHandlers( ipcMain.on( IPC_CHANNELS.TERMINAL_RESUME_CLAUDE, (_, id: string, sessionId?: string) => { - terminalManager.resumeClaude(id, sessionId); + // Use async version to avoid blocking main process during CLI detection + terminalManager.resumeClaudeAsync(id, sessionId).catch((error) => { + console.error('[terminal-handlers] Failed to resume Claude:', error); + }); } ); diff --git a/apps/frontend/src/main/ipc-handlers/terminal/index.ts b/apps/frontend/src/main/ipc-handlers/terminal/index.ts new file mode 100644 index 0000000000..3b235fe038 --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/terminal/index.ts @@ -0,0 +1,17 @@ +/** + * Terminal handlers module + * + * This module organizes terminal worktree-related IPC handlers: + * - Worktree operations (create, list, remove) + */ + +import { registerTerminalWorktreeHandlers } from './worktree-handlers'; + +/** + * Register all terminal worktree IPC handlers + */ +export function registerTerminalWorktreeIpcHandlers(): void { + registerTerminalWorktreeHandlers(); +} + +export { registerTerminalWorktreeHandlers } from './worktree-handlers'; diff --git a/apps/frontend/src/main/ipc-handlers/terminal/worktree-handlers.ts b/apps/frontend/src/main/ipc-handlers/terminal/worktree-handlers.ts new file mode 100644 index 0000000000..6ebd86f3bd --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/terminal/worktree-handlers.ts @@ -0,0 +1,386 @@ +import 
{ ipcMain } from 'electron'; +import { IPC_CHANNELS } from '../../../shared/constants'; +import type { + IPCResult, + CreateTerminalWorktreeRequest, + TerminalWorktreeConfig, + TerminalWorktreeResult, +} from '../../../shared/types'; +import path from 'path'; +import { existsSync, mkdirSync, writeFileSync, readFileSync, readdirSync, rmSync } from 'fs'; +import { execFileSync } from 'child_process'; +import { debugLog, debugError } from '../../../shared/utils/debug-logger'; +import { projectStore } from '../../project-store'; +import { parseEnvFile } from '../utils'; +import { + getTerminalWorktreeDir, + getTerminalWorktreePath, +} from '../../worktree-paths'; + +// Shared validation regex for worktree names - lowercase alphanumeric with dashes/underscores +// Must start and end with alphanumeric character +const WORKTREE_NAME_REGEX = /^[a-z0-9][a-z0-9_-]*[a-z0-9]$|^[a-z0-9]$/; + +// Validation regex for git branch names - allows alphanumeric, dots, slashes, dashes, underscores +const GIT_BRANCH_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9._/-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$/; + +/** + * Validate that projectPath is a registered project + */ +function isValidProjectPath(projectPath: string): boolean { + const projects = projectStore.getProjects(); + return projects.some(p => p.path === projectPath); +} + +const MAX_TERMINAL_WORKTREES = 12; + +/** + * Get the default branch from project settings OR env config + */ +function getDefaultBranch(projectPath: string): string { + const project = projectStore.getProjects().find(p => p.path === projectPath); + if (project?.settings?.mainBranch) { + debugLog('[TerminalWorktree] Using mainBranch from project settings:', project.settings.mainBranch); + return project.settings.mainBranch; + } + + const envPath = path.join(projectPath, '.auto-claude', '.env'); + if (existsSync(envPath)) { + try { + const content = readFileSync(envPath, 'utf-8'); + const vars = parseEnvFile(content); + if (vars['DEFAULT_BRANCH']) { + debugLog('[TerminalWorktree] Using DEFAULT_BRANCH from env config:', vars['DEFAULT_BRANCH']); + return vars['DEFAULT_BRANCH']; + } + } catch (error) { + debugError('[TerminalWorktree] Error reading env file:', error); + } + } + + for (const branch of ['main', 'master']) { + try { + execFileSync('git', ['rev-parse', '--verify', branch], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Auto-detected branch:', branch); + return branch; + } catch { + // Branch doesn't exist, try next + } + } + + // Fallback to current branch - wrap in try-catch + try { + const currentBranch = execFileSync('git', ['rev-parse', '--abbrev-ref', 'HEAD'], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }).trim(); + debugLog('[TerminalWorktree] Falling back to current branch:', currentBranch); + return currentBranch; + } catch (error) { + debugError('[TerminalWorktree] Error detecting current branch:', error); + return 'main'; // Safe default + } +} + +function saveWorktreeConfig(worktreePath: string, config: TerminalWorktreeConfig): void { + writeFileSync(path.join(worktreePath, 'config.json'), JSON.stringify(config, null, 2)); +} + +function loadWorktreeConfig(worktreePath: string): TerminalWorktreeConfig | null { + const configPath = path.join(worktreePath, 'config.json'); + if (existsSync(configPath)) { + try { + return JSON.parse(readFileSync(configPath, 'utf-8')); + } catch (error) { + debugError('[TerminalWorktree] Corrupted config.json in:', configPath, error); + return null; + } + 
}
+  return null;
+}
+
+async function createTerminalWorktree(
+  request: CreateTerminalWorktreeRequest
+): Promise<TerminalWorktreeResult> {
+  const { terminalId, name, taskId, createGitBranch, projectPath, baseBranch: customBaseBranch } = request;
+
+  debugLog('[TerminalWorktree] Creating worktree:', { name, taskId, createGitBranch, projectPath, customBaseBranch });
+
+  // Validate projectPath against registered projects
+  if (!isValidProjectPath(projectPath)) {
+    return {
+      success: false,
+      error: 'Invalid project path',
+    };
+  }
+
+  // Validate worktree name - use shared regex (lowercase only)
+  if (!WORKTREE_NAME_REGEX.test(name)) {
+    return {
+      success: false,
+      error: 'Invalid worktree name. Use lowercase letters, numbers, dashes, and underscores. Must start and end with alphanumeric.',
+    };
+  }
+
+  // CRITICAL: Validate customBaseBranch to prevent command injection
+  if (customBaseBranch && !GIT_BRANCH_REGEX.test(customBaseBranch)) {
+    return {
+      success: false,
+      error: 'Invalid base branch name',
+    };
+  }
+
+  const existing = await listTerminalWorktrees(projectPath);
+  if (existing.length >= MAX_TERMINAL_WORKTREES) {
+    return {
+      success: false,
+      error: `Maximum of ${MAX_TERMINAL_WORKTREES} terminal worktrees reached.`,
+    };
+  }
+
+  const worktreePath = getTerminalWorktreePath(projectPath, name);
+  const branchName = `terminal/${name}`;
+  let directoryCreated = false;
+
+  try {
+    if (existsSync(worktreePath)) {
+      return { success: false, error: `Worktree '${name}' already exists.` };
+    }
+
+    mkdirSync(getTerminalWorktreeDir(projectPath), { recursive: true });
+    directoryCreated = true;
+
+    // Use custom base branch if provided, otherwise detect default
+    const baseBranch = customBaseBranch || getDefaultBranch(projectPath);
+    debugLog('[TerminalWorktree] Using base branch:', baseBranch, customBaseBranch ? '(custom)' : '(default)');
+
+    // Check if baseBranch is already a remote ref (e.g., "origin/feature-x")
+    const isRemoteRef = baseBranch.startsWith('origin/');
+    const remoteBranchName = isRemoteRef ? 
baseBranch.replace('origin/', '') : baseBranch; + + // Fetch the branch from remote + try { + execFileSync('git', ['fetch', 'origin', remoteBranchName], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Fetched latest from origin/' + remoteBranchName); + } catch { + debugLog('[TerminalWorktree] Could not fetch from remote, continuing with local branch'); + } + + // Determine the base ref to use for worktree creation + let baseRef = baseBranch; + if (isRemoteRef) { + // Already a remote ref, use as-is + baseRef = baseBranch; + debugLog('[TerminalWorktree] Using remote ref directly:', baseRef); + } else { + // Check if remote version exists and use it for latest code + try { + execFileSync('git', ['rev-parse', '--verify', `origin/${baseBranch}`], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + baseRef = `origin/${baseBranch}`; + debugLog('[TerminalWorktree] Using remote ref:', baseRef); + } catch { + debugLog('[TerminalWorktree] Remote ref not found, using local branch:', baseBranch); + } + } + + if (createGitBranch) { + execFileSync('git', ['worktree', 'add', '-b', branchName, worktreePath, baseRef], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Created worktree with branch:', branchName, 'from', baseRef); + } else { + execFileSync('git', ['worktree', 'add', '--detach', worktreePath, baseRef], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Created worktree in detached HEAD mode from', baseRef); + } + + const config: TerminalWorktreeConfig = { + name, + worktreePath, + branchName: createGitBranch ? branchName : '', + baseBranch, + hasGitBranch: createGitBranch, + taskId, + createdAt: new Date().toISOString(), + terminalId, + }; + + saveWorktreeConfig(worktreePath, config); + debugLog('[TerminalWorktree] Saved config for worktree:', name); + + return { success: true, config }; + } catch (error) { + debugError('[TerminalWorktree] Error creating worktree:', error); + + // Cleanup: remove the worktree directory if git worktree creation failed + if (directoryCreated && existsSync(worktreePath)) { + try { + rmSync(worktreePath, { recursive: true, force: true }); + debugLog('[TerminalWorktree] Cleaned up failed worktree directory:', worktreePath); + // Also prune stale worktree registrations in case git worktree add partially succeeded + try { + execFileSync('git', ['worktree', 'prune'], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Pruned stale worktree registrations'); + } catch { + // Ignore prune errors - not critical + } + } catch (cleanupError) { + debugError('[TerminalWorktree] Failed to cleanup worktree directory:', cleanupError); + } + } + + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to create worktree',
+    };
+  }
+}
+
+async function listTerminalWorktrees(projectPath: string): Promise<TerminalWorktreeConfig[]> {
+  // Validate projectPath against registered projects
+  if (!isValidProjectPath(projectPath)) {
+    debugError('[TerminalWorktree] Invalid project path for listing:', projectPath);
+    return [];
+  }
+
+  const configs: TerminalWorktreeConfig[] = [];
+  const worktreeDir = getTerminalWorktreeDir(projectPath);
+
+  if (existsSync(worktreeDir)) {
+    try {
+      for (const dir of readdirSync(worktreeDir, { withFileTypes: true })) {
+        if (dir.isDirectory()) {
+          const worktreePath = path.join(worktreeDir, dir.name);
+          const config = loadWorktreeConfig(worktreePath);
+          if (config) {
+            configs.push(config);
+          }
+        }
+      }
+    } catch (error) {
+      debugError('[TerminalWorktree] Error listing worktrees:', error);
+    }
+  }
+
+  return configs;
+}
+
+async function removeTerminalWorktree(
+  projectPath: string,
+  name: string,
+  deleteBranch: boolean = false
+): Promise<IPCResult> {
+  debugLog('[TerminalWorktree] Removing worktree:', { name, deleteBranch, projectPath });
+
+  // Validate projectPath against registered projects
+  if (!isValidProjectPath(projectPath)) {
+    return { success: false, error: 'Invalid project path' };
+  }
+
+  // Validate worktree name to prevent path traversal
+  if (!WORKTREE_NAME_REGEX.test(name)) {
+    return { success: false, error: 'Invalid worktree name' };
+  }
+
+  const worktreePath = getTerminalWorktreePath(projectPath, name);
+  const config = loadWorktreeConfig(worktreePath);
+
+  if (!config) {
+    return { success: false, error: 'Worktree not found' };
+  }
+
+  try {
+    if (existsSync(worktreePath)) {
+      execFileSync('git', ['worktree', 'remove', '--force', worktreePath], {
+        cwd: projectPath,
+        encoding: 'utf-8',
+        stdio: ['pipe', 'pipe', 'pipe'],
+      });
+      debugLog('[TerminalWorktree] Removed git worktree');
+    }
+
+    if (deleteBranch && config.hasGitBranch && config.branchName) {
+      // Re-validate branch name from config file (defense in depth - config could be modified)
+      if (!GIT_BRANCH_REGEX.test(config.branchName)) {
+        debugError('[TerminalWorktree] Invalid branch name in config:', config.branchName);
+      } else {
+        try {
+          execFileSync('git', ['branch', '-D', config.branchName], {
+            cwd: projectPath,
+            encoding: 'utf-8',
+            stdio: ['pipe', 'pipe', 'pipe'],
+          });
+          debugLog('[TerminalWorktree] Deleted branch:', config.branchName);
+        } catch {
+          debugLog('[TerminalWorktree] Branch not found or already deleted:', config.branchName);
+        }
+      }
+    }
+
+    return { success: true };
+  } catch (error) {
+    debugError('[TerminalWorktree] Error removing worktree:', error);
+    return {
+      success: false,
+      error: error instanceof Error ? error.message : 'Failed to remove worktree',
+    };
+  }
+}
+
+export function registerTerminalWorktreeHandlers(): void {
+  ipcMain.handle(
+    IPC_CHANNELS.TERMINAL_WORKTREE_CREATE,
+    async (_, request: CreateTerminalWorktreeRequest): Promise<TerminalWorktreeResult> => {
+      return createTerminalWorktree(request);
+    }
+  );
+
+  ipcMain.handle(
+    IPC_CHANNELS.TERMINAL_WORKTREE_LIST,
+    async (_, projectPath: string): Promise<IPCResult<TerminalWorktreeConfig[]>> => {
+      try {
+        const configs = await listTerminalWorktrees(projectPath);
+        return { success: true, data: configs };
+      } catch (error) {
+        return {
+          success: false,
+          error: error instanceof Error ? error.message : 'Failed to list worktrees',
+        };
+      }
+    }
+  );
+
+  ipcMain.handle(
+    IPC_CHANNELS.TERMINAL_WORKTREE_REMOVE,
+    async (
+      _,
+      projectPath: string,
+      name: string,
+      deleteBranch: boolean
+    ): Promise<IPCResult> => {
+      return removeTerminalWorktree(projectPath, name, deleteBranch);
+    }
+  );
+}

diff --git a/apps/frontend/src/main/memory-env-builder.ts b/apps/frontend/src/main/memory-env-builder.ts
index 804c952600..6382757d73 100644
--- a/apps/frontend/src/main/memory-env-builder.ts
+++ b/apps/frontend/src/main/memory-env-builder.ts
@@ -8,6 +8,7 @@
  */
 
 import type { AppSettings } from '../shared/types/settings';
+import { getMemoriesDir } from './config-paths';
 
 /**
  * Build environment variables for memory/Graphiti configuration from app settings.
@@ -26,6 +27,10 @@ export function buildMemoryEnvVars(settings: AppSettings): Record<string, string> {

diff --git a/apps/frontend/src/main/memory-service.ts b/apps/frontend/src/main/memory-service.ts
@@ … @@
     // … -> apps/backend
     path.resolve(__dirname, '..', '..', '..', 'backend', 'query_memory.py'),
     path.resolve(app.getAppPath(), '..', 'backend', 'query_memory.py'),
@@ -112,6 +114,68 @@ function getQueryScriptPath(): string | null {
   return null;
 }
 
+/**
+ * Get the backend venv Python path.
+ * The backend venv has real_ladybug installed (required for memory operations).
+ * Falls back to getConfiguredPythonPath() for packaged apps.
+ */
+function getBackendPythonPath(): string {
+  // For packaged apps, use the bundled Python which has real_ladybug in site-packages
+  if (app.isPackaged) {
+    const fallbackPython = getConfiguredPythonPath();
+    console.log(`[MemoryService] Using bundled Python for packaged app: ${fallbackPython}`);
+    return fallbackPython;
+  }
+
+  // Development mode: Find the backend venv which has real_ladybug installed
+  const possibleBackendPaths = [
+    path.resolve(__dirname, '..', '..', '..', 'backend'),
+    path.resolve(app.getAppPath(), '..', 'backend'),
+    path.resolve(process.cwd(), 'apps', 'backend')
+  ];
+
+  for (const backendPath of possibleBackendPaths) {
+    // Check for backend venv Python (has real_ladybug installed)
+    const venvPython = process.platform === 'win32'
+      ? path.join(backendPath, '.venv', 'Scripts', 'python.exe')
+      : path.join(backendPath, '.venv', 'bin', 'python');
+
+    if (fs.existsSync(venvPython)) {
+      console.log(`[MemoryService] Using backend venv Python: ${venvPython}`);
+      return venvPython;
+    }
+  }
+
+  // Fall back to configured Python path
+  const fallbackPython = getConfiguredPythonPath();
+  console.log(`[MemoryService] Backend venv not found, falling back to: ${fallbackPython}`);
+  return fallbackPython;
+}
+
+/**
+ * Get the Python environment variables for memory queries.
+ * This ensures real_ladybug can be found in both dev and packaged modes.
+ */
+function getMemoryPythonEnv(): Record<string, string> {
+  // Start with the standard Python environment from the manager
+  const baseEnv = pythonEnvManager.getPythonEnv();
+
+  // For packaged apps, ensure PYTHONPATH includes bundled site-packages
+  // even if the manager hasn't been fully initialized
+  if (app.isPackaged) {
+    const bundledSitePackages = path.join(process.resourcesPath, 'python-site-packages');
+    if (fs.existsSync(bundledSitePackages)) {
+      // Merge paths: bundled site-packages takes precedence
+      const existingPath = baseEnv.PYTHONPATH || '';
+      baseEnv.PYTHONPATH = existingPath
+        ? `${bundledSitePackages}${path.delimiter}${existingPath}`
+        : bundledSitePackages;
+    }
+  }
+
+  return baseEnv;
+}
+
 /**
  * Execute a Python memory query command
  */
@@ -120,7 +184,10 @@ async function executeQuery(
   args: string[],
   timeout: number = 10000
 ): Promise {
-  const pythonCmd = getConfiguredPythonPath();
+  // Use getBackendPythonPath() to find the correct Python:
+  // - In dev mode: uses backend venv with real_ladybug installed
+  // - In packaged app: falls back to bundled Python
+  const pythonCmd = getBackendPythonPath();
   const scriptPath = getQueryScriptPath();
 
   if (!scriptPath) {
@@ -131,9 +198,16 @@
   return new Promise((resolve) => {
     const fullArgs = [...baseArgs, scriptPath, command, ...args];
+
+    // Get Python environment (includes PYTHONPATH for bundled/venv packages)
+    // This is critical for finding real_ladybug (LadybugDB)
+    const pythonEnv = getMemoryPythonEnv();
+
     const proc = spawn(pythonExe, fullArgs, {
       stdio: ['ignore', 'pipe', 'pipe'],
       timeout,
+      // Use pythonEnv which combines sanitized env + site-packages for real_ladybug
+      env: pythonEnv,
     });
 
     let stdout = '';
@@ -148,19 +222,29 @@
     });
 
     proc.on('close', (code) => {
-      if (code === 0 && stdout) {
+      // The Python script outputs JSON to stdout (even for errors)
+      // Always try to parse stdout first to get the actual error message
+      if (stdout) {
         try {
           const result = JSON.parse(stdout);
           resolve(result);
+          return;
         } catch {
+          // JSON parsing failed
+          if (code !== 0) {
+            const errorMsg = stderr || stdout || `Process exited with code ${code}`;
+            console.error('[MemoryService] Python error:', errorMsg);
+            resolve({ success: false, error: errorMsg });
+            return;
+          }
           resolve({ success: false, error: `Invalid JSON response: ${stdout}` });
+          return;
         }
-      } else {
-        resolve({
-          success: false,
-          error: stderr || `Process exited with code ${code}`,
-        });
       }
+      // No stdout - use stderr or generic error
+      const errorMsg = stderr || `Process exited with code ${code}`;
+      console.error('[MemoryService] Python error (no stdout):', errorMsg);
+      resolve({ success: false, error: errorMsg });
     });
 
     proc.on('error', (err) => {
@@ -183,7 +267,10 @@ async function executeSemanticQuery(
   embedderConfig: EmbedderConfig,
   timeout: number = 30000 // Longer timeout for embedding operations
 ): Promise {
-  const pythonCmd = getConfiguredPythonPath();
+  // Use getBackendPythonPath() to find the correct Python:
+  // - In dev mode: uses backend venv with real_ladybug installed
+  // - In packaged app: falls back to bundled Python
+  const pythonCmd = getBackendPythonPath();
   const scriptPath = getQueryScriptPath();
 
   if (!scriptPath) {
@@ -192,8 +279,13 @@
   const [pythonExe, baseArgs] = parsePythonCommand(pythonCmd);
 
+  // Get Python environment (includes PYTHONPATH for bundled/venv packages)
+  // This is critical for finding real_ladybug (LadybugDB)
+  const pythonEnv = getMemoryPythonEnv();
+
   // Build environment with embedder configuration
-  const env: Record<string, string> = { ...process.env };
+  // Use pythonEnv which combines sanitized env + site-packages for real_ladybug
+  const env: Record<string, string> = { ...pythonEnv };
 
   // Set the embedder provider
   env.GRAPHITI_EMBEDDER_PROVIDER = embedderConfig.provider;
@@ -272,19 +364,26 @@
     });
 
     proc.on('close', (code) => {
-      if (code === 0 && stdout) {
+      // The Python script outputs JSON to stdout (even for errors)
+      if (stdout) {
         try {
           const result = JSON.parse(stdout);
           resolve(result);
+          return;
         } catch {
+          if (code !== 0) {
+            const errorMsg = stderr || stdout || `Process exited with code ${code}`;
+            console.error('[MemoryService] Semantic search error:', errorMsg);
+            resolve({ success: false, error: errorMsg });
+            return;
+          }
           resolve({ success: false, error: `Invalid JSON response: ${stdout}` });
+          return;
         }
-      } else {
-        resolve({
-          success: false,
-          error: stderr || `Process exited with code ${code}`,
-        });
       }
+      const errorMsg = stderr || `Process exited with code ${code}`;
+      console.error('[MemoryService] Semantic search error (no stdout):', errorMsg);
+      resolve({ success: false, error: errorMsg });
     });
 
     proc.on('error', (err) => {
@@ -526,6 +625,50 @@ export class MemoryService {
     };
   }
 
+  /**
+   * Add an episode to the memory database
+   *
+   * This allows the Electron app to save memories (like PR review insights)
+   * directly to LadybugDB without going through the full Graphiti system.
+   *
+   * @param name Episode name/title
+   * @param content Episode content (will be JSON stringified if object)
+   * @param episodeType Type of episode (session_insight, pattern, gotcha, task_outcome, pr_review)
+   * @param groupId Optional group ID for namespacing
+   * @returns Promise with the created episode info
+   */
+  async addEpisode(
+    name: string,
+    content: string | object,
+    episodeType: string = 'session_insight',
+    groupId?: string
+  ): Promise<{ success: boolean; id?: string; error?: string }> {
+    // Stringify content if it's an object
+    const contentStr = typeof content === 'object' ? JSON.stringify(content) : content;
+
+    const args = [
+      this.config.dbPath,
+      this.config.database,
+      '--name', name,
+      '--content', contentStr,
+      '--type', episodeType,
+    ];
+
+    if (groupId) {
+      args.push('--group-id', groupId);
+    }
+
+    const result = await executeQuery('add-episode', args);
+
+    if (!result.success) {
+      console.error('Failed to add episode:', result.error);
+      return { success: false, error: result.error };
+    }
+
+    const data = result.data as { id: string; name: string; type: string; timestamp: string };
+    return { success: true, id: data.id };
+  }
+
   /**
    * Close the database connection (no-op for subprocess model)
    */

diff --git a/apps/frontend/src/main/project-store.ts b/apps/frontend/src/main/project-store.ts
index 5d627c0160..dedc374fed 100644
--- a/apps/frontend/src/main/project-store.ts
+++ b/apps/frontend/src/main/project-store.ts
@@ -5,6 +5,7 @@ import { v4 as uuidv4 } from 'uuid';
 import type { Project, ProjectSettings, Task, TaskStatus, TaskMetadata, ImplementationPlan, ReviewReason, PlanSubtask } from '../shared/types';
 import { DEFAULT_PROJECT_SETTINGS, AUTO_BUILD_PATHS, getSpecsDir } from '../shared/constants';
 import { getAutoBuildPath, isInitialized } from './project-initializer';
+import { getTaskWorktreeDir } from './worktree-paths';
 
 interface TabState {
   openProjectIds: string[];
@@ -18,12 +19,19 @@ interface StoreData {
   tabState?: TabState;
 }
 
+interface TasksCacheEntry {
+  tasks: Task[];
+  timestamp: number;
+}
+
 /**
  * Persistent storage for projects and settings
 */
 export class ProjectStore {
   private storePath: string;
   private data: StoreData;
+  private tasksCache: Map<string, TasksCacheEntry> = new Map();
+  private readonly CACHE_TTL_MS = 3000; // 3 seconds TTL for task cache
 
   constructor() {
     // Store in app's userData directory
@@ -235,9 +243,19 @@
   /**
    * Get tasks for a project by scanning specs directory
+   * Implements caching with 3-second TTL to prevent excessive worktree scanning
    */
   getTasks(projectId: string): Task[] {
-    console.warn('[ProjectStore] getTasks called with projectId:', 
projectId); + // Check cache first + const cached = this.tasksCache.get(projectId); + const now = Date.now(); + + if (cached && (now - cached.timestamp) < this.CACHE_TTL_MS) { + console.debug('[ProjectStore] Returning cached tasks for project:', projectId, '(age:', now - cached.timestamp, 'ms)'); + return cached.tasks; + } + + console.warn('[ProjectStore] getTasks called with projectId:', projectId, cached ? '(cache expired)' : '(cache miss)'); const project = this.getProject(projectId); if (!project) { console.warn('[ProjectStore] Project not found for id:', projectId); @@ -263,8 +281,7 @@ export class ProjectStore { // 2. Scan worktree specs directories // NOTE FOR MAINTAINERS: Worktree tasks are only included if the spec also exists in main. // This prevents deleted tasks from "coming back" when the worktree isn't cleaned up. - // Alternative behavior: include all worktree tasks (remove the mainSpecIds check below). - const worktreesDir = path.join(project.path, '.worktrees'); + const worktreesDir = getTaskWorktreeDir(project.path); if (existsSync(worktreesDir)) { try { const worktrees = readdirSync(worktreesDir, { withFileTypes: true }); @@ -303,9 +320,31 @@ export class ProjectStore { const tasks = Array.from(taskMap.values()); console.warn('[ProjectStore] Returning', tasks.length, 'unique tasks (after deduplication)'); + + // Update cache + this.tasksCache.set(projectId, { tasks, timestamp: now }); + return tasks; } + /** + * Invalidate the tasks cache for a specific project + * Call this when tasks are modified (created, deleted, status changed, etc.) + */ + invalidateTasksCache(projectId: string): void { + this.tasksCache.delete(projectId); + console.debug('[ProjectStore] Invalidated tasks cache for project:', projectId); + } + + /** + * Clear all tasks cache entries + * Useful for global refresh scenarios + */ + clearTasksCache(): void { + this.tasksCache.clear(); + console.debug('[ProjectStore] Cleared all tasks cache'); + } + /** * Load tasks from a specs directory (helper method for main project and worktrees) */ @@ -360,27 +399,8 @@ export class ProjectStore { const reqContent = readFileSync(requirementsPath, 'utf-8'); const requirements = JSON.parse(reqContent); if (requirements.task_description) { - // Extract a clean summary from task_description (first line or first ~200 chars) - const taskDesc = requirements.task_description; - const firstLine = taskDesc.split('\n')[0].trim(); - // If the first line is a title like "Investigate GitHub Issue #36", use the next meaningful line - if (firstLine.toLowerCase().startsWith('investigate') && taskDesc.includes('\n\n')) { - const sections = taskDesc.split('\n\n'); - // Find the first paragraph that's not a title - for (const section of sections) { - const trimmed = section.trim(); - // Skip headers and short lines - if (trimmed.startsWith('#') || trimmed.length < 20) continue; - // Skip the "Please analyze" instruction at the end - if (trimmed.startsWith('Please analyze')) continue; - description = trimmed.substring(0, 200).split('\n')[0]; - break; - } - } - // If still no description, use a shortened version of task_description - if (!description) { - description = firstLine.substring(0, 150); - } + // Use the full task description for the modal view + description = requirements.task_description; } } catch { // Ignore parse errors @@ -563,11 +583,16 @@ export class ProjectStore { // planStatus: "review" indicates spec creation is complete and awaiting user approval const isPlanReviewStage = (plan as unknown as { planStatus?: string 
})?.planStatus === 'review'; + // Determine if there is remaining work to do + // True if: no subtasks exist yet (planning in progress) OR some subtasks are incomplete + // This prevents 'in_progress' from overriding 'human_review' when all work is done + const hasRemainingWork = allSubtasks.length === 0 || allSubtasks.some((s) => s.status !== 'completed'); + + const isStoredStatusValid = (storedStatus === calculatedStatus) || // Matches calculated - (storedStatus === 'human_review' && calculatedStatus === 'ai_review') || // Human review is more advanced than ai_review + (storedStatus === 'human_review' && (calculatedStatus === 'ai_review' || calculatedStatus === 'in_progress')) || // Human review is more advanced than ai_review or in_progress (fixes status loop bug) (storedStatus === 'human_review' && isPlanReviewStage) || // Plan review stage (awaiting spec approval) - (isActiveProcessStatus && storedStatus === 'in_progress'); // Planning/coding phases should show as in_progress + (isActiveProcessStatus && storedStatus === 'in_progress' && hasRemainingWork); // Planning/coding phases should show as in_progress ONLY when there's remaining work if (isStoredStatusValid) { // Preserve reviewReason for human_review status @@ -643,7 +668,7 @@ export class ProjectStore { } // 2. Check worktrees - const worktreesDir = path.join(projectPath, '.worktrees'); + const worktreesDir = getTaskWorktreeDir(projectPath); if (existsSync(worktreesDir)) { try { const worktrees = readdirSync(worktreesDir, { withFileTypes: true }); @@ -721,6 +746,9 @@ export class ProjectStore { } } + // Invalidate cache since task metadata changed + this.invalidateTasksCache(projectId); + return !hasErrors; } @@ -777,6 +805,9 @@ export class ProjectStore { } } + // Invalidate cache since task metadata changed + this.invalidateTasksCache(projectId); + return !hasErrors; } } diff --git a/apps/frontend/src/main/python-env-manager.ts b/apps/frontend/src/main/python-env-manager.ts index 608ba5fda5..778c641af0 100644 --- a/apps/frontend/src/main/python-env-manager.ts +++ b/apps/frontend/src/main/python-env-manager.ts @@ -619,23 +619,40 @@ if sys.version_info >= (3, 12): /** * Get environment variables that should be set when spawning Python processes. * This ensures Python finds the bundled packages or venv packages. + * + * IMPORTANT: This returns a COMPLETE environment (based on process.env) with + * problematic Python variables removed. This fixes the "Could not find platform + * independent libraries <prefix>" error on Windows when PYTHONHOME is set.
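To make the pattern described above concrete, here is a minimal self-contained sketch. It mirrors the sanitization logic as an assumption on my part (only the diff hunk is shown), and the spawn call, interpreter name, and inline script are illustrative only:

import { spawn } from 'node:child_process';

// Sketch of the same PYTHONHOME-stripping pattern (assumed to mirror getPythonEnv()).
function sanitizedPythonEnv(sitePackagesPath?: string): Record<string, string> {
  const baseEnv: Record<string, string> = {};
  for (const [key, value] of Object.entries(process.env)) {
    // Windows env var names are case-insensitive, so compare uppercased.
    if (key.toUpperCase() !== 'PYTHONHOME' && value !== undefined) {
      baseEnv[key] = value;
    }
  }
  return {
    ...baseEnv,
    PYTHONDONTWRITEBYTECODE: '1',
    PYTHONIOENCODING: 'utf-8',
    PYTHONNOUSERSITE: '1',
    ...(sitePackagesPath ? { PYTHONPATH: sitePackagesPath } : {}),
  };
}

// Hypothetical caller: with PYTHONHOME removed, the spawned interpreter
// derives sys.prefix from its own binary location instead of a stale value.
const child = spawn('python3', ['-c', 'import sys; print(sys.prefix)'], {
  env: sanitizedPythonEnv(),
});
child.stdout?.pipe(process.stdout);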
+ * + * @see https://github.com/AndyMik90/Auto-Claude/issues/176 */ getPythonEnv(): Record<string, string> { - const env: Record<string, string> = { + // Start with process.env but explicitly remove problematic Python variables + // PYTHONHOME causes "Could not find platform independent libraries" when set + // to a different Python installation than the one we're spawning + const baseEnv: Record<string, string> = {}; + + for (const [key, value] of Object.entries(process.env)) { + // Skip PYTHONHOME - it causes the "platform independent libraries" error + // Use case-insensitive check for Windows compatibility (env vars are case-insensitive on Windows) + // Skip undefined values (TypeScript type guard) + if (key.toUpperCase() !== 'PYTHONHOME' && value !== undefined) { + baseEnv[key] = value; + } + } + + // Apply our Python configuration on top + return { + ...baseEnv, // Don't write bytecode - not needed and avoids permission issues PYTHONDONTWRITEBYTECODE: '1', // Use UTF-8 encoding PYTHONIOENCODING: 'utf-8', // Disable user site-packages to avoid conflicts PYTHONNOUSERSITE: '1', + // Override PYTHONPATH if we have bundled packages + ...(this.sitePackagesPath ? { PYTHONPATH: this.sitePackagesPath } : {}), }; - - // Set PYTHONPATH to our site-packages - if (this.sitePackagesPath) { - env.PYTHONPATH = this.sitePackagesPath; - } - - return env; } /** diff --git a/apps/frontend/src/main/release-service.ts b/apps/frontend/src/main/release-service.ts index ed7367d5db..b05152256d 100644 --- a/apps/frontend/src/main/release-service.ts +++ b/apps/frontend/src/main/release-service.ts @@ -344,16 +344,12 @@ export class ReleaseService extends EventEmitter { tasks: Task[] ): Promise<UnmergedWorktreeInfo[]> { const unmerged: UnmergedWorktreeInfo[] = []; - - // Get worktrees directory - const worktreesDir = path.join(projectPath, '.worktrees', 'auto-claude'); + const worktreesDir = path.join(projectPath, '.auto-claude', 'worktrees', 'tasks'); if (!existsSync(worktreesDir)) { - // No worktrees exist at all - all clear return []; } - // List all spec worktrees let worktreeFolders: string[]; try { worktreeFolders = readdirSync(worktreesDir, { withFileTypes: true }) @@ -366,17 +362,16 @@ export class ReleaseService extends EventEmitter { // Check each spec ID that's in this release for (const specId of releaseSpecIds) { // Find the worktree folder for this spec - // Spec IDs are like "001-feature-name", worktree folders match - const worktreeFolder = worktreeFolders.find(folder => + const matchingFolder = worktreeFolders.find(folder => folder === specId || folder.startsWith(`${specId}-`) ); - if (!worktreeFolder) { + if (!matchingFolder) { // No worktree for this spec - it's already merged/cleaned up continue; } - const worktreePath = path.join(worktreesDir, worktreeFolder); + const worktreePath = path.join(worktreesDir, matchingFolder); // Get the task info for better error messages const task = tasks.find(t => t.specId === specId); diff --git a/apps/frontend/src/main/sentry.ts b/apps/frontend/src/main/sentry.ts new file mode 100644 index 0000000000..0ab4e6602a --- /dev/null +++ b/apps/frontend/src/main/sentry.ts @@ -0,0 +1,167 @@ +/** + * Sentry Error Tracking for Main Process + * + * Initializes Sentry with: + * - beforeSend hook for mid-session toggle support (no restart needed) + * - Path masking for user privacy (shared with renderer) + * - IPC listener for settings changes from renderer + * + * Privacy Note: + * - Usernames are masked from all file paths + * - Project paths remain visible for debugging (this is expected) + * - Tags, contexts, extra data, and user info
are all sanitized + */ + +import * as Sentry from '@sentry/electron/main'; +import { app, ipcMain } from 'electron'; +import { readSettingsFile } from './settings-utils'; +import { DEFAULT_APP_SETTINGS } from '../shared/constants'; +import { IPC_CHANNELS } from '../shared/constants/ipc'; +import { + processEvent, + PRODUCTION_TRACE_SAMPLE_RATE, + type SentryErrorEvent +} from '../shared/utils/sentry-privacy'; + +// In-memory state for current setting (updated via IPC when user toggles) +let sentryEnabledState = true; + +/** + * Get Sentry DSN from environment variable + * + * For local development/testing: + * - Add SENTRY_DSN to your .env file, or + * - Run: SENTRY_DSN=your-dsn npm start + * + * For CI/CD releases: + * - Set SENTRY_DSN as a GitHub Actions secret + * + * For forks: + * - Without SENTRY_DSN, Sentry is disabled (safe for forks) + */ +function getSentryDsn(): string { + return process.env.SENTRY_DSN || ''; +} + +/** + * Get trace sample rate from environment variable + * Controls performance monitoring sampling (0.0 to 1.0) + * Default: 0.1 (10%) in production, 0 in development + */ +function getTracesSampleRate(): number { + const envValue = process.env.SENTRY_TRACES_SAMPLE_RATE; + if (envValue !== undefined) { + const parsed = parseFloat(envValue); + if (!isNaN(parsed) && parsed >= 0 && parsed <= 1) { + return parsed; + } + } + // Default: 10% in production, 0 in dev + return app.isPackaged ? PRODUCTION_TRACE_SAMPLE_RATE : 0; +} + +/** + * Get profile sample rate from environment variable + * Controls profiling sampling relative to traces (0.0 to 1.0) + * Default: 0.1 (10%) in production, 0 in development + */ +function getProfilesSampleRate(): number { + const envValue = process.env.SENTRY_PROFILES_SAMPLE_RATE; + if (envValue !== undefined) { + const parsed = parseFloat(envValue); + if (!isNaN(parsed) && parsed >= 0 && parsed <= 1) { + return parsed; + } + } + // Default: 10% in production, 0 in dev + return app.isPackaged ? PRODUCTION_TRACE_SAMPLE_RATE : 0; +} + +// Cache config so renderer can access it via IPC +let cachedDsn: string = ''; +let cachedTracesSampleRate: number = 0; +let cachedProfilesSampleRate: number = 0; + +/** + * Initialize Sentry for the main process + * Called early in app startup, before window creation + */ +export function initSentryMain(): void { + // Get configuration from environment variables + cachedDsn = getSentryDsn(); + cachedTracesSampleRate = getTracesSampleRate(); + cachedProfilesSampleRate = getProfilesSampleRate(); + + // Read initial setting from disk synchronously + const savedSettings = readSettingsFile(); + const settings = { ...DEFAULT_APP_SETTINGS, ...savedSettings }; + sentryEnabledState = settings.sentryEnabled ?? true; + + // Check if we have a DSN - if not, Sentry is effectively disabled + const hasDsn = cachedDsn.length > 0; + const shouldEnable = hasDsn && (app.isPackaged || process.env.SENTRY_DEV === 'true'); + + if (!hasDsn) { + console.log('[Sentry] No SENTRY_DSN configured - error reporting disabled'); + console.log('[Sentry] To enable: set SENTRY_DSN environment variable'); + } + + Sentry.init({ + dsn: cachedDsn, + environment: app.isPackaged ? 
'production' : 'development', + release: `auto-claude@${app.getVersion()}`, + + beforeSend(event: Sentry.ErrorEvent) { + if (!sentryEnabledState) { + return null; + } + // Process event with shared privacy utility + return processEvent(event as SentryErrorEvent) as Sentry.ErrorEvent; + }, + + // Sample rates from environment variables (default: 10% in production, 0 in dev) + tracesSampleRate: cachedTracesSampleRate, + profilesSampleRate: cachedProfilesSampleRate, + + // Only enable if we have a DSN and are in production (or SENTRY_DEV is set) + enabled: shouldEnable, + }); + + // Listen for settings changes from renderer process + ipcMain.on(IPC_CHANNELS.SENTRY_STATE_CHANGED, (_event, enabled: boolean) => { + sentryEnabledState = enabled; + console.log(`[Sentry] Error reporting ${enabled ? 'enabled' : 'disabled'} (via IPC)`); + }); + + // IPC handler for renderer to get Sentry config + ipcMain.handle(IPC_CHANNELS.GET_SENTRY_DSN, () => { + return cachedDsn; + }); + + ipcMain.handle(IPC_CHANNELS.GET_SENTRY_CONFIG, () => { + return { + dsn: cachedDsn, + tracesSampleRate: cachedTracesSampleRate, + profilesSampleRate: cachedProfilesSampleRate, + }; + }); + + if (hasDsn) { + console.log(`[Sentry] Main process initialized (enabled: ${sentryEnabledState}, traces: ${cachedTracesSampleRate}, profiles: ${cachedProfilesSampleRate})`); + } +} + +/** + * Get current Sentry enabled state + */ +export function isSentryEnabled(): boolean { + return sentryEnabledState; +} + +/** + * Set Sentry enabled state programmatically + */ +export function setSentryEnabled(enabled: boolean): void { + sentryEnabledState = enabled; + console.log(`[Sentry] Error reporting ${enabled ? 'enabled' : 'disabled'} (programmatic)`); +} diff --git a/apps/frontend/src/main/services/profile-service.test.ts b/apps/frontend/src/main/services/profile-service.test.ts new file mode 100644 index 0000000000..028e7c9bdf --- /dev/null +++ b/apps/frontend/src/main/services/profile-service.test.ts @@ -0,0 +1,1031 @@ +/** + * Tests for profile-service.ts + * + * Red phase - write failing tests first + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { + validateBaseUrl, + validateApiKey, + validateProfileNameUnique, + createProfile, + updateProfile, + getAPIProfileEnv, + testConnection +} from './profile-service'; +import type { APIProfile, ProfilesFile, TestConnectionResult } from '../../shared/types/profile'; + +// Mock profile-manager +vi.mock('../utils/profile-manager', () => ({ + loadProfilesFile: vi.fn(), + saveProfilesFile: vi.fn(), + generateProfileId: vi.fn(() => 'mock-uuid-1234') +})); + +describe('profile-service', () => { + describe('validateBaseUrl', () => { + it('should accept valid HTTPS URLs', () => { + expect(validateBaseUrl('https://api.anthropic.com')).toBe(true); + expect(validateBaseUrl('https://custom-api.example.com')).toBe(true); + expect(validateBaseUrl('https://api.example.com/v1')).toBe(true); + }); + + it('should accept valid HTTP URLs', () => { + expect(validateBaseUrl('http://localhost:8080')).toBe(true); + expect(validateBaseUrl('http://127.0.0.1:8000')).toBe(true); + }); + + it('should reject invalid URLs', () => { + expect(validateBaseUrl('not-a-url')).toBe(false); + expect(validateBaseUrl('ftp://example.com')).toBe(false); + expect(validateBaseUrl('')).toBe(false); + expect(validateBaseUrl('https://')).toBe(false); + }); + + it('should reject URLs without valid format', () => { + expect(validateBaseUrl('anthropic.com')).toBe(false); + 
expect(validateBaseUrl('://api.anthropic.com')).toBe(false); + }); + }); + + describe('validateApiKey', () => { + it('should accept Anthropic API key format (sk-ant-...)', () => { + expect(validateApiKey('sk-ant-api03-12345')).toBe(true); + expect(validateApiKey('sk-ant-test-key')).toBe(true); + }); + + it('should accept OpenAI API key format (sk-...)', () => { + expect(validateApiKey('sk-proj-12345')).toBe(true); + expect(validateApiKey('sk-test-key-12345')).toBe(true); + }); + + it('should accept custom API keys with reasonable length', () => { + expect(validateApiKey('custom-key-12345678')).toBe(true); + expect(validateApiKey('x-api-key-abcdefghij')).toBe(true); + }); + + it('should reject empty or too short keys', () => { + expect(validateApiKey('')).toBe(false); + expect(validateApiKey('sk-')).toBe(false); + expect(validateApiKey('abc')).toBe(false); + }); + + it('should reject keys with only whitespace', () => { + expect(validateApiKey(' ')).toBe(false); + expect(validateApiKey('\t\n')).toBe(false); + }); + }); + + describe('validateProfileNameUnique', () => { + it('should return true when name is unique', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique('New Profile'); + expect(result).toBe(true); + }); + + it('should return false when name already exists', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique('Existing Profile'); + expect(result).toBe(false); + }); + + it('should be case-insensitive for duplicate detection', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result1 = await validateProfileNameUnique('my profile'); + const result2 = await validateProfileNameUnique('MY PROFILE'); + expect(result1).toBe(false); + expect(result2).toBe(false); + }); + + it('should trim whitespace before checking', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique(' My Profile '); + expect(result).toBe(false); + }); + }); + + describe('createProfile', () => { + it('should create profile with valid data and save', async () => { + const mockFile: ProfilesFile = { + 
profiles: [], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile, generateProfileId } = + await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + vi.mocked(generateProfileId).mockReturnValue('generated-id-123'); + + const input = { + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key', + models: { + default: 'claude-3-5-sonnet-20241022' + } + }; + + const result = await createProfile(input); + + expect(result).toMatchObject({ + id: 'generated-id-123', + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key', + models: { + default: 'claude-3-5-sonnet-20241022' + } + }); + expect(result.createdAt).toBeGreaterThan(0); + expect(result.updatedAt).toBeGreaterThan(0); + expect(saveProfilesFile).toHaveBeenCalled(); + }); + + it('should throw error for invalid base URL', async () => { + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue({ + profiles: [], + activeProfileId: null, + version: 1 + }); + + const input = { + name: 'Test Profile', + baseUrl: 'not-a-url', + apiKey: 'sk-ant-test-key' + }; + + await expect(createProfile(input)).rejects.toThrow('Invalid base URL'); + }); + + it('should throw error for invalid API key', async () => { + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue({ + profiles: [], + activeProfileId: null, + version: 1 + }); + + const input = { + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'too-short' + }; + + await expect(createProfile(input)).rejects.toThrow('Invalid API key'); + }); + + it('should throw error for duplicate profile name', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + name: 'Existing Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key' + }; + + await expect(createProfile(input)).rejects.toThrow( + 'A profile with this name already exists' + ); + }); + }); + + describe('updateProfile', () => { + it('should update profile name and other fields', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Old Name', + baseUrl: 'https://old-api.example.com', + apiKey: 'sk-old-key-12345678', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + + const input = { + id: 'existing-id', + name: 'New Name', + baseUrl: 'https://new-api.example.com', + apiKey: 'sk-new-api-key-123', + models: { default: 'claude-3-5-sonnet-20241022' } + }; + + const result = await updateProfile(input); + + expect(result.name).toBe('New Name'); + expect(result.baseUrl).toBe('https://new-api.example.com'); + expect(result.apiKey).toBe('sk-new-api-key-123'); + expect(result.models).toEqual({ default: 'claude-3-5-sonnet-20241022' }); 
+ expect(result.updatedAt).toBeGreaterThan(1000000); // updatedAt should be refreshed + expect(result.createdAt).toBe(1000000); // createdAt should remain unchanged + }); + + it('should allow updating profile with same name (case-insensitive)', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-old-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + + const input = { + id: 'existing-id', + name: 'my profile', // Same name, different case + baseUrl: 'https://new-api.example.com', + apiKey: 'sk-new-api-key-456' + }; + + const result = await updateProfile(input); + expect(result.name).toBe('my profile'); + expect(saveProfilesFile).toHaveBeenCalled(); + }); + + it('should throw error when name conflicts with another profile', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Profile One', + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678', + createdAt: 1000000, + updatedAt: 1000000 + }, + { + id: 'profile-2', + name: 'Profile Two', + baseUrl: 'https://api2.example.com', + apiKey: 'sk-key-two-12345678', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'profile-1', + name: 'Profile Two', // Name that exists on profile-2 + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678' + }; + + await expect(updateProfile(input)).rejects.toThrow( + 'A profile with this name already exists' + ); + }); + + it('should throw error for invalid base URL', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'not-a-url', + apiKey: 'sk-test-api-key-123' + }; + + await expect(updateProfile(input)).rejects.toThrow('Invalid base URL'); + }); + + it('should throw error for invalid API key', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'too-short' + }; + + await expect(updateProfile(input)).rejects.toThrow('Invalid API key'); + }); + + it('should throw error when profile not found', async () => { + const mockFile: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + 
vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'non-existent-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-api-key-123' + }; + + await expect(updateProfile(input)).rejects.toThrow('Profile not found'); + }); + }); + + describe('getAPIProfileEnv', () => { + it('should return empty object when no active profile (OAuth mode)', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-key-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, // No active profile = OAuth mode + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + expect(result).toEqual({}); + }); + + it('should return empty object when activeProfileId is empty string', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-key-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: '', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + expect(result).toEqual({}); + }); + + it('should return correct env vars for active profile with all fields', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.custom.com', + apiKey: 'sk-test-key-12345678', + models: { + default: 'claude-3-5-sonnet-20241022', + haiku: 'claude-3-5-haiku-20241022', + sonnet: 'claude-3-5-sonnet-20241022', + opus: 'claude-3-5-opus-20241022' + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api.custom.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_HAIKU_MODEL: 'claude-3-5-haiku-20241022', + ANTHROPIC_DEFAULT_SONNET_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_OPUS_MODEL: 'claude-3-5-opus-20241022' + }); + }); + + it('should filter out empty string values', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: '', + apiKey: 'sk-test-key-12345678', + models: { + default: 'claude-3-5-sonnet-20241022', + haiku: '', + sonnet: '' + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + // Empty baseUrl should be filtered out + expect(result).not.toHaveProperty('ANTHROPIC_BASE_URL'); + // Empty model values should be filtered out + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_HAIKU_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_SONNET_MODEL'); + // Non-empty values should be present + expect(result).toEqual({ + ANTHROPIC_AUTH_TOKEN: 
'sk-test-key-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022' + }); + }); + + it('should handle missing models object', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-key-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + // No models property + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api.example.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678' + }); + expect(result).not.toHaveProperty('ANTHROPIC_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_HAIKU_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_SONNET_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_OPUS_MODEL'); + }); + + it('should handle partial model configurations', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-key-12345678', + models: { + default: 'claude-3-5-sonnet-20241022' + // Only default model set + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api.example.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022' + }); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_HAIKU_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_SONNET_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_OPUS_MODEL'); + }); + + it('should find active profile by id when multiple profiles exist', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Profile One', + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + }, + { + id: 'profile-2', + name: 'Profile Two', + baseUrl: 'https://api2.example.com', + apiKey: 'sk-key-two-12345678', + models: { default: 'claude-3-5-sonnet-20241022' }, + createdAt: Date.now(), + updatedAt: Date.now() + }, + { + id: 'profile-3', + name: 'Profile Three', + baseUrl: 'https://api3.example.com', + apiKey: 'sk-key-three-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-2', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api2.example.com', + ANTHROPIC_AUTH_TOKEN: 'sk-key-two-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022' + }); + }); + + it('should handle profile not found (activeProfileId points to non-existent profile)', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Profile One', + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'non-existent-id', // Points to profile that 
doesn't exist + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + // Should return empty object gracefully + expect(result).toEqual({}); + }); + + it('should trim whitespace from values before filtering', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: ' https://api.example.com ', // Has whitespace + apiKey: 'sk-test-key-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + // Whitespace should be trimmed, not filtered out + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api.example.com', // Trimmed + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678' + }); + }); + + it('should filter out whitespace-only values', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: ' ', // Whitespace only + apiKey: 'sk-test-key-12345678', + models: { + default: ' ' // Whitespace only + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + // Whitespace-only values should be filtered out + expect(result).not.toHaveProperty('ANTHROPIC_BASE_URL'); + expect(result).not.toHaveProperty('ANTHROPIC_MODEL'); + expect(result).toEqual({ + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678' + }); + }); + }); + + describe('testConnection', () => { + beforeEach(() => { + // Mock fetch globally for testConnection tests + global.fetch = vi.fn(); + }); + + it('should return success for valid credentials (200 response)', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: true, + status: 200, + json: async () => ({ data: [] }) + } as Response); + + const result = await testConnection('https://api.anthropic.com', 'sk-ant-test-key-12'); + + expect(result).toEqual({ + success: true, + message: 'Connection successful' + }); + expect(global.fetch).toHaveBeenCalledWith( + 'https://api.anthropic.com/v1/models', + expect.objectContaining({ + method: 'GET', + headers: expect.objectContaining({ + 'x-api-key': 'sk-ant-test-key-12', + 'anthropic-version': '2023-06-01' + }) + }) + ); + }); + + it('should return auth error for invalid API key (401 response)', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: false, + status: 401, + statusText: 'Unauthorized' + } as Response); + + const result = await testConnection('https://api.anthropic.com', 'sk-invalid-key-12'); + + expect(result).toEqual({ + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' + }); + }); + + it('should return auth error for 403 response', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: false, + status: 403, + statusText: 'Forbidden' + } as Response); + + const result = await testConnection('https://api.anthropic.com', 'sk-forbidden-key'); + + expect(result).toEqual({ + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' 
+ }); + }); + + it('should return endpoint error for invalid URL (404 response)', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: false, + status: 404, + statusText: 'Not Found' + } as Response); + + const result = await testConnection('https://invalid.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'endpoint', + message: 'Invalid endpoint. Please check the Base URL.' + }); + }); + + it('should return network error for connection refused', async () => { + const networkError = new TypeError('Failed to fetch'); + (networkError as any).code = 'ECONNREFUSED'; + + vi.mocked(global.fetch).mockRejectedValue(networkError); + + const result = await testConnection('https://unreachable.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'network', + message: 'Network error. Please check your internet connection.' + }); + }); + + it('should return network error for ENOTFOUND (DNS failure)', async () => { + const dnsError = new TypeError('Failed to fetch'); + (dnsError as any).code = 'ENOTFOUND'; + + vi.mocked(global.fetch).mockRejectedValue(dnsError); + + const result = await testConnection('https://nosuchdomain.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'network', + message: 'Network error. Please check your internet connection.' + }); + }); + + it('should return timeout error for AbortError', async () => { + const abortError = new Error('Aborted'); + abortError.name = 'AbortError'; + + vi.mocked(global.fetch).mockRejectedValue(abortError); + + const result = await testConnection('https://slow.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.' + }); + }); + + it('should return unknown error for other failures', async () => { + vi.mocked(global.fetch).mockRejectedValue(new Error('Unknown error')); + + const result = await testConnection('https://api.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'unknown', + message: 'Connection test failed. Please try again.' + }); + }); + + it('should auto-prepend https:// if missing', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: true, + status: 200, + json: async () => ({ data: [] }) + } as Response); + + await testConnection('api.anthropic.com', 'sk-test-key-12chars'); + + expect(global.fetch).toHaveBeenCalledWith( + 'https://api.anthropic.com/v1/models', + expect.any(Object) + ); + }); + + it('should remove trailing slash from baseUrl', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: true, + status: 200, + json: async () => ({ data: [] }) + } as Response); + + await testConnection('https://api.anthropic.com/', 'sk-test-key-12chars'); + + expect(global.fetch).toHaveBeenCalledWith( + 'https://api.anthropic.com/v1/models', + expect.any(Object) + ); + }); + + it('should return error for empty baseUrl', async () => { + const result = await testConnection('', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'endpoint', + message: 'Invalid endpoint. Please check the Base URL.' 
+ }); + expect(global.fetch).not.toHaveBeenCalled(); + }); + + it('should return error for invalid baseUrl format', async () => { + const result = await testConnection('ftp://invalid-protocol.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'endpoint', + message: 'Invalid endpoint. Please check the Base URL.' + }); + expect(global.fetch).not.toHaveBeenCalled(); + }); + + it('should return error for invalid API key format', async () => { + const result = await testConnection('https://api.anthropic.com', 'short'); + + expect(result).toEqual({ + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' + }); + expect(global.fetch).not.toHaveBeenCalled(); + }); + + it('should abort when signal is triggered', async () => { + const abortController = new AbortController(); + const abortError = new Error('Aborted'); + abortError.name = 'AbortError'; + + vi.mocked(global.fetch).mockRejectedValue(abortError); + + // Abort immediately + abortController.abort(); + + const result = await testConnection('https://api.anthropic.com', 'sk-test-key-12chars', abortController.signal); + + expect(result).toEqual({ + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.' + }); + }); + + it('should set 10 second timeout', async () => { + vi.mocked(global.fetch).mockImplementation(() => + new Promise<Response>((_, reject) => { + setTimeout(() => { + const abortError = new Error('Aborted'); + abortError.name = 'AbortError'; + reject(abortError); + }, 100); // Short delay for test + }) + ); + + const startTime = Date.now(); + const result = await testConnection('https://slow.example.com', 'sk-test-key-12chars'); + const elapsed = Date.now() - startTime; + + expect(result).toEqual({ + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.' + }); + // Should timeout at 10 seconds, but we use a mock for faster test + expect(elapsed).toBeLessThan(5000); // Well under 10s due to mock + }); + }); +}); diff --git a/apps/frontend/src/main/services/profile-service.ts b/apps/frontend/src/main/services/profile-service.ts new file mode 100644 index 0000000000..a58651ac56 --- /dev/null +++ b/apps/frontend/src/main/services/profile-service.ts @@ -0,0 +1,510 @@ +/** + * Profile Service - Validation and profile creation + * + * Provides validation functions for URL, API key, and profile name uniqueness. + * Handles creating new profiles with validation. + */ + +import { loadProfilesFile, saveProfilesFile, generateProfileId } from '../utils/profile-manager'; +import type { APIProfile, TestConnectionResult } from '../../shared/types/profile'; + +/** + * Validate base URL format + * Accepts HTTP(S) URLs with valid endpoints + */ +export function validateBaseUrl(baseUrl: string): boolean { + if (!baseUrl || baseUrl.trim() === '') { + return false; + } + + try { + const url = new URL(baseUrl); + // Only allow http and https protocols + return url.protocol === 'http:' || url.protocol === 'https:'; + } catch { + return false; + } +} + +/** + * Validate API key format + * Accepts various API key formats (Anthropic, OpenAI, custom) + */ +export function validateApiKey(apiKey: string): boolean { + if (!apiKey || apiKey.trim() === '') { + return false; + } + + const trimmed = apiKey.trim(); + + // Too short to be a real API key + if (trimmed.length < 12) { + return false; + } + + // Accept common API key formats + // Anthropic: sk-ant-... + // OpenAI: sk-proj-... or sk-...
+ // Custom: any reasonable length key with alphanumeric chars + const hasValidChars = /^[a-zA-Z0-9\-_+.]+$/.test(trimmed); + + return hasValidChars; +} + +/** + * Validate that profile name is unique (case-insensitive, trimmed) + */ +export async function validateProfileNameUnique(name: string): Promise<boolean> { + const trimmed = name.trim().toLowerCase(); + + const file = await loadProfilesFile(); + + // Check if any profile has the same name (case-insensitive) + const exists = file.profiles.some( + (p) => p.name.trim().toLowerCase() === trimmed + ); + + return !exists; +} + +/** + * Input type for creating a profile (without id, createdAt, updatedAt) + */ +export type CreateProfileInput = Omit<APIProfile, 'id' | 'createdAt' | 'updatedAt'>; + +/** + * Input type for updating a profile (with id, without createdAt, updatedAt) + */ +export type UpdateProfileInput = Pick<APIProfile, 'id'> & CreateProfileInput; + +/** + * Delete a profile with validation + * Throws errors for validation failures + */ +export async function deleteProfile(id: string): Promise<void> { + const file = await loadProfilesFile(); + + // Find the profile + const profileIndex = file.profiles.findIndex((p) => p.id === id); + if (profileIndex === -1) { + throw new Error('Profile not found'); + } + + const profile = file.profiles[profileIndex]; + + // Active Profile Check: Cannot delete active profile (AC3) + if (file.activeProfileId === id) { + throw new Error('Cannot delete active profile. Please switch to another profile or OAuth first.'); + } + + // Remove profile + file.profiles.splice(profileIndex, 1); + + // Last Profile Fallback: If no profiles remain, set activeProfileId to null (AC4) + if (file.profiles.length === 0) { + file.activeProfileId = null; + } + + // Save to disk + await saveProfilesFile(file); +} + +/** + * Create a new profile with validation + * Throws errors for validation failures + */ +export async function createProfile(input: CreateProfileInput): Promise<APIProfile> { + // Validate base URL + if (!validateBaseUrl(input.baseUrl)) { + throw new Error('Invalid base URL'); + } + + // Validate API key + if (!validateApiKey(input.apiKey)) { + throw new Error('Invalid API key'); + } + + // Validate profile name uniqueness + const isUnique = await validateProfileNameUnique(input.name); + if (!isUnique) { + throw new Error('A profile with this name already exists'); + } + + // Load existing profiles + const file = await loadProfilesFile(); + + // Create new profile + const now = Date.now(); + const newProfile: APIProfile = { + id: generateProfileId(), + name: input.name.trim(), + baseUrl: input.baseUrl.trim(), + apiKey: input.apiKey.trim(), + models: input.models, + createdAt: now, + updatedAt: now + }; + + // Add to profiles list + file.profiles.push(newProfile); + + // Set as active if it's the first profile + if (file.profiles.length === 1) { + file.activeProfileId = newProfile.id; + } + + // Save to disk + await saveProfilesFile(file); + + return newProfile; +} + +/** + * Update an existing profile with validation + * Throws errors for validation failures + */ +export async function updateProfile(input: UpdateProfileInput): Promise<APIProfile> { + // Validate base URL + if (!validateBaseUrl(input.baseUrl)) { + throw new Error('Invalid base URL'); + } + + // Validate API key + if (!validateApiKey(input.apiKey)) { + throw new Error('Invalid API key'); + } + + // Load existing profiles + const file = await loadProfilesFile(); + + // Find the profile + const profileIndex = file.profiles.findIndex((p) => p.id === input.id); + if (profileIndex === -1) { + throw new Error('Profile not found'); + } + +
const existingProfile = file.profiles[profileIndex]; + + // Validate profile name uniqueness (exclude current profile from check) + if (input.name.trim().toLowerCase() !== existingProfile.name.trim().toLowerCase()) { + const trimmed = input.name.trim().toLowerCase(); + const nameExists = file.profiles.some( + (p) => p.id !== input.id && p.name.trim().toLowerCase() === trimmed + ); + if (nameExists) { + throw new Error('A profile with this name already exists'); + } + } + + // Update profile (including name) + const updatedProfile: APIProfile = { + ...existingProfile, + name: input.name.trim(), + baseUrl: input.baseUrl.trim(), + apiKey: input.apiKey.trim(), + models: input.models, + updatedAt: Date.now() + }; + + // Replace in profiles list + file.profiles[profileIndex] = updatedProfile; + + // Save to disk + await saveProfilesFile(file); + + return updatedProfile; +} + +/** + * Get environment variables for the active API profile + * + * Maps the active API profile to SDK environment variables for injection + * into Python subprocess. Returns empty object when no profile is active + * (OAuth mode), allowing CLAUDE_CODE_OAUTH_TOKEN to be used instead. + * + * Environment Variable Mapping: + * - profile.baseUrl → ANTHROPIC_BASE_URL + * - profile.apiKey → ANTHROPIC_AUTH_TOKEN + * - profile.models.default → ANTHROPIC_MODEL + * - profile.models.haiku → ANTHROPIC_DEFAULT_HAIKU_MODEL + * - profile.models.sonnet → ANTHROPIC_DEFAULT_SONNET_MODEL + * - profile.models.opus → ANTHROPIC_DEFAULT_OPUS_MODEL + * + * Empty string values are filtered out (not set as env vars). + * + * @returns Promise<Record<string, string>> Environment variables for active profile + */ +export async function getAPIProfileEnv(): Promise<Record<string, string>> { + // Load profiles.json + const file = await loadProfilesFile(); + + // If no active profile (null/empty), return empty object (OAuth mode) + if (!file.activeProfileId || file.activeProfileId === '') { + return {}; + } + + // Find active profile by activeProfileId + const profile = file.profiles.find((p) => p.id === file.activeProfileId); + + // If profile not found, return empty object (shouldn't happen with valid data) + if (!profile) { + return {}; + } + + // Map profile fields to SDK env vars + const envVars: Record<string, string> = { + ANTHROPIC_BASE_URL: profile.baseUrl || '', + ANTHROPIC_AUTH_TOKEN: profile.apiKey || '', + ANTHROPIC_MODEL: profile.models?.default || '', + ANTHROPIC_DEFAULT_HAIKU_MODEL: profile.models?.haiku || '', + ANTHROPIC_DEFAULT_SONNET_MODEL: profile.models?.sonnet || '', + ANTHROPIC_DEFAULT_OPUS_MODEL: profile.models?.opus || '', + }; + + // Filter out empty/whitespace string values (only set env vars that have values) + // This handles empty strings, null, undefined, and whitespace-only values + const filteredEnvVars: Record<string, string> = {}; + for (const [key, value] of Object.entries(envVars)) { + const trimmedValue = value?.trim(); + if (trimmedValue && trimmedValue !== '') { + filteredEnvVars[key] = trimmedValue; + } + } + + return filteredEnvVars; +} + +/** + * Test API profile connection + * + * Validates credentials by making a minimal API request to the /v1/models endpoint. + * Returns detailed error information for different failure types.
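A short usage sketch may help here; the calling context below is hypothetical, while the signature, the URL auto-normalization, and the errorType values are taken from this file:

import { testConnection } from './profile-service';

async function verifyProfileCredentials(): Promise<void> {
  // External cancellation, independent of the built-in 10-second timeout.
  const controller = new AbortController();

  const result = await testConnection(
    'api.anthropic.com',        // https:// is auto-prepended, trailing slashes removed
    'sk-ant-example-key-1234',  // placeholder credential for illustration
    controller.signal
  );

  if (result.success) {
    console.log(result.message); // 'Connection successful'
  } else {
    // errorType is one of 'auth' | 'endpoint' | 'network' | 'timeout' | 'unknown'
    console.warn(`Connection test failed (${result.errorType}): ${result.message}`);
  }
}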
+ * + * @param baseUrl - API base URL (will be normalized) + * @param apiKey - API key for authentication + * @param signal - Optional AbortSignal for cancelling the request + * @returns Promise<TestConnectionResult> Result of connection test + */ +export async function testConnection( + baseUrl: string, + apiKey: string, + signal?: AbortSignal +): Promise<TestConnectionResult> { + // Validate API key first (key format doesn't depend on URL normalization) + if (!validateApiKey(apiKey)) { + return { + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' + }; + } + + // Normalize baseUrl BEFORE validation (allows auto-prepending https://) + let normalizedUrl = baseUrl.trim(); + + // Store original URL for error suggestions + const originalUrl = normalizedUrl; + + // If empty, return error + if (!normalizedUrl) { + return { + success: false, + errorType: 'endpoint', + message: 'Invalid endpoint. Please check the Base URL.' + }; + } + + // Ensure https:// prefix (auto-prepend if NO protocol exists) + // Check if URL already has a protocol (contains ://) + if (!normalizedUrl.includes('://')) { + normalizedUrl = `https://${normalizedUrl}`; + } + + // Remove trailing slash + normalizedUrl = normalizedUrl.replace(/\/+$/, ''); + + // Helper function to generate URL suggestions + const getUrlSuggestions = (url: string): string[] => { + const suggestions: string[] = []; + + // Check if URL lacks https:// + if (!url.includes('://')) { + suggestions.push('Ensure URL starts with https://'); + } + + // Check for trailing slash + if (url.endsWith('/')) { + suggestions.push('Remove trailing slashes from URL'); + } + + // Check for suspicious domain patterns (common typos) + const domainMatch = url.match(/:\/\/([^/]+)/); + if (domainMatch) { + const domain = domainMatch[1]; + // Check for common typos like anthropiic, ap, etc. + if (domain.includes('anthropiic') || domain.includes('anthhropic') || + domain.includes('anhtropic') || domain.length < 10) { + suggestions.push('Check for typos in domain name'); + } + } + + return suggestions; + }; + + // Validate the normalized baseUrl + if (!validateBaseUrl(normalizedUrl)) { + // Generate suggestions based on original URL + const suggestions = getUrlSuggestions(originalUrl); + const message = suggestions.length > 0 + ? `Invalid endpoint. Please check the Base URL.${suggestions.map(s => ' ' + s).join('')}` + : 'Invalid endpoint. Please check the Base URL.'; + + return { + success: false, + errorType: 'endpoint', + message + }; + } + + // Set timeout to 10 seconds (NFR-P3 compliance) + const timeoutController = new AbortController(); + const timeoutId = setTimeout(() => timeoutController.abort(), 10000); + + // Create a combined controller that aborts when either timeout or external signal aborts + const combinedController = new AbortController(); + + // Cleanup function for event listeners + const cleanup = () => { + clearTimeout(timeoutId); + }; + + // Listen to timeout abort + const onTimeoutAbort = () => { + cleanup(); + combinedController.abort(); + }; + timeoutController.signal.addEventListener('abort', onTimeoutAbort); + + // Listen to external signal abort (if provided) + let onExternalAbort: (() => void) | undefined; + if (signal) { + // If external signal already aborted, abort immediately + if (signal.aborted) { + cleanup(); + timeoutController.signal.removeEventListener('abort', onTimeoutAbort); + return { + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.'
+ }; + } + + // Listen to external signal abort + onExternalAbort = () => { + cleanup(); + timeoutController.signal.removeEventListener('abort', onTimeoutAbort); + combinedController.abort(); + }; + signal.addEventListener('abort', onExternalAbort); + } + + const combinedSignal = combinedController.signal; + + try { + // Make minimal API request + const response = await fetch(`${normalizedUrl}/v1/models`, { + method: 'GET', + headers: { + 'x-api-key': apiKey, + 'anthropic-version': '2023-06-01' + }, + signal: combinedSignal + }); + + // Clear timeout on successful response + cleanup(); + if (onTimeoutAbort) { + timeoutController.signal.removeEventListener('abort', onTimeoutAbort); + } + if (signal && onExternalAbort) { + signal.removeEventListener('abort', onExternalAbort); + } + + // Parse response and determine error type + if (response.status === 200 || response.status === 201) { + return { + success: true, + message: 'Connection successful' + }; + } + + if (response.status === 401 || response.status === 403) { + return { + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' + }; + } + + if (response.status === 404) { + // Generate URL suggestions for 404 errors + const suggestions = getUrlSuggestions(baseUrl.trim()); + const message = suggestions.length > 0 + ? `Invalid endpoint. Please check the Base URL.${suggestions.map(s => ' ' + s).join('')}` + : 'Invalid endpoint. Please check the Base URL.'; + + return { + success: false, + errorType: 'endpoint', + message + }; + } + + // Other HTTP errors + return { + success: false, + errorType: 'unknown', + message: 'Connection test failed. Please try again.' + }; + } catch (error) { + // Cleanup event listeners and timeout + cleanup(); + if (onTimeoutAbort) { + timeoutController.signal.removeEventListener('abort', onTimeoutAbort); + } + if (signal && onExternalAbort) { + signal.removeEventListener('abort', onExternalAbort); + } + + // Determine error type from error object + if (error instanceof Error) { + // AbortError β†’ timeout + if (error.name === 'AbortError') { + return { + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.' + }; + } + + // TypeError with ECONNREFUSED/ENOTFOUND β†’ network error + if (error instanceof TypeError) { + const errorCode = (error as any).code; + if (errorCode === 'ECONNREFUSED' || errorCode === 'ENOTFOUND') { + return { + success: false, + errorType: 'network', + message: 'Network error. Please check your internet connection.' + }; + } + } + } + + // Other errors + return { + success: false, + errorType: 'unknown', + message: 'Connection test failed. Please try again.' + }; + } +} diff --git a/apps/frontend/src/main/services/profile/index.ts b/apps/frontend/src/main/services/profile/index.ts new file mode 100644 index 0000000000..1980eb0300 --- /dev/null +++ b/apps/frontend/src/main/services/profile/index.ts @@ -0,0 +1,43 @@ +/** + * Profile Service - Barrel Export + * + * Re-exports all profile-related functionality for convenient importing. + * Main process code should import from this index file. 
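A brief consumption sketch (the consumer module, its location under src/main, and the omission of models are assumptions for illustration):

import { createProfile, getAPIProfileEnv } from './services/profile';

async function bootstrapProfile(): Promise<void> {
  const profile = await createProfile({
    name: 'Work',
    baseUrl: 'https://api.anthropic.com',
    apiKey: 'sk-ant-example-key-1234' // placeholder key; models field assumed optional
  });

  // Empty object in OAuth mode; populated once a profile is active.
  const env = await getAPIProfileEnv();
  console.log(`Created profile ${profile.id}; injecting ${Object.keys(env).length} env vars`);
}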
+ */ + +// Profile Manager utilities +export { + loadProfilesFile, + saveProfilesFile, + generateProfileId, + validateFilePermissions, + getProfilesFilePath, + withProfilesLock, + atomicModifyProfiles +} from './profile-manager'; + +// Profile Service +export { + validateBaseUrl, + validateApiKey, + validateProfileNameUnique, + createProfile, + updateProfile, + deleteProfile, + getAPIProfileEnv, + testConnection, + discoverModels +} from './profile-service'; + +export type { CreateProfileInput, UpdateProfileInput } from './profile-service'; + +// Re-export types from shared for convenience +export type { + APIProfile, + ProfilesFile, + ProfileFormData, + TestConnectionResult, + ModelInfo, + DiscoverModelsResult, + DiscoverModelsError +} from '@shared/types/profile'; diff --git a/apps/frontend/src/main/services/profile/profile-manager.test.ts b/apps/frontend/src/main/services/profile/profile-manager.test.ts new file mode 100644 index 0000000000..e2e336588b --- /dev/null +++ b/apps/frontend/src/main/services/profile/profile-manager.test.ts @@ -0,0 +1,208 @@ +/** + * Tests for profile-manager.ts + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { + loadProfilesFile, + saveProfilesFile, + generateProfileId, + validateFilePermissions +} from './profile-manager'; +import type { ProfilesFile } from '@shared/types/profile'; + +// Use vi.hoisted to define mock functions that need to be accessible in vi.mock +const { fsMocks } = vi.hoisted(() => ({ + fsMocks: { + readFile: vi.fn(), + writeFile: vi.fn(), + mkdir: vi.fn(), + chmod: vi.fn(), + access: vi.fn(), + unlink: vi.fn(), + rename: vi.fn() + } +})); + +// Mock Electron app.getPath +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') { + return '/mock/userdata'; + } + return '/mock/path'; + }) + } +})); + +// Mock proper-lockfile +vi.mock('proper-lockfile', () => ({ + default: { + lock: vi.fn().mockResolvedValue(vi.fn().mockResolvedValue(undefined)) + } +})); + +// Mock fs module +vi.mock('fs', () => ({ + default: { + promises: fsMocks + }, + promises: fsMocks, + existsSync: vi.fn(), + constants: { + O_RDONLY: 0, + S_IRUSR: 0o400 + } +})); + +describe('profile-manager', () => { + beforeEach(() => { + vi.clearAllMocks(); + // Setup default mocks to resolve + fsMocks.mkdir.mockResolvedValue(undefined); + fsMocks.writeFile.mockResolvedValue(undefined); + fsMocks.chmod.mockResolvedValue(undefined); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('loadProfilesFile', () => { + it('should return default profiles file when file does not exist', async () => { + fsMocks.readFile.mockRejectedValue(new Error('ENOENT')); + + const result = await loadProfilesFile(); + + expect(result).toEqual({ + profiles: [], + activeProfileId: null, + version: 1 + }); + }); + + it('should return default profiles file when file is corrupted JSON', async () => { + fsMocks.readFile.mockResolvedValue(Buffer.from('invalid json{')); + + const result = await loadProfilesFile(); + + expect(result).toEqual({ + profiles: [], + activeProfileId: null, + version: 1 + }); + }); + + it('should load valid profiles file', async () => { + const mockData: ProfilesFile = { + profiles: [ + { + id: 'test-id-1', + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-test-key', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'test-id-1', + version: 1 + }; + + fsMocks.readFile.mockResolvedValue( + Buffer.from(JSON.stringify(mockData)) + 
); + + const result = await loadProfilesFile(); + + expect(result).toEqual(mockData); + }); + + it('should use auto-claude directory for profiles.json path', async () => { + fsMocks.readFile.mockRejectedValue(new Error('ENOENT')); + + await loadProfilesFile(); + + // Verify the file path includes auto-claude + const readFileCalls = fsMocks.readFile.mock.calls; + const filePath = readFileCalls[0]?.[0]; + expect(filePath).toContain('auto-claude'); + expect(filePath).toContain('profiles.json'); + }); + }); + + describe('saveProfilesFile', () => { + it('should write profiles file to disk', async () => { + const mockData: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + await saveProfilesFile(mockData); + + expect(fsMocks.writeFile).toHaveBeenCalled(); + const writeFileCall = fsMocks.writeFile.mock.calls[0]; + const filePath = writeFileCall?.[0]; + const content = writeFileCall?.[1]; + + expect(filePath).toContain('auto-claude'); + expect(filePath).toContain('profiles.json'); + expect(content).toBe(JSON.stringify(mockData, null, 2)); + }); + + it('should throw error when write fails', async () => { + const mockData: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + fsMocks.writeFile.mockRejectedValue(new Error('Write failed')); + + await expect(saveProfilesFile(mockData)).rejects.toThrow('Write failed'); + }); + }); + + describe('generateProfileId', () => { + it('should generate unique UUID v4 format IDs', () => { + const id1 = generateProfileId(); + const id2 = generateProfileId(); + + // UUID v4 format: xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx + expect(id1).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/); + expect(id2).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/); + + // IDs should be unique + expect(id1).not.toBe(id2); + }); + + it('should generate different IDs on consecutive calls', () => { + const ids = new Set(); + for (let i = 0; i < 100; i++) { + ids.add(generateProfileId()); + } + expect(ids.size).toBe(100); + }); + }); + + describe('validateFilePermissions', () => { + it('should validate user-readable only file permissions', async () => { + // Mock successful chmod + fsMocks.chmod.mockResolvedValue(undefined); + + const result = await validateFilePermissions('/mock/path/to/file.json'); + + expect(result).toBe(true); + }); + + it('should return false if chmod fails', async () => { + fsMocks.chmod.mockRejectedValue(new Error('Permission denied')); + + const result = await validateFilePermissions('/mock/path/to/file.json'); + + expect(result).toBe(false); + }); + }); +}); diff --git a/apps/frontend/src/main/services/profile/profile-manager.ts b/apps/frontend/src/main/services/profile/profile-manager.ts new file mode 100644 index 0000000000..83029f4b58 --- /dev/null +++ b/apps/frontend/src/main/services/profile/profile-manager.ts @@ -0,0 +1,262 @@ +/** + * Profile Manager - File I/O for API profiles + * + * Handles loading and saving profiles.json from the auto-claude directory. + * Provides graceful handling for missing or corrupted files. + * Uses file locking to prevent race conditions in concurrent operations. 
+ */
+
+import { promises as fs } from 'fs';
+import path from 'path';
+import { app } from 'electron';
+// @ts-expect-error - no types available for proper-lockfile
+import * as lockfile from 'proper-lockfile';
+import type { APIProfile, ProfilesFile } from '@shared/types/profile';
+
+/**
+ * Get the path to profiles.json in the auto-claude directory
+ */
+export function getProfilesFilePath(): string {
+  const userDataPath = app.getPath('userData');
+  return path.join(userDataPath, 'auto-claude', 'profiles.json');
+}
+
+/**
+ * Check if a value is a valid profile object with required fields
+ */
+function isValidProfile(value: unknown): value is APIProfile {
+  if (typeof value !== 'object' || value === null) {
+    return false;
+  }
+  const profile = value as Record<string, unknown>;
+  return (
+    typeof profile.id === 'string' &&
+    typeof profile.name === 'string' &&
+    typeof profile.baseUrl === 'string' &&
+    typeof profile.apiKey === 'string' &&
+    typeof profile.createdAt === 'number' &&
+    typeof profile.updatedAt === 'number'
+  );
+}
+
+/**
+ * Validate the structure of parsed profiles data
+ */
+function isValidProfilesFile(data: unknown): data is ProfilesFile {
+  if (typeof data !== 'object' || data === null) {
+    return false;
+  }
+  const obj = data as Record<string, unknown>;
+
+  // Check profiles is an array
+  if (!Array.isArray(obj.profiles)) {
+    return false;
+  }
+
+  // Check each profile has required fields
+  for (const profile of obj.profiles) {
+    if (!isValidProfile(profile)) {
+      return false;
+    }
+  }
+
+  // Check activeProfileId is string or null
+  if (obj.activeProfileId !== null && typeof obj.activeProfileId !== 'string') {
+    return false;
+  }
+
+  // Check version is a number
+  if (typeof obj.version !== 'number') {
+    return false;
+  }
+
+  return true;
+}
+
+/**
+ * Default profiles file structure for fallback
+ */
+function getDefaultProfilesFile(): ProfilesFile {
+  return {
+    profiles: [],
+    activeProfileId: null,
+    version: 1
+  };
+}
+
+/**
+ * Load profiles.json from disk
+ * Returns default empty profiles file if file doesn't exist or is corrupted
+ */
+export async function loadProfilesFile(): Promise<ProfilesFile> {
+  const filePath = getProfilesFilePath();
+
+  try {
+    const content = await fs.readFile(filePath, 'utf-8');
+    const data = JSON.parse(content);
+
+    // Validate parsed data structure
+    if (isValidProfilesFile(data)) {
+      return data;
+    }
+
+    // Validation failed - return default
+    return getDefaultProfilesFile();
+  } catch {
+    // File doesn't exist or read/parse error - return default
+    return getDefaultProfilesFile();
+  }
+}
+
+/**
+ * Save profiles.json to disk
+ * Creates the auto-claude directory if it doesn't exist
+ * Ensures secure file permissions (user read/write only)
+ */
+export async function saveProfilesFile(data: ProfilesFile): Promise<void> {
+  const filePath = getProfilesFilePath();
+  const dir = path.dirname(filePath);
+
+  // Ensure directory exists
+  // mkdir with recursive: true resolves successfully if dir already exists
+  await fs.mkdir(dir, { recursive: true });
+
+  // Write file with formatted JSON
+  const content = JSON.stringify(data, null, 2);
+  await fs.writeFile(filePath, content, 'utf-8');
+
+  // Set secure file permissions (user read/write only - 0600)
+  const permissionsValid = await validateFilePermissions(filePath);
+  if (!permissionsValid) {
+    throw new Error('Failed to set secure file permissions on profiles file');
+  }
+}
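+
+// Usage sketch (illustrative only, not part of the module's API surface): a
+// bare load -> mutate -> save round-trip leaves a window between read and
+// write, so callers should prefer atomicModifyProfiles (defined below), which
+// performs the same steps under an exclusive lock. `someProfileId` is a
+// hypothetical value used only for illustration:
+//
+//   // Racy under concurrent writers:
+//   const file = await loadProfilesFile();
+//   file.activeProfileId = someProfileId;
+//   await saveProfilesFile(file);
+//
+//   // Safe - lock held across the read-modify-write:
+//   await atomicModifyProfiles((file) => {
+//     file.activeProfileId = someProfileId;
+//     return file;
+//   });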
+/**
+ * Generate a unique UUID v4 for a new profile
+ */
+export function generateProfileId(): string {
+  // Use crypto.randomUUID() if available (Node.js 16+ and modern browsers)
+  // Fall back to hand-rolled implementation for older environments
+  if (typeof crypto !== 'undefined' && typeof crypto.randomUUID === 'function') {
+    return crypto.randomUUID();
+  }
+
+  // Fallback: hand-rolled UUID v4 implementation
+  return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
+    const r = (Math.random() * 16) | 0;
+    const v = c === 'x' ? r : (r & 0x3) | 0x8;
+    return v.toString(16);
+  });
+}
+
+/**
+ * Validate and set file permissions to user read/write only (0600)
+ * Returns true if successful, false otherwise
+ */
+export async function validateFilePermissions(filePath: string): Promise<boolean> {
+  try {
+    // Set file permissions to user read/write only (0600)
+    await fs.chmod(filePath, 0o600);
+    return true;
+  } catch {
+    return false;
+  }
+}
+
+/**
+ * Execute a function with exclusive file lock to prevent race conditions
+ * This ensures atomic read-modify-write operations on the profiles file
+ *
+ * @param fn Function to execute while holding the lock
+ * @returns Result of the function execution
+ */
+export async function withProfilesLock<T>(fn: () => Promise<T>): Promise<T> {
+  const filePath = getProfilesFilePath();
+  const dir = path.dirname(filePath);
+
+  // Ensure directory and file exist before trying to lock
+  await fs.mkdir(dir, { recursive: true });
+
+  // Create file if it doesn't exist (needed for lockfile to work)
+  try {
+    await fs.access(filePath);
+  } catch {
+    // File doesn't exist, create it atomically with exclusive flag
+    const defaultData = getDefaultProfilesFile();
+    try {
+      await fs.writeFile(filePath, JSON.stringify(defaultData, null, 2), { encoding: 'utf-8', flag: 'wx' });
+    } catch (err: unknown) {
+      // If file was created by another process (race condition), that's fine
+      if ((err as NodeJS.ErrnoException).code !== 'EEXIST') {
+        throw err;
+      }
+      // EEXIST means another process won the race, proceed normally
+    }
+  }
+
+  // Acquire lock with reasonable timeout
+  let release: (() => Promise<void>) | undefined;
+  try {
+    release = await lockfile.lock(filePath, {
+      retries: {
+        retries: 10,
+        minTimeout: 50,
+        maxTimeout: 500
+      }
+    });
+
+    // Execute the function while holding the lock
+    return await fn();
+  } finally {
+    // Always release the lock
+    if (release) {
+      await release();
+    }
+  }
+}
+
+/**
+ * Atomically modify the profiles file
+ * Loads, modifies, and saves the file within an exclusive lock
+ *
+ * @param modifier Function that modifies the ProfilesFile
+ * @returns The modified ProfilesFile
+ */
+export async function atomicModifyProfiles(
+  modifier: (file: ProfilesFile) => ProfilesFile | Promise<ProfilesFile>
+): Promise<ProfilesFile> {
+  return await withProfilesLock(async () => {
+    // Load current state
+    const file = await loadProfilesFile();
+
+    // Apply modification
+    const modifiedFile = await modifier(file);
+
+    // Save atomically (write to temp file and rename)
+    const filePath = getProfilesFilePath();
+    const tempPath = `${filePath}.tmp`;
+
+    try {
+      // Write to temp file
+      const content = JSON.stringify(modifiedFile, null, 2);
+      await fs.writeFile(tempPath, content, 'utf-8');
+
+      // Set permissions on temp file
+      await fs.chmod(tempPath, 0o600);
+
+      // Atomically replace original file
+      await fs.rename(tempPath, filePath);
+
+      return modifiedFile;
+    } catch (error) {
+      // Clean up temp file on error
+      try {
+        await fs.unlink(tempPath);
+      } catch {
+        // Ignore cleanup errors
+      }
+      throw error;
+    }
+  });
+}
diff --git a/apps/frontend/src/main/services/profile/profile-service.test.ts
b/apps/frontend/src/main/services/profile/profile-service.test.ts new file mode 100644 index 0000000000..dfd8a07955 --- /dev/null +++ b/apps/frontend/src/main/services/profile/profile-service.test.ts @@ -0,0 +1,792 @@ +/** + * Tests for profile-service.ts + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { + validateBaseUrl, + validateApiKey, + validateProfileNameUnique, + createProfile, + updateProfile, + getAPIProfileEnv, + testConnection, + discoverModels +} from './profile-service'; +import type { APIProfile, ProfilesFile, TestConnectionResult } from '@shared/types/profile'; + +// Mock Anthropic SDK - use vi.hoisted to properly hoist the mock variable +const { mockModelsList, mockMessagesCreate } = vi.hoisted(() => ({ + mockModelsList: vi.fn(), + mockMessagesCreate: vi.fn() +})); + +vi.mock('@anthropic-ai/sdk', () => { + // Create mock error classes + class APIError extends Error { + status: number; + constructor(message: string, status: number) { + super(message); + this.name = 'APIError'; + this.status = status; + } + } + class AuthenticationError extends APIError { + constructor(message: string) { + super(message, 401); + this.name = 'AuthenticationError'; + } + } + class NotFoundError extends APIError { + constructor(message: string) { + super(message, 404); + this.name = 'NotFoundError'; + } + } + class APIConnectionError extends Error { + constructor(message: string) { + super(message); + this.name = 'APIConnectionError'; + } + } + class APIConnectionTimeoutError extends Error { + constructor(message: string) { + super(message); + this.name = 'APIConnectionTimeoutError'; + } + } + class BadRequestError extends APIError { + constructor(message: string) { + super(message, 400); + this.name = 'BadRequestError'; + } + } + + return { + default: class Anthropic { + models = { + list: mockModelsList + }; + messages = { + create: mockMessagesCreate + }; + }, + APIError, + AuthenticationError, + NotFoundError, + APIConnectionError, + APIConnectionTimeoutError, + BadRequestError + }; +}); + +// Mock profile-manager +vi.mock('./profile-manager', () => ({ + loadProfilesFile: vi.fn(), + saveProfilesFile: vi.fn(), + generateProfileId: vi.fn(() => 'mock-uuid-1234'), + validateFilePermissions: vi.fn().mockResolvedValue(true), + getProfilesFilePath: vi.fn(() => '/mock/profiles.json'), + atomicModifyProfiles: vi.fn(async (modifier: (file: ProfilesFile) => ProfilesFile) => { + // Get the current mock file from loadProfilesFile + const { loadProfilesFile, saveProfilesFile } = await import('./profile-manager'); + const file = await loadProfilesFile(); + const modified = modifier(file); + await saveProfilesFile(modified); + return modified; + }) +})); + +describe('profile-service', () => { + describe('validateBaseUrl', () => { + it('should accept valid HTTPS URLs', () => { + expect(validateBaseUrl('https://api.anthropic.com')).toBe(true); + expect(validateBaseUrl('https://custom-api.example.com')).toBe(true); + expect(validateBaseUrl('https://api.example.com/v1')).toBe(true); + }); + + it('should accept valid HTTP URLs', () => { + expect(validateBaseUrl('http://localhost:8080')).toBe(true); + expect(validateBaseUrl('http://127.0.0.1:8000')).toBe(true); + }); + + it('should reject invalid URLs', () => { + expect(validateBaseUrl('not-a-url')).toBe(false); + expect(validateBaseUrl('ftp://example.com')).toBe(false); + expect(validateBaseUrl('')).toBe(false); + expect(validateBaseUrl('https://')).toBe(false); + }); + + it('should reject URLs without valid format', () => { + 
expect(validateBaseUrl('anthropic.com')).toBe(false); + expect(validateBaseUrl('://api.anthropic.com')).toBe(false); + }); + }); + + describe('validateApiKey', () => { + it('should accept Anthropic API key format (sk-ant-...)', () => { + expect(validateApiKey('sk-ant-api03-12345')).toBe(true); + expect(validateApiKey('sk-ant-test-key')).toBe(true); + }); + + it('should accept OpenAI API key format (sk-...)', () => { + expect(validateApiKey('sk-proj-12345')).toBe(true); + expect(validateApiKey('sk-test-key-12345')).toBe(true); + }); + + it('should accept custom API keys with reasonable length', () => { + expect(validateApiKey('custom-key-12345678')).toBe(true); + expect(validateApiKey('x-api-key-abcdefghij')).toBe(true); + }); + + it('should reject empty or too short keys', () => { + expect(validateApiKey('')).toBe(false); + expect(validateApiKey('sk-')).toBe(false); + expect(validateApiKey('abc')).toBe(false); + }); + + it('should reject keys with only whitespace', () => { + expect(validateApiKey(' ')).toBe(false); + expect(validateApiKey('\t\n')).toBe(false); + }); + }); + + describe('validateProfileNameUnique', () => { + it('should return true when name is unique', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique('New Profile'); + expect(result).toBe(true); + }); + + it('should return false when name already exists', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique('Existing Profile'); + expect(result).toBe(false); + }); + + it('should be case-insensitive for duplicate detection', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result1 = await validateProfileNameUnique('my profile'); + const result2 = await validateProfileNameUnique('MY PROFILE'); + expect(result1).toBe(false); + expect(result2).toBe(false); + }); + + it('should trim whitespace before checking', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique(' My Profile '); + expect(result).toBe(false); + }); + }); + + describe('createProfile', () => { + it('should create profile with valid data and save', async () => { + const 
mockFile: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile, generateProfileId } = + await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + vi.mocked(generateProfileId).mockReturnValue('generated-id-123'); + + const input = { + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key', + models: { + default: 'claude-3-5-sonnet-20241022' + } + }; + + const result = await createProfile(input); + + expect(result).toMatchObject({ + id: 'generated-id-123', + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key', + models: { + default: 'claude-3-5-sonnet-20241022' + } + }); + expect(result.createdAt).toBeGreaterThan(0); + expect(result.updatedAt).toBeGreaterThan(0); + expect(saveProfilesFile).toHaveBeenCalled(); + }); + + it('should throw error for invalid base URL', async () => { + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue({ + profiles: [], + activeProfileId: null, + version: 1 + }); + + const input = { + name: 'Test Profile', + baseUrl: 'not-a-url', + apiKey: 'sk-ant-test-key' + }; + + await expect(createProfile(input)).rejects.toThrow('Invalid base URL'); + }); + + it('should throw error for invalid API key', async () => { + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue({ + profiles: [], + activeProfileId: null, + version: 1 + }); + + const input = { + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'too-short' + }; + + await expect(createProfile(input)).rejects.toThrow('Invalid API key'); + }); + + it('should throw error for duplicate profile name', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + name: 'Existing Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key' + }; + + await expect(createProfile(input)).rejects.toThrow( + 'A profile with this name already exists' + ); + }); + }); + + describe('updateProfile', () => { + it('should update profile name and other fields', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Old Name', + baseUrl: 'https://old-api.example.com', + apiKey: 'sk-old-key-12345678', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + + const input = { + id: 'existing-id', + name: 'New Name', + baseUrl: 'https://new-api.example.com', + apiKey: 'sk-new-api-key-123', + models: { default: 'claude-3-5-sonnet-20241022' } + }; + + const result = await updateProfile(input); + + expect(result.name).toBe('New Name'); + expect(result.baseUrl).toBe('https://new-api.example.com'); + expect(result.apiKey).toBe('sk-new-api-key-123'); + expect(result.models).toEqual({ default: 'claude-3-5-sonnet-20241022' }); + 
expect(result.updatedAt).toBeGreaterThan(1000000); + expect(result.createdAt).toBe(1000000); + }); + + it('should allow updating profile with same name (case-insensitive)', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-old-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + + const input = { + id: 'existing-id', + name: 'my profile', + baseUrl: 'https://new-api.example.com', + apiKey: 'sk-new-api-key-456' + }; + + const result = await updateProfile(input); + expect(result.name).toBe('my profile'); + expect(saveProfilesFile).toHaveBeenCalled(); + }); + + it('should throw error when name conflicts with another profile', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Profile One', + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678', + createdAt: 1000000, + updatedAt: 1000000 + }, + { + id: 'profile-2', + name: 'Profile Two', + baseUrl: 'https://api2.example.com', + apiKey: 'sk-key-two-12345678', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'profile-1', + name: 'Profile Two', + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678' + }; + + await expect(updateProfile(input)).rejects.toThrow( + 'A profile with this name already exists' + ); + }); + + it('should throw error for invalid base URL', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'not-a-url', + apiKey: 'sk-test-api-key-123' + }; + + await expect(updateProfile(input)).rejects.toThrow('Invalid base URL'); + }); + + it('should throw error for invalid API key', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'too-short' + }; + + await expect(updateProfile(input)).rejects.toThrow('Invalid API key'); + }); + + it('should throw error when profile not found', async () => { + const mockFile: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'non-existent-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + 
apiKey: 'sk-test-api-key-123' + }; + + await expect(updateProfile(input)).rejects.toThrow('Profile not found'); + }); + }); + + describe('getAPIProfileEnv', () => { + it('should return empty object when no active profile (OAuth mode)', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-key-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + expect(result).toEqual({}); + }); + + it('should return correct env vars for active profile with all fields', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.custom.com', + apiKey: 'sk-test-key-12345678', + models: { + default: 'claude-3-5-sonnet-20241022', + haiku: 'claude-3-5-haiku-20241022', + sonnet: 'claude-3-5-sonnet-20241022', + opus: 'claude-3-5-opus-20241022' + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api.custom.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_HAIKU_MODEL: 'claude-3-5-haiku-20241022', + ANTHROPIC_DEFAULT_SONNET_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_OPUS_MODEL: 'claude-3-5-opus-20241022' + }); + }); + + it('should filter out empty string values', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: '', + apiKey: 'sk-test-key-12345678', + models: { + default: 'claude-3-5-sonnet-20241022', + haiku: '', + sonnet: '' + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).not.toHaveProperty('ANTHROPIC_BASE_URL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_HAIKU_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_SONNET_MODEL'); + expect(result).toEqual({ + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022' + }); + }); + }); + + describe('testConnection', () => { + beforeEach(() => { + mockModelsList.mockReset(); + mockMessagesCreate.mockReset(); + }); + + // Helper to create mock errors with proper name property + const createMockError = (name: string, message: string) => { + const error = new Error(message); + error.name = name; + return error; + }; + + it('should return success for valid credentials (200 response)', async () => { + mockModelsList.mockResolvedValue({ data: [] }); + + const result = await testConnection('https://api.anthropic.com', 'sk-ant-test-key-12'); + + expect(result).toEqual({ + success: true, + message: 'Connection successful' + }); + }); + + it('should return auth error for invalid API key (401 response)', async () => { + mockModelsList.mockRejectedValue(createMockError('AuthenticationError', 'Unauthorized')); + + 
const result = await testConnection('https://api.anthropic.com', 'sk-invalid-key-12');
+
+      expect(result).toEqual({
+        success: false,
+        errorType: 'auth',
+        message: 'Authentication failed. Please check your API key.'
+      });
+    });
+
+    it('should return network error for connection refused', async () => {
+      mockModelsList.mockRejectedValue(createMockError('APIConnectionError', 'ECONNREFUSED'));
+
+      const result = await testConnection('https://unreachable.example.com', 'sk-test-key-12chars');
+
+      expect(result).toEqual({
+        success: false,
+        errorType: 'network',
+        message: 'Network error. Please check your internet connection.'
+      });
+    });
+
+    it('should return timeout error for connection timeout', async () => {
+      mockModelsList.mockRejectedValue(createMockError('APIConnectionTimeoutError', 'Timeout'));
+
+      const result = await testConnection('https://slow.example.com', 'sk-test-key-12chars');
+
+      expect(result).toEqual({
+        success: false,
+        errorType: 'timeout',
+        message: 'Connection timeout. The endpoint did not respond.'
+      });
+    });
+
+    it('should auto-prepend https:// if missing', async () => {
+      mockModelsList.mockResolvedValue({ data: [] });
+
+      const result = await testConnection('api.anthropic.com', 'sk-test-key-12chars');
+
+      expect(result).toEqual({
+        success: true,
+        message: 'Connection successful'
+      });
+    });
+
+    it('should return error for empty baseUrl', async () => {
+      const result = await testConnection('', 'sk-test-key-12chars');
+
+      expect(result).toEqual({
+        success: false,
+        errorType: 'endpoint',
+        message: 'Invalid endpoint. Please check the Base URL.'
+      });
+      expect(mockModelsList).not.toHaveBeenCalled();
+    });
+
+    it('should return error for invalid API key format', async () => {
+      const result = await testConnection('https://api.anthropic.com', 'short');
+
+      expect(result).toEqual({
+        success: false,
+        errorType: 'auth',
+        message: 'Authentication failed. Please check your API key.'
+ }); + expect(mockModelsList).not.toHaveBeenCalled(); + }); + }); + + describe('discoverModels', () => { + beforeEach(() => { + mockModelsList.mockReset(); + }); + + // Helper to create mock errors with proper name property + const createMockError = (name: string, message: string) => { + const error = new Error(message); + error.name = name; + return error; + }; + + it('should return list of models for successful response', async () => { + mockModelsList.mockResolvedValue({ + data: [ + { id: 'claude-3-5-sonnet-20241022', display_name: 'Claude Sonnet 3.5', created_at: '2024-10-22', type: 'model' }, + { id: 'claude-3-5-haiku-20241022', display_name: 'Claude Haiku 3.5', created_at: '2024-10-22', type: 'model' } + ] + }); + + const result = await discoverModels('https://api.anthropic.com', 'sk-ant-test-key-12'); + + expect(result).toEqual({ + models: [ + { id: 'claude-3-5-sonnet-20241022', display_name: 'Claude Sonnet 3.5' }, + { id: 'claude-3-5-haiku-20241022', display_name: 'Claude Haiku 3.5' } + ] + }); + }); + + it('should throw auth error for 401 response', async () => { + mockModelsList.mockRejectedValue(createMockError('AuthenticationError', 'Unauthorized')); + + const error = await discoverModels('https://api.anthropic.com', 'sk-invalid-key') + .catch(e => e); + + expect(error).toBeInstanceOf(Error); + expect((error as Error & { errorType?: string }).errorType).toBe('auth'); + }); + + it('should throw not_supported error for 404 response', async () => { + mockModelsList.mockRejectedValue(createMockError('NotFoundError', 'Not Found')); + + const error = await discoverModels('https://custom-api.com', 'sk-test-key-12345678') + .catch(e => e); + + expect(error).toBeInstanceOf(Error); + expect((error as Error & { errorType?: string }).errorType).toBe('not_supported'); + }); + + it('should auto-prepend https:// if missing', async () => { + mockModelsList.mockResolvedValue({ data: [] }); + + const result = await discoverModels('api.anthropic.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ models: [] }); + }); + }); +}); diff --git a/apps/frontend/src/main/services/profile/profile-service.ts b/apps/frontend/src/main/services/profile/profile-service.ts new file mode 100644 index 0000000000..f3902049c8 --- /dev/null +++ b/apps/frontend/src/main/services/profile/profile-service.ts @@ -0,0 +1,613 @@ +/** + * Profile Service - Validation and profile creation + * + * Provides validation functions for URL, API key, and profile name uniqueness. + * Handles creating new profiles with validation. + * Uses atomic operations with file locking to prevent TOCTOU race conditions. 
+ */
+
+import Anthropic, {
+  AuthenticationError,
+  NotFoundError,
+  APIConnectionError,
+  APIConnectionTimeoutError
+} from '@anthropic-ai/sdk';
+
+import { loadProfilesFile, generateProfileId, atomicModifyProfiles } from './profile-manager';
+import type { APIProfile, TestConnectionResult, ModelInfo, DiscoverModelsResult } from '@shared/types/profile';
+
+/**
+ * Input type for creating a profile (without id, createdAt, updatedAt)
+ */
+export type CreateProfileInput = Omit<APIProfile, 'id' | 'createdAt' | 'updatedAt'>;
+
+/**
+ * Input type for updating a profile (with id, without createdAt, updatedAt)
+ */
+export type UpdateProfileInput = Pick<APIProfile, 'id'> & CreateProfileInput;
+
+/**
+ * Validate base URL format
+ * Accepts HTTP(S) URLs with valid endpoints
+ */
+export function validateBaseUrl(baseUrl: string): boolean {
+  if (!baseUrl || baseUrl.trim() === '') {
+    return false;
+  }
+
+  try {
+    const url = new URL(baseUrl);
+    // Only allow http and https protocols
+    return url.protocol === 'http:' || url.protocol === 'https:';
+  } catch {
+    return false;
+  }
+}
+
+/**
+ * Validate API key format
+ * Accepts various API key formats (Anthropic, OpenAI, custom)
+ */
+export function validateApiKey(apiKey: string): boolean {
+  if (!apiKey || apiKey.trim() === '') {
+    return false;
+  }
+
+  const trimmed = apiKey.trim();
+
+  // Too short to be a real API key
+  if (trimmed.length < 12) {
+    return false;
+  }
+
+  // Accept common API key formats
+  // Anthropic: sk-ant-...
+  // OpenAI: sk-proj-... or sk-...
+  // Custom: any reasonable length key with alphanumeric chars
+  const hasValidChars = /^[a-zA-Z0-9\-_+.]+$/.test(trimmed);
+
+  return hasValidChars;
+}
+
+/**
+ * Validate that profile name is unique (case-insensitive, trimmed)
+ *
+ * WARNING: This is for UX feedback only. Do NOT rely on this for correctness.
+ * The actual uniqueness check happens atomically inside create/update operations
+ * to prevent TOCTOU race conditions.
+ */
+export async function validateProfileNameUnique(name: string): Promise<boolean> {
+  const trimmed = name.trim().toLowerCase();
+
+  const file = await loadProfilesFile();
+
+  // Check if any profile has the same name (case-insensitive)
+  const exists = file.profiles.some(
+    (p) => p.name.trim().toLowerCase() === trimmed
+  );
+
+  return !exists;
+}
+
+/**
+ * Delete a profile with validation
+ * Throws errors for validation failures
+ * Uses atomic operation to prevent race conditions
+ */
+export async function deleteProfile(id: string): Promise<void> {
+  await atomicModifyProfiles((file) => {
+    // Find the profile
+    const profileIndex = file.profiles.findIndex((p) => p.id === id);
+    if (profileIndex === -1) {
+      throw new Error('Profile not found');
+    }
+
+    // Active Profile Check: Cannot delete active profile (AC3)
+    if (file.activeProfileId === id) {
+      throw new Error('Cannot delete active profile. Please switch to another profile or OAuth first.');
+    }
+
+    // Remove profile
+    file.profiles.splice(profileIndex, 1);
+
+    // Last Profile Fallback: If no profiles remain, set activeProfileId to null (AC4)
+    if (file.profiles.length === 0) {
+      file.activeProfileId = null;
+    }
+
+    return file;
+  });
+}
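+
+// Usage sketch (illustrative only - the IPC result shape shown here is
+// assumed, not defined in this module): callers surface the guard errors
+// thrown above rather than letting them escape the main process:
+//
+//   try {
+//     await deleteProfile(id);
+//   } catch (err) {
+//     // 'Profile not found' or 'Cannot delete active profile. ...'
+//     return { success: false, message: (err as Error).message };
+//   }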
+
+/**
+ * Create a new profile with validation
+ * Throws errors for validation failures
+ * Uses atomic operation to prevent race conditions in concurrent profile creation
+ */
+export async function createProfile(input: CreateProfileInput): Promise<APIProfile> {
+  // Validate base URL
+  if (!validateBaseUrl(input.baseUrl)) {
+    throw new Error('Invalid base URL');
+  }
+
+  // Validate API key
+  if (!validateApiKey(input.apiKey)) {
+    throw new Error('Invalid API key');
+  }
+
+  // Use atomic operation to ensure uniqueness check and creation happen together
+  // This prevents TOCTOU race where another process creates the same profile name
+  // between our check and write
+  const newProfile = await atomicModifyProfiles((file) => {
+    // Re-check uniqueness within the lock (this is the authoritative check)
+    const trimmed = input.name.trim().toLowerCase();
+    const exists = file.profiles.some(
+      (p) => p.name.trim().toLowerCase() === trimmed
+    );
+
+    if (exists) {
+      throw new Error('A profile with this name already exists');
+    }
+
+    // Create new profile
+    const now = Date.now();
+    const profile: APIProfile = {
+      id: generateProfileId(),
+      name: input.name.trim(),
+      baseUrl: input.baseUrl.trim(),
+      apiKey: input.apiKey.trim(),
+      models: input.models,
+      createdAt: now,
+      updatedAt: now
+    };
+
+    // Add to profiles list
+    file.profiles.push(profile);
+
+    // Set as active if it's the first profile
+    if (file.profiles.length === 1) {
+      file.activeProfileId = profile.id;
+    }
+
+    return file;
+  });
+
+  // Find and return the newly created profile
+  const createdProfile = newProfile.profiles[newProfile.profiles.length - 1];
+  return createdProfile;
+}
+
+/**
+ * Update an existing profile with validation
+ * Throws errors for validation failures
+ * Uses atomic operation to prevent race conditions in concurrent profile updates
+ */
+export async function updateProfile(input: UpdateProfileInput): Promise<APIProfile> {
+  // Validate base URL
+  if (!validateBaseUrl(input.baseUrl)) {
+    throw new Error('Invalid base URL');
+  }
+
+  // Validate API key
+  if (!validateApiKey(input.apiKey)) {
+    throw new Error('Invalid API key');
+  }
+
+  // Use atomic operation to ensure uniqueness check and update happen together
+  const modifiedFile = await atomicModifyProfiles((file) => {
+    // Find the profile
+    const profileIndex = file.profiles.findIndex((p) => p.id === input.id);
+    if (profileIndex === -1) {
+      throw new Error('Profile not found');
+    }
+
+    const existingProfile = file.profiles[profileIndex];
+
+    // Validate profile name uniqueness (exclude current profile from check)
+    // This check happens atomically within the lock
+    if (input.name.trim().toLowerCase() !== existingProfile.name.trim().toLowerCase()) {
+      const trimmed = input.name.trim().toLowerCase();
+      const nameExists = file.profiles.some(
+        (p) => p.id !== input.id && p.name.trim().toLowerCase() === trimmed
+      );
+      if (nameExists) {
+        throw new Error('A profile with this name already exists');
+      }
+    }
+
+    // Update profile (including name)
+    const updated: APIProfile = {
+      ...existingProfile,
+      name: input.name.trim(),
+      baseUrl: input.baseUrl.trim(),
+      apiKey: input.apiKey.trim(),
+      models: input.models,
+      updatedAt: Date.now()
+    };
+
+    // Replace in profiles list
+    file.profiles[profileIndex] = updated;
+
+    return file;
+  });
+
+  // Find and return the updated profile
+  const updatedProfile = modifiedFile.profiles.find((p) => p.id === input.id)!;
+  return updatedProfile;
+}
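+
+// Illustrative race (assumed scenario, not a recorded trace): two concurrent
+// createProfile calls with the same name would both pass a naive pre-check,
+// but with atomicModifyProfiles the later caller re-checks inside the lock
+// and rejects with 'A profile with this name already exists':
+//
+//   const results = await Promise.allSettled([
+//     createProfile({ name: 'X', baseUrl, apiKey }),
+//     createProfile({ name: 'X', baseUrl, apiKey })
+//   ]);
+//   // Exactly one entry is 'fulfilled'; the other is 'rejected'.
+//   // `baseUrl` and `apiKey` stand in for otherwise-valid values.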
+
+/**
+ * Get environment variables for the active API profile
+ *
+ * Maps the active API profile to SDK environment variables for injection
+ * into Python subprocess. Returns empty object when no profile is active
+ * (OAuth mode), allowing CLAUDE_CODE_OAUTH_TOKEN to be used instead.
+ *
+ * Environment Variable Mapping:
+ * - profile.baseUrl → ANTHROPIC_BASE_URL
+ * - profile.apiKey → ANTHROPIC_AUTH_TOKEN
+ * - profile.models.default → ANTHROPIC_MODEL
+ * - profile.models.haiku → ANTHROPIC_DEFAULT_HAIKU_MODEL
+ * - profile.models.sonnet → ANTHROPIC_DEFAULT_SONNET_MODEL
+ * - profile.models.opus → ANTHROPIC_DEFAULT_OPUS_MODEL
+ *
+ * Empty string values are filtered out (not set as env vars).
+ *
+ * @returns Promise<Record<string, string>> Environment variables for active profile
+ */
+export async function getAPIProfileEnv(): Promise<Record<string, string>> {
+  // Load profiles.json
+  const file = await loadProfilesFile();
+
+  // If no active profile (null/empty), return empty object (OAuth mode)
+  if (!file.activeProfileId || file.activeProfileId === '') {
+    return {};
+  }
+
+  // Find active profile by activeProfileId
+  const profile = file.profiles.find((p) => p.id === file.activeProfileId);
+
+  // If profile not found, return empty object (shouldn't happen with valid data)
+  if (!profile) {
+    return {};
+  }
+
+  // Map profile fields to SDK env vars
+  const envVars: Record<string, string> = {
+    ANTHROPIC_BASE_URL: profile.baseUrl || '',
+    ANTHROPIC_AUTH_TOKEN: profile.apiKey || '',
+    ANTHROPIC_MODEL: profile.models?.default || '',
+    ANTHROPIC_DEFAULT_HAIKU_MODEL: profile.models?.haiku || '',
+    ANTHROPIC_DEFAULT_SONNET_MODEL: profile.models?.sonnet || '',
+    ANTHROPIC_DEFAULT_OPUS_MODEL: profile.models?.opus || '',
+  };
+
+  // Filter out empty/whitespace string values (only set env vars that have values)
+  // This handles empty strings, null, undefined, and whitespace-only values
+  const filteredEnvVars: Record<string, string> = {};
+  for (const [key, value] of Object.entries(envVars)) {
+    const trimmedValue = value?.trim();
+    if (trimmedValue && trimmedValue !== '') {
+      filteredEnvVars[key] = trimmedValue;
+    }
+  }
+
+  return filteredEnvVars;
+}
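+
+// Usage sketch (illustrative only - the spawn call and script name are
+// hypothetical; the real subprocess integration lives elsewhere in the app):
+//
+//   import { spawn } from 'child_process';
+//   const profileEnv = await getAPIProfileEnv();
+//   spawn('python', ['runner.py'], { env: { ...process.env, ...profileEnv } });
+//
+// In OAuth mode profileEnv is {}, so an inherited CLAUDE_CODE_OAUTH_TOKEN
+// from process.env remains in effect.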
+
+/**
+ * Test API profile connection
+ *
+ * Validates credentials by making a minimal API request to the /v1/models endpoint.
+ * Uses the Anthropic SDK for built-in timeout, retry, and error handling.
+ *
+ * @param baseUrl - API base URL (will be normalized)
+ * @param apiKey - API key for authentication
+ * @param signal - Optional AbortSignal for cancelling the request
+ * @returns Promise<TestConnectionResult> Result of connection test
+ */
+export async function testConnection(
+  baseUrl: string,
+  apiKey: string,
+  signal?: AbortSignal
+): Promise<TestConnectionResult> {
+  // Validate API key first (key format doesn't depend on URL normalization)
+  if (!validateApiKey(apiKey)) {
+    return {
+      success: false,
+      errorType: 'auth',
+      message: 'Authentication failed. Please check your API key.'
+    };
+  }
+
+  // Normalize baseUrl BEFORE validation (allows auto-prepending https://)
+  let normalizedUrl = baseUrl.trim();
+
+  // Store original URL for error suggestions
+  const originalUrl = normalizedUrl;
+
+  // If empty, return error
+  if (!normalizedUrl) {
+    return {
+      success: false,
+      errorType: 'endpoint',
+      message: 'Invalid endpoint. Please check the Base URL.'
+    };
+  }
+
+  // Ensure https:// prefix (auto-prepend if NO protocol exists)
+  if (!normalizedUrl.includes('://')) {
+    normalizedUrl = `https://${normalizedUrl}`;
+  }
+
+  // Remove trailing slash
+  normalizedUrl = normalizedUrl.replace(/\/+$/, '');
+
+  // Helper function to generate URL suggestions
+  const getUrlSuggestions = (url: string): string[] => {
+    const suggestions: string[] = [];
+
+    if (!url.includes('://')) {
+      suggestions.push('Ensure URL starts with https://');
+    }
+
+    if (url.endsWith('/')) {
+      suggestions.push('Remove trailing slashes from URL');
+    }
+
+    const domainMatch = url.match(/:\/\/([^/]+)/);
+    if (domainMatch) {
+      const domain = domainMatch[1];
+      if (domain.includes('anthropiic') || domain.includes('anthhropic') ||
+          domain.includes('anhtropic') || domain.length < 10) {
+        suggestions.push('Check for typos in domain name');
+      }
+    }
+
+    return suggestions;
+  };
+
+  // Validate the normalized baseUrl
+  if (!validateBaseUrl(normalizedUrl)) {
+    const suggestions = getUrlSuggestions(originalUrl);
+    const message = suggestions.length > 0
+      ? `Invalid endpoint. Please check the Base URL.${suggestions.map(s => ' ' + s).join('')}`
+      : 'Invalid endpoint. Please check the Base URL.';
+
+    return {
+      success: false,
+      errorType: 'endpoint',
+      message
+    };
+  }
+
+  // Check if signal already aborted
+  if (signal?.aborted) {
+    return {
+      success: false,
+      errorType: 'timeout',
+      message: 'Connection timeout. The endpoint did not respond.'
+    };
+  }
+
+  try {
+    // Create Anthropic client with SDK
+    const client = new Anthropic({
+      apiKey,
+      baseURL: normalizedUrl,
+      timeout: 10000, // 10 seconds
+      maxRetries: 0, // Disable retries for immediate feedback
+    });
+
+    // Make minimal request to test connection (pass signal for cancellation)
+    // Try models.list first, but some Anthropic-compatible APIs don't support it
+    try {
+      await client.models.list({ limit: 1 }, { signal: signal ?? undefined });
+    } catch (modelsError) {
+      // If models endpoint returns 404, try messages endpoint instead
+      // Many Anthropic-compatible APIs (e.g., MiniMax) only support /v1/messages
+      const modelsErrorName = modelsError instanceof Error ? modelsError.name : '';
+      if (modelsErrorName === 'NotFoundError' || modelsError instanceof NotFoundError) {
+        // Fall back to messages endpoint with minimal request
+        // This will fail with 400 (invalid request) but proves the endpoint is reachable
+        try {
+          await client.messages.create({
+            model: 'test',
+            max_tokens: 1,
+            messages: [{ role: 'user', content: 'test' }]
+          }, { signal: signal ?? undefined });
+        } catch (messagesError) {
+          const messagesErrorName = messagesError instanceof Error ?
messagesError.name : '';
+          // 400/422 errors mean the endpoint is valid, just our test request was invalid
+          // This is expected - we're just testing connectivity
+          if (messagesErrorName === 'BadRequestError' ||
+              messagesErrorName === 'InvalidRequestError' ||
+              (messagesError instanceof Error && 'status' in messagesError &&
+               ((messagesError as { status?: number }).status === 400 ||
+                (messagesError as { status?: number }).status === 422))) {
+            // Endpoint is valid, connection successful
+            return {
+              success: true,
+              message: 'Connection successful'
+            };
+          }
+          // Re-throw other errors to be handled by outer catch
+          throw messagesError;
+        }
+        // If messages.create somehow succeeded, connection is valid
+        return {
+          success: true,
+          message: 'Connection successful'
+        };
+      }
+      // Re-throw non-404 errors to be handled by outer catch
+      throw modelsError;
+    }
+
+    return {
+      success: true,
+      message: 'Connection successful'
+    };
+  } catch (error) {
+    // Map SDK errors to TestConnectionResult error types
+    // Use error.name for instanceof-like checks (works with mocks that set this.name)
+    const errorName = error instanceof Error ? error.name : '';
+
+    if (errorName === 'AuthenticationError' || error instanceof AuthenticationError) {
+      return {
+        success: false,
+        errorType: 'auth',
+        message: 'Authentication failed. Please check your API key.'
+      };
+    }
+
+    if (errorName === 'NotFoundError' || error instanceof NotFoundError) {
+      const suggestions = getUrlSuggestions(baseUrl.trim());
+      const message = suggestions.length > 0
+        ? `Invalid endpoint. Please check the Base URL.${suggestions.map(s => ' ' + s).join('')}`
+        : 'Invalid endpoint. Please check the Base URL.';
+
+      return {
+        success: false,
+        errorType: 'endpoint',
+        message
+      };
+    }
+
+    if (errorName === 'APIConnectionTimeoutError' || error instanceof APIConnectionTimeoutError) {
+      return {
+        success: false,
+        errorType: 'timeout',
+        message: 'Connection timeout. The endpoint did not respond.'
+      };
+    }
+
+    if (errorName === 'APIConnectionError' || error instanceof APIConnectionError) {
+      return {
+        success: false,
+        errorType: 'network',
+        message: 'Network error. Please check your internet connection.'
+      };
+    }
+
+    // APIError or other errors
+    return {
+      success: false,
+      errorType: 'unknown',
+      message: 'Connection test failed. Please try again.'
+    };
+  }
+}
+
+/**
+ * Discover available models from API endpoint
+ *
+ * Fetches the list of available models from the Anthropic-compatible /v1/models endpoint.
+ * Uses the Anthropic SDK for built-in timeout, retry, and error handling.
+ *
+ * @param baseUrl - API base URL (will be normalized)
+ * @param apiKey - API key for authentication
+ * @param signal - Optional AbortSignal for cancelling the request (checked before request)
+ * @returns Promise<DiscoverModelsResult> List of available models
+ * @throws Error with errorType for auth/network/endpoint/timeout/not_supported failures
+ */
+export async function discoverModels(
+  baseUrl: string,
+  apiKey: string,
+  signal?: AbortSignal
+): Promise<DiscoverModelsResult> {
+  // Validate API key first
+  if (!validateApiKey(apiKey)) {
+    const error: Error & { errorType?: string } = new Error('Authentication failed. Please check your API key.');
+    error.errorType = 'auth';
+    throw error;
+  }
+
+  // Normalize baseUrl BEFORE validation
+  let normalizedUrl = baseUrl.trim();
+
+  // If empty, throw error
+  if (!normalizedUrl) {
+    const error: Error & { errorType?: string } = new Error('Invalid endpoint.
Please check the Base URL.'); + error.errorType = 'endpoint'; + throw error; + } + + // Ensure https:// prefix (auto-prepend if NO protocol exists) + if (!normalizedUrl.includes('://')) { + normalizedUrl = `https://${normalizedUrl}`; + } + + // Remove trailing slash + normalizedUrl = normalizedUrl.replace(/\/+$/, ''); + + // Validate the normalized baseUrl + if (!validateBaseUrl(normalizedUrl)) { + const error: Error & { errorType?: string } = new Error('Invalid endpoint. Please check the Base URL.'); + error.errorType = 'endpoint'; + throw error; + } + + // Check if signal already aborted + if (signal?.aborted) { + const error: Error & { errorType?: string } = new Error('Connection timeout. The endpoint did not respond.'); + error.errorType = 'timeout'; + throw error; + } + + try { + // Create Anthropic client with SDK + const client = new Anthropic({ + apiKey, + baseURL: normalizedUrl, + timeout: 10000, // 10 seconds + maxRetries: 0, // Disable retries for immediate feedback + }); + + // Fetch models with pagination (1000 limit to get all), pass signal for cancellation + const response = await client.models.list({ limit: 1000 }, { signal: signal ?? undefined }); + + // Extract model information from SDK response + const models: ModelInfo[] = response.data + .map((model) => ({ + id: model.id || '', + display_name: model.display_name || model.id || '' + })) + .filter((model) => model.id.length > 0); + + return { models }; + } catch (error) { + // Map SDK errors to thrown errors with errorType property + // Use error.name for instanceof-like checks (works with mocks that set this.name) + const errorName = error instanceof Error ? error.name : ''; + + if (errorName === 'AuthenticationError' || error instanceof AuthenticationError) { + const authError: Error & { errorType?: string } = new Error('Authentication failed. Please check your API key.'); + authError.errorType = 'auth'; + throw authError; + } + + if (errorName === 'NotFoundError' || error instanceof NotFoundError) { + const notSupportedError: Error & { errorType?: string } = new Error('This API endpoint does not support model listing. Please enter the model name manually.'); + notSupportedError.errorType = 'not_supported'; + throw notSupportedError; + } + + if (errorName === 'APIConnectionTimeoutError' || error instanceof APIConnectionTimeoutError) { + const timeoutError: Error & { errorType?: string } = new Error('Connection timeout. The endpoint did not respond.'); + timeoutError.errorType = 'timeout'; + throw timeoutError; + } + + if (errorName === 'APIConnectionError' || error instanceof APIConnectionError) { + const networkError: Error & { errorType?: string } = new Error('Network error. Please check your internet connection.'); + networkError.errorType = 'network'; + throw networkError; + } + + // APIError or other errors + const unknownError: Error & { errorType?: string } = new Error('Connection test failed. 
Please try again.'); + unknownError.errorType = 'unknown'; + throw unknownError; + } +} diff --git a/apps/frontend/src/main/task-log-service.ts b/apps/frontend/src/main/task-log-service.ts index 9ad2569649..7752143857 100644 --- a/apps/frontend/src/main/task-log-service.ts +++ b/apps/frontend/src/main/task-log-service.ts @@ -2,6 +2,15 @@ import path from 'path'; import { existsSync, readFileSync, watchFile } from 'fs'; import { EventEmitter } from 'events'; import type { TaskLogs, TaskLogPhase, TaskLogStreamChunk, TaskPhaseLog } from '../shared/types'; +import { findTaskWorktree } from './worktree-paths'; + +function findWorktreeSpecDir(projectPath: string, specId: string, specsRelPath: string): string | null { + const worktreePath = findTaskWorktree(projectPath, specId); + if (worktreePath) { + return path.join(worktreePath, specsRelPath, specId); + } + return null; +} /** * Service for loading and watching phase-based task logs (task_logs.json) @@ -120,7 +129,7 @@ export class TaskLogService extends EventEmitter { worktreeSpecDir = watchedInfo[1].worktreeSpecDir; } else if (projectPath && specsRelPath && specId) { // Calculate worktree path from provided params - worktreeSpecDir = path.join(projectPath, '.worktrees', specId, specsRelPath, specId); + worktreeSpecDir = findWorktreeSpecDir(projectPath, specId, specsRelPath); } if (!worktreeSpecDir) { @@ -172,16 +181,22 @@ export class TaskLogService extends EventEmitter { * @param specsRelPath - Optional: Relative path to specs (e.g., "auto-claude/specs") */ startWatching(specId: string, specDir: string, projectPath?: string, specsRelPath?: string): void { - // Stop any existing watch + // Check if already watching with the same parameters (prevents rapid watch/unwatch cycles) + const existingWatch = this.watchedPaths.get(specId); + if (existingWatch && existingWatch.mainSpecDir === specDir) { + // Already watching this spec with the same spec directory - no-op + return; + } + + // Stop any existing watch (different spec dir or first time) this.stopWatching(specId); const mainLogFile = path.join(specDir, 'task_logs.json'); // Calculate worktree spec directory path if we have project info - // Worktree structure: .worktrees/{specId}/{specsRelPath}/{specId}/ let worktreeSpecDir: string | null = null; if (projectPath && specsRelPath) { - worktreeSpecDir = path.join(projectPath, '.worktrees', specId, specsRelPath, specId); + worktreeSpecDir = findWorktreeSpecDir(projectPath, specId, specsRelPath); } // Store watched paths for this specId @@ -222,10 +237,31 @@ export class TaskLogService extends EventEmitter { } // Poll for changes in both locations + // Note: worktreeSpecDir may be null initially if worktree doesn't exist yet. + // We need to dynamically re-discover it during polling. 
const pollInterval = setInterval(() => { let mainChanged = false; let worktreeChanged = false; + // Dynamically re-discover worktree if not found yet + // This handles the case where user opens logs before worktree is created + const watchedInfo = this.watchedPaths.get(specId); + let currentWorktreeSpecDir = watchedInfo?.worktreeSpecDir || null; + + if (!currentWorktreeSpecDir && projectPath && specsRelPath) { + const discoveredWorktree = findWorktreeSpecDir(projectPath, specId, specsRelPath); + if (discoveredWorktree) { + currentWorktreeSpecDir = discoveredWorktree; + // Update stored paths so future iterations don't need to re-discover + this.watchedPaths.set(specId, { + mainSpecDir: specDir, + worktreeSpecDir: discoveredWorktree, + specsRelPath: specsRelPath + }); + console.warn(`[TaskLogService] Discovered worktree for ${specId}: ${discoveredWorktree}`); + } + } + // Check main spec dir if (existsSync(mainLogFile)) { try { @@ -240,8 +276,8 @@ export class TaskLogService extends EventEmitter { } // Check worktree spec dir - if (worktreeSpecDir) { - const worktreeLogFile = path.join(worktreeSpecDir, 'task_logs.json'); + if (currentWorktreeSpecDir) { + const worktreeLogFile = path.join(currentWorktreeSpecDir, 'task_logs.json'); if (existsSync(worktreeLogFile)) { try { const currentContent = readFileSync(worktreeLogFile, 'utf-8'); diff --git a/apps/frontend/src/main/terminal-name-generator.ts b/apps/frontend/src/main/terminal-name-generator.ts index afe31de18a..d442949661 100644 --- a/apps/frontend/src/main/terminal-name-generator.ts +++ b/apps/frontend/src/main/terminal-name-generator.ts @@ -46,6 +46,23 @@ export class TerminalNameGenerator extends EventEmitter { return this.autoBuildSourcePath; } + // In packaged app, check userData override first (consistent with path-resolver.ts) + if (app.isPackaged) { + // Check for user-updated backend source first (takes priority over bundled) + const overridePath = path.join(app.getPath('userData'), 'backend-source'); + if (existsSync(overridePath) && existsSync(path.join(overridePath, 'runners', 'spec_runner.py'))) { + debug('Using user-updated backend from userData:', overridePath); + return overridePath; + } + // Fall back to bundled backend in resources + const resourcesPath = path.join(process.resourcesPath, 'backend'); + if (existsSync(resourcesPath) && existsSync(path.join(resourcesPath, 'runners', 'spec_runner.py'))) { + debug('Using bundled backend from resources:', resourcesPath); + return resourcesPath; + } + } + + // Development mode paths const possiblePaths = [ // Apps structure: from out/main -> apps/backend path.resolve(__dirname, '..', '..', '..', 'backend'), diff --git a/apps/frontend/src/main/terminal-session-store.ts b/apps/frontend/src/main/terminal-session-store.ts index b3637756da..e108173a58 100644 --- a/apps/frontend/src/main/terminal-session-store.ts +++ b/apps/frontend/src/main/terminal-session-store.ts @@ -1,6 +1,7 @@ import { app } from 'electron'; import { join } from 'path'; import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs'; +import type { TerminalWorktreeConfig } from '../shared/types'; /** * Persisted terminal session data @@ -15,6 +16,8 @@ export interface TerminalSession { outputBuffer: string; // Last 100KB of output for replay createdAt: string; // ISO timestamp lastActiveAt: string; // ISO timestamp + /** Associated worktree configuration (validated on restore) */ + worktreeConfig?: TerminalWorktreeConfig; } /** @@ -203,21 +206,47 @@ export class TerminalSessionStore { this.save(); } + 
/** + * Validate worktree config - check if the worktree still exists + * Returns undefined if worktree doesn't exist or is invalid + */ + private validateWorktreeConfig(config: TerminalWorktreeConfig | undefined): TerminalWorktreeConfig | undefined { + if (!config) return undefined; + + // Check if the worktree path still exists + if (!existsSync(config.worktreePath)) { + console.warn(`[TerminalSessionStore] Worktree path no longer exists: ${config.worktreePath}, clearing config`); + return undefined; + } + + return config; + } + /** * Get most recent sessions for a project. * First checks today, then looks at the most recent date with sessions. - * This ensures sessions survive app restarts even after midnight. + * When restoring from a previous date, MIGRATES sessions to today to prevent + * duplication issues across days. + * Validates worktree configs - clears them if worktree no longer exists. */ getSessions(projectPath: string): TerminalSession[] { + const today = getDateString(); + // First check today const todaySessions = this.getTodaysSessions(); if (todaySessions[projectPath]?.length > 0) { - return todaySessions[projectPath]; + // Validate worktree configs before returning + return todaySessions[projectPath].map(session => ({ + ...session, + worktreeConfig: this.validateWorktreeConfig(session.worktreeConfig), + })); } // If no sessions today, find the most recent date with sessions for this project const dates = Object.keys(this.data.sessionsByDate) .filter(date => { + // Exclude today since we already checked it + if (date === today) return false; const sessions = this.data.sessionsByDate[date][projectPath]; return sessions && sessions.length > 0; }) @@ -225,8 +254,34 @@ export class TerminalSessionStore { if (dates.length > 0) { const mostRecentDate = dates[0]; - console.warn(`[TerminalSessionStore] No sessions today, using sessions from ${mostRecentDate}`); - return this.data.sessionsByDate[mostRecentDate][projectPath] || []; + console.warn(`[TerminalSessionStore] No sessions today, migrating sessions from ${mostRecentDate} to today`); + const sessions = this.data.sessionsByDate[mostRecentDate][projectPath] || []; + + // MIGRATE: Copy sessions to today's bucket with validated worktree configs + const migratedSessions = sessions.map(session => ({ + ...session, + worktreeConfig: this.validateWorktreeConfig(session.worktreeConfig), + // Update lastActiveAt to now since we're restoring them + lastActiveAt: new Date().toISOString(), + })); + + // Add migrated sessions to today + todaySessions[projectPath] = migratedSessions; + + // Remove sessions from the old date to prevent duplication + delete this.data.sessionsByDate[mostRecentDate][projectPath]; + + // Clean up empty date buckets + if (Object.keys(this.data.sessionsByDate[mostRecentDate]).length === 0) { + delete this.data.sessionsByDate[mostRecentDate]; + } + + // Save the migration + this.save(); + + console.warn(`[TerminalSessionStore] Migrated ${migratedSessions.length} sessions from ${mostRecentDate} to ${today}`); + + return migratedSessions; } return []; @@ -234,11 +289,17 @@ export class TerminalSessionStore { /** * Get sessions for a specific date and project + * Validates worktree configs - clears them if worktree no longer exists. 
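+ *
+ * A usage sketch, assuming the date string matches getDateString() output:
+ *   const store = getTerminalSessionStore();
+ *   for (const s of store.getSessionsForDate('2025-06-01', '/repo')) {
+ *     // s.worktreeConfig is undefined whenever its worktreePath no longer exists
+ *   }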
*/ getSessionsForDate(date: string, projectPath: string): TerminalSession[] { const dateSessions = this.data.sessionsByDate[date]; if (!dateSessions) return []; - return dateSessions[projectPath] || []; + const sessions = dateSessions[projectPath] || []; + // Validate worktree configs before returning + return sessions.map(session => ({ + ...session, + worktreeConfig: this.validateWorktreeConfig(session.worktreeConfig), + })); } /** diff --git a/apps/frontend/src/main/terminal/__tests__/claude-integration-handler.test.ts b/apps/frontend/src/main/terminal/__tests__/claude-integration-handler.test.ts new file mode 100644 index 0000000000..739b58bd4d --- /dev/null +++ b/apps/frontend/src/main/terminal/__tests__/claude-integration-handler.test.ts @@ -0,0 +1,609 @@ +import { writeFileSync } from 'fs'; +import { describe, expect, it, vi, beforeEach } from 'vitest'; +import type * as pty from '@lydell/node-pty'; +import type { TerminalProcess } from '../types'; + +const mockGetClaudeCliInvocation = vi.fn(); +const mockGetClaudeProfileManager = vi.fn(); +const mockPersistSession = vi.fn(); +const mockReleaseSessionId = vi.fn(); + +const createMockDisposable = (): pty.IDisposable => ({ dispose: vi.fn() }); + +const createMockPty = (): pty.IPty => ({ + pid: 123, + cols: 80, + rows: 24, + process: 'bash', + handleFlowControl: false, + onData: vi.fn(() => createMockDisposable()), + onExit: vi.fn(() => createMockDisposable()), + write: vi.fn(), + resize: vi.fn(), + pause: vi.fn(), + resume: vi.fn(), + kill: vi.fn(), + clear: vi.fn(), +}); + +const createMockTerminal = (overrides: Partial<TerminalProcess> = {}): TerminalProcess => ({ + id: 'term-1', + pty: createMockPty(), + outputBuffer: '', + isClaudeMode: false, + claudeSessionId: undefined, + claudeProfileId: undefined, + title: 'Claude', + cwd: '/tmp/project', + projectPath: '/tmp/project', + ...overrides, +}); + +vi.mock('../../claude-cli-utils', () => ({ + getClaudeCliInvocation: mockGetClaudeCliInvocation, +})); + +vi.mock('../../claude-profile-manager', () => ({ + getClaudeProfileManager: mockGetClaudeProfileManager, +})); + +vi.mock('fs', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + writeFileSync: vi.fn(), + }; +}); + +vi.mock('../session-handler', () => ({ + persistSession: mockPersistSession, + releaseSessionId: mockReleaseSessionId, +})); + +vi.mock('os', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + tmpdir: vi.fn(() => '/tmp'), + }; +}); + +describe('claude-integration-handler', () => { + beforeEach(() => { + mockGetClaudeCliInvocation.mockClear(); + mockGetClaudeProfileManager.mockClear(); + mockPersistSession.mockClear(); + mockReleaseSessionId.mockClear(); + vi.mocked(writeFileSync).mockClear(); + }); + + it('uses the resolved CLI path and PATH prefix when invoking Claude', async () => { + mockGetClaudeCliInvocation.mockReturnValue({ + command: "/opt/claude bin/claude's", + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + const profileManager = { + getActiveProfile: vi.fn(() => ({ id: 'default', name: 'Default', isDefault: true })), + getProfile: vi.fn(), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal(); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', undefined, () => null, vi.fn()); + + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; +
expect(written).toContain("cd '/tmp/project' && "); + expect(written).toContain("PATH='/opt/claude/bin:/usr/bin' "); + expect(written).toContain("'/opt/claude bin/claude'\\''s'"); + expect(mockReleaseSessionId).toHaveBeenCalledWith('term-1'); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + expect(profileManager.getActiveProfile).toHaveBeenCalled(); + expect(profileManager.markProfileUsed).toHaveBeenCalledWith('default'); + }); + + it('converts Windows PATH separators to colons for bash invocations', async () => { + const originalPlatform = Object.getOwnPropertyDescriptor(process, 'platform'); + Object.defineProperty(process, 'platform', { value: 'win32' }); + + try { + mockGetClaudeCliInvocation.mockReturnValue({ + command: 'C:\\Tools\\claude\\claude.exe', + env: { PATH: 'C:\\Tools\\claude;C:\\Windows' }, + }); + const profileManager = { + getActiveProfile: vi.fn(() => ({ id: 'default', name: 'Default', isDefault: true })), + getProfile: vi.fn(), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal(); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', undefined, () => null, vi.fn()); + + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain("PATH='C:\\Tools\\claude:C:\\Windows' "); + expect(written).not.toContain('C:\\Tools\\claude;C:\\Windows'); + } finally { + if (originalPlatform) { + Object.defineProperty(process, 'platform', originalPlatform); + } + } + }); + + it('throws when invokeClaude cannot resolve the CLI invocation', async () => { + mockGetClaudeCliInvocation.mockImplementation(() => { + throw new Error('boom'); + }); + const profileManager = { + getActiveProfile: vi.fn(() => ({ id: 'default', name: 'Default', isDefault: true })), + getProfile: vi.fn(), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal({ id: 'term-err' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + expect(() => invokeClaude(terminal, '/tmp/project', undefined, () => null, vi.fn())).toThrow('boom'); + expect(mockReleaseSessionId).toHaveBeenCalledWith('term-err'); + expect(terminal.pty.write).not.toHaveBeenCalled(); + }); + + it('throws when resumeClaude cannot resolve the CLI invocation', async () => { + mockGetClaudeCliInvocation.mockImplementation(() => { + throw new Error('boom'); + }); + + const terminal = createMockTerminal({ + id: 'term-err-2', + cwd: undefined, + projectPath: '/tmp/project', + }); + + const { resumeClaude } = await import('../claude-integration-handler'); + expect(() => resumeClaude(terminal, 'abc123', () => null)).toThrow('boom'); + expect(terminal.pty.write).not.toHaveBeenCalled(); + }); + + it('throws when writing the OAuth token temp file fails', async () => { + mockGetClaudeCliInvocation.mockReturnValue({ + command: '/opt/claude/bin/claude', + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => ({ + id: 'prof-err', + name: 'Work', + isDefault: false, + oauthToken: 'token-value', + })), + getProfileToken: vi.fn(() => 'token-value'), + markProfileUsed: vi.fn(), + }; + mockGetClaudeProfileManager.mockReturnValue(profileManager); + vi.mocked(writeFileSync).mockImplementationOnce(() => { + throw new 
Error('disk full'); + }); + + const terminal = createMockTerminal({ id: 'term-err-3' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + expect(() => invokeClaude(terminal, '/tmp/project', 'prof-err', () => null, vi.fn())).toThrow('disk full'); + expect(terminal.pty.write).not.toHaveBeenCalled(); + }); + + it('uses the temp token flow when the active profile has an oauth token', async () => { + const command = '/opt/claude/bin/claude'; + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => ({ + id: 'prof-1', + name: 'Work', + isDefault: false, + oauthToken: 'token-value', + })), + getProfileToken: vi.fn(() => 'token-value'), + markProfileUsed: vi.fn(), + }; + + mockGetClaudeCliInvocation.mockReturnValue({ + command, + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + mockGetClaudeProfileManager.mockReturnValue(profileManager); + const nowSpy = vi.spyOn(Date, 'now').mockReturnValue(1234); + + const terminal = createMockTerminal({ id: 'term-3' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', 'prof-1', () => null, vi.fn()); + + const tokenPath = vi.mocked(writeFileSync).mock.calls[0]?.[0] as string; + const tokenContents = vi.mocked(writeFileSync).mock.calls[0]?.[1] as string; + expect(tokenPath).toMatch(/^\/tmp\/\.claude-token-1234-[0-9a-f]{16}$/); + expect(tokenContents).toBe("export CLAUDE_CODE_OAUTH_TOKEN='token-value'\n"); + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain("HISTFILE= HISTCONTROL=ignorespace "); + expect(written).toContain(`source '${tokenPath}'`); + expect(written).toContain(`rm -f '${tokenPath}'`); + expect(written).toContain(`exec '${command}'`); + expect(profileManager.getProfile).toHaveBeenCalledWith('prof-1'); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + + nowSpy.mockRestore(); + }); + + it('prefers the temp token flow when profile has both oauth token and config dir', async () => { + const command = '/opt/claude/bin/claude'; + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => ({ + id: 'prof-both', + name: 'Work', + isDefault: false, + oauthToken: 'token-value', + configDir: '/tmp/claude-config', + })), + getProfileToken: vi.fn(() => 'token-value'), + markProfileUsed: vi.fn(), + }; + + mockGetClaudeCliInvocation.mockReturnValue({ + command, + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + mockGetClaudeProfileManager.mockReturnValue(profileManager); + const nowSpy = vi.spyOn(Date, 'now').mockReturnValue(5678); + + const terminal = createMockTerminal({ id: 'term-both' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', 'prof-both', () => null, vi.fn()); + + const tokenPath = vi.mocked(writeFileSync).mock.calls[0]?.[0] as string; + const tokenContents = vi.mocked(writeFileSync).mock.calls[0]?.[1] as string; + expect(tokenPath).toMatch(/^\/tmp\/\.claude-token-5678-[0-9a-f]{16}$/); + expect(tokenContents).toBe("export CLAUDE_CODE_OAUTH_TOKEN='token-value'\n"); + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain(`source '${tokenPath}'`); + expect(written).toContain(`rm -f '${tokenPath}'`); + expect(written).toContain(`exec '${command}'`); + expect(written).not.toContain('CLAUDE_CONFIG_DIR='); + expect(profileManager.getProfile).toHaveBeenCalledWith('prof-both'); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); 
+ expect(profileManager.markProfileUsed).toHaveBeenCalledWith('prof-both'); + + nowSpy.mockRestore(); + }); + + it('handles missing profiles by falling back to the default command', async () => { + const command = '/opt/claude/bin/claude'; + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => undefined), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + + mockGetClaudeCliInvocation.mockReturnValue({ + command, + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal({ id: 'term-6' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', 'missing', () => null, vi.fn()); + + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain(`'${command}'`); + expect(profileManager.getProfile).toHaveBeenCalledWith('missing'); + expect(profileManager.markProfileUsed).not.toHaveBeenCalled(); + }); + + it('uses the config dir flow when the active profile has a config dir', async () => { + const command = '/opt/claude/bin/claude'; + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => ({ + id: 'prof-2', + name: 'Work', + isDefault: false, + configDir: '/tmp/claude-config', + })), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + + mockGetClaudeCliInvocation.mockReturnValue({ + command, + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal({ id: 'term-4' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', 'prof-2', () => null, vi.fn()); + + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain("HISTFILE= HISTCONTROL=ignorespace "); + expect(written).toContain("CLAUDE_CONFIG_DIR='/tmp/claude-config'"); + expect(written).toContain(`exec '${command}'`); + expect(profileManager.getProfile).toHaveBeenCalledWith('prof-2'); + expect(profileManager.markProfileUsed).toHaveBeenCalledWith('prof-2'); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + }); + + it('uses profile switching when a non-default profile is requested', async () => { + const command = '/opt/claude/bin/claude'; + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => ({ + id: 'prof-3', + name: 'Team', + isDefault: false, + })), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + + mockGetClaudeCliInvocation.mockReturnValue({ + command, + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal({ id: 'term-5' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', 'prof-3', () => null, vi.fn()); + + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain(`'${command}'`); + expect(written).toContain("PATH='/opt/claude/bin:/usr/bin' "); + expect(profileManager.getProfile).toHaveBeenCalledWith('prof-3'); + expect(profileManager.markProfileUsed).toHaveBeenCalledWith('prof-3'); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + }); + + it('uses --continue regardless of sessionId (sessionId is deprecated)', async () => { + mockGetClaudeCliInvocation.mockReturnValue({ + 
command: '/opt/claude/bin/claude', + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + + const terminal = createMockTerminal({ + id: 'term-2', + cwd: undefined, + projectPath: '/tmp/project', + }); + + const { resumeClaude } = await import('../claude-integration-handler'); + + // Even when sessionId is passed, it should be ignored and --continue used + resumeClaude(terminal, 'abc123', () => null); + + const resumeCall = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(resumeCall).toContain("PATH='/opt/claude/bin:/usr/bin' "); + expect(resumeCall).toContain("'/opt/claude/bin/claude' --continue"); + expect(resumeCall).not.toContain('--resume'); + // sessionId is cleared because --continue doesn't track specific sessions + expect(terminal.claudeSessionId).toBeUndefined(); + expect(terminal.isClaudeMode).toBe(true); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + + vi.mocked(terminal.pty.write).mockClear(); + mockPersistSession.mockClear(); + terminal.projectPath = undefined; + terminal.isClaudeMode = false; + resumeClaude(terminal, undefined, () => null); + const continueCall = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(continueCall).toContain("'/opt/claude/bin/claude' --continue"); + expect(terminal.isClaudeMode).toBe(true); + expect(terminal.claudeSessionId).toBeUndefined(); + expect(mockPersistSession).not.toHaveBeenCalled(); + }); +}); + +/** + * Unit tests for helper functions + */ +describe('claude-integration-handler - Helper Functions', () => { + describe('buildClaudeShellCommand', () => { + it('should build default command without cwd or PATH prefix', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand('', '', "'/opt/bin/claude'", { method: 'default' }); + + expect(result).toBe("'/opt/bin/claude'\r"); + }); + + it('should build command with cwd', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand("cd '/tmp/project' && ", '', "'/opt/bin/claude'", { method: 'default' }); + + expect(result).toBe("cd '/tmp/project' && '/opt/bin/claude'\r"); + }); + + it('should build command with PATH prefix', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand('', "PATH='/custom/path' ", "'/opt/bin/claude'", { method: 'default' }); + + expect(result).toBe("PATH='/custom/path' '/opt/bin/claude'\r"); + }); + + it('should build temp-file method command with history-safe prefixes', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand( + "cd '/tmp/project' && ", + "PATH='/opt/bin' ", + "'/opt/bin/claude'", + { method: 'temp-file', escapedTempFile: "'/tmp/.token-123'" } + ); + + expect(result).toContain('clear && '); + expect(result).toContain("cd '/tmp/project' && "); + expect(result).toContain('HISTFILE= HISTCONTROL=ignorespace'); + expect(result).toContain("PATH='/opt/bin' "); + expect(result).toContain("source '/tmp/.token-123'"); + expect(result).toContain("rm -f '/tmp/.token-123'"); + expect(result).toContain("exec '/opt/bin/claude'"); + }); + + it('should build config-dir method command with CLAUDE_CONFIG_DIR', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand( + "cd '/tmp/project' && ", + "PATH='/opt/bin' ", + 
"'/opt/bin/claude'", + { method: 'config-dir', escapedConfigDir: "'/home/user/.claude-work'" } + ); + + expect(result).toContain('clear && '); + expect(result).toContain("cd '/tmp/project' && "); + expect(result).toContain('HISTFILE= HISTCONTROL=ignorespace'); + expect(result).toContain("CLAUDE_CONFIG_DIR='/home/user/.claude-work'"); + expect(result).toContain("PATH='/opt/bin' "); + expect(result).toContain("exec '/opt/bin/claude'"); + }); + + it('should handle empty cwdCommand for temp-file method', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand( + '', + '', + "'/opt/bin/claude'", + { method: 'temp-file', escapedTempFile: "'/tmp/.token'" } + ); + + expect(result).toContain('clear && '); + expect(result).toContain('HISTFILE= HISTCONTROL=ignorespace'); + expect(result).not.toContain('cd '); + expect(result).toContain("source '/tmp/.token'"); + }); + }); + + describe('finalizeClaudeInvoke', () => { + it('should set terminal title to "Claude" for default profile', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + const terminal = createMockTerminal(); + const mockWindow = { + webContents: { send: vi.fn() } + }; + + finalizeClaudeInvoke( + terminal, + { name: 'Default', isDefault: true }, + '/tmp/project', + Date.now(), + () => mockWindow as any, + vi.fn() + ); + + expect(terminal.title).toBe('Claude'); + }); + + it('should set terminal title to "Claude (ProfileName)" for non-default profile', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + const terminal = createMockTerminal(); + const mockWindow = { + webContents: { send: vi.fn() } + }; + + finalizeClaudeInvoke( + terminal, + { name: 'Work Profile', isDefault: false }, + '/tmp/project', + Date.now(), + () => mockWindow as any, + vi.fn() + ); + + expect(terminal.title).toBe('Claude (Work Profile)'); + }); + + it('should send IPC message to renderer', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + const terminal = createMockTerminal(); + const mockSend = vi.fn(); + const mockWindow = { + webContents: { send: mockSend } + }; + + finalizeClaudeInvoke( + terminal, + undefined, + '/tmp/project', + Date.now(), + () => mockWindow as any, + vi.fn() + ); + + expect(mockSend).toHaveBeenCalledWith( + expect.stringContaining('title'), + terminal.id, + 'Claude' + ); + }); + + it('should persist session when terminal has projectPath', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + const terminal = createMockTerminal({ projectPath: '/tmp/project' }); + + finalizeClaudeInvoke( + terminal, + undefined, + '/tmp/project', + Date.now(), + () => null, + vi.fn() + ); + + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + }); + + it('should call onSessionCapture when projectPath is provided', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + const terminal = createMockTerminal(); + const mockOnSessionCapture = vi.fn(); + const startTime = Date.now(); + + finalizeClaudeInvoke( + terminal, + undefined, + '/tmp/project', + startTime, + () => null, + mockOnSessionCapture + ); + + expect(mockOnSessionCapture).toHaveBeenCalledWith(terminal.id, '/tmp/project', startTime); + }); + + it('should not crash when getWindow returns null', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + 
const terminal = createMockTerminal(); + + expect(() => { + finalizeClaudeInvoke( + terminal, + undefined, + '/tmp/project', + Date.now(), + () => null, + vi.fn() + ); + }).not.toThrow(); + }); + }); +}); diff --git a/apps/frontend/src/main/terminal/claude-integration-handler.ts b/apps/frontend/src/main/terminal/claude-integration-handler.ts index ae761772bf..ae420b2d97 100644 --- a/apps/frontend/src/main/terminal/claude-integration-handler.ts +++ b/apps/frontend/src/main/terminal/claude-integration-handler.ts @@ -5,13 +5,16 @@ import * as os from 'os'; import * as fs from 'fs'; +import { promises as fsPromises } from 'fs'; import * as path from 'path'; +import * as crypto from 'crypto'; import { IPC_CHANNELS } from '../../shared/constants'; -import { getClaudeProfileManager } from '../claude-profile-manager'; +import { getClaudeProfileManager, initializeClaudeProfileManager } from '../claude-profile-manager'; import * as OutputParser from './output-parser'; import * as SessionHandler from './session-handler'; import { debugLog, debugError } from '../../shared/utils/debug-logger'; import { escapeShellArg, buildCdCommand } from '../../shared/utils/shell-escape'; +import { getClaudeCliInvocation, getClaudeCliInvocationAsync } from '../claude-cli-utils'; import type { TerminalProcess, WindowGetter, @@ -19,6 +22,137 @@ import type { OAuthTokenEvent } from './types'; +function normalizePathForBash(envPath: string): string { + return process.platform === 'win32' ? envPath.replace(/;/g, ':') : envPath; +} + +// ============================================================================ +// SHARED HELPERS - Used by both sync and async invokeClaude +// ============================================================================ + +/** + * Configuration for building Claude shell commands using discriminated union. + * This provides type safety by ensuring the correct options are provided for each method. + */ +type ClaudeCommandConfig = + | { method: 'default' } + | { method: 'temp-file'; escapedTempFile: string } + | { method: 'config-dir'; escapedConfigDir: string }; + +/** + * Build the shell command for invoking Claude CLI. + * + * Generates the appropriate command string based on the invocation method: + * - 'default': Simple command execution + * - 'temp-file': Sources OAuth token from temp file, then removes it + * - 'config-dir': Sets CLAUDE_CONFIG_DIR for custom profile location + * + * All non-default methods include history-safe prefixes (HISTFILE=, HISTCONTROL=) + * to prevent sensitive data from appearing in shell history. 
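+ *
+ * For illustration, a temp-file invocation looks like this on the wire
+ * (paths hypothetical): clear && HISTFILE= HISTCONTROL=ignorespace bash -c
+ * "source '/tmp/.claude-token-1' && rm -f '/tmp/.claude-token-1' && exec claude"
+ * HISTFILE= disables history file writing for the invocation, and
+ * HISTCONTROL=ignorespace keeps space-prefixed commands out of history.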
+ * + * @param cwdCommand - Command to change directory (empty string if no change needed) + * @param pathPrefix - PATH prefix for Claude CLI (empty string if not needed) + * @param escapedClaudeCmd - Shell-escaped Claude CLI command + * @param config - Configuration object with method and required options (discriminated union) + * @returns Complete shell command string ready for terminal.pty.write() + * + * @example + * // Default method + * buildClaudeShellCommand('cd /path && ', 'PATH=/bin ', 'claude', { method: 'default' }); + * // Returns: 'cd /path && PATH=/bin claude\r' + * + * // Temp file method + * buildClaudeShellCommand('', '', 'claude', { method: 'temp-file', escapedTempFile: '/tmp/token' }); + * // Returns: 'clear && HISTFILE= HISTCONTROL=ignorespace bash -c "source /tmp/token && rm -f /tmp/token && exec claude"\r' + */ +export function buildClaudeShellCommand( + cwdCommand: string, + pathPrefix: string, + escapedClaudeCmd: string, + config: ClaudeCommandConfig +): string { + switch (config.method) { + case 'temp-file': + return `clear && ${cwdCommand}HISTFILE= HISTCONTROL=ignorespace ${pathPrefix}bash -c "source ${config.escapedTempFile} && rm -f ${config.escapedTempFile} && exec ${escapedClaudeCmd}"\r`; + + case 'config-dir': + return `clear && ${cwdCommand}HISTFILE= HISTCONTROL=ignorespace CLAUDE_CONFIG_DIR=${config.escapedConfigDir} ${pathPrefix}bash -c "exec ${escapedClaudeCmd}"\r`; + + default: + return `${cwdCommand}${pathPrefix}${escapedClaudeCmd}\r`; + } +} + +/** + * Profile information for terminal title generation + */ +interface ProfileInfo { + /** Profile name for display */ + name?: string; + /** Whether this is the default profile */ + isDefault?: boolean; +} + +/** + * Callback type for session capture + */ +type SessionCaptureCallback = (terminalId: string, projectPath: string, startTime: number) => void; + +/** + * Finalize terminal state after invoking Claude. + * + * Updates terminal title, sends IPC notification to renderer, persists session, + * and calls the session capture callback. This consolidates the post-invocation + * logic used by both sync and async invoke methods. + * + * @param terminal - The terminal process to update + * @param activeProfile - The profile being used (or undefined for default) + * @param projectPath - The project path (for session capture) + * @param startTime - Timestamp when invocation started + * @param getWindow - Function to get the BrowserWindow + * @param onSessionCapture - Callback for session capture + * + * @example + * finalizeClaudeInvoke( + * terminal, + * { name: 'Work', isDefault: false }, + * '/path/to/project', + * Date.now(), + * () => mainWindow, + * (id, path, time) => console.log('Session captured') + * ); + */ +export function finalizeClaudeInvoke( + terminal: TerminalProcess, + activeProfile: ProfileInfo | undefined, + projectPath: string | undefined, + startTime: number, + getWindow: WindowGetter, + onSessionCapture: SessionCaptureCallback +): void { + // Set terminal title based on profile + const title = activeProfile && !activeProfile.isDefault + ? 
`Claude (${activeProfile.name})` + : 'Claude'; + terminal.title = title; + + // Notify renderer of title change + const win = getWindow(); + if (win) { + win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, terminal.id, title); + } + + // Persist session if project path is available + if (terminal.projectPath) { + SessionHandler.persistSession(terminal); + } + + // Call session capture callback if project path provided + if (projectPath) { + onSessionCapture(terminal.id, projectPath, startTime); + } +} + /** * Handle rate limit detection and profile switching */ @@ -211,6 +345,7 @@ export function invokeClaude( debugLog('[ClaudeIntegration:invokeClaude] CWD:', cwd); terminal.isClaudeMode = true; + SessionHandler.releaseSessionId(terminal.id); terminal.claudeSessionId = undefined; const startTime = Date.now(); @@ -232,8 +367,12 @@ export function invokeClaude( isDefault: activeProfile?.isDefault }); - // Use safe shell escaping to prevent command injection const cwdCommand = buildCdCommand(cwd); + const { command: claudeCmd, env: claudeEnv } = getClaudeCliInvocation(); + const escapedClaudeCmd = escapeShellArg(claudeCmd); + const pathPrefix = claudeEnv.PATH + ? `PATH=${escapeShellArg(normalizePathForBash(claudeEnv.PATH))} ` + : ''; const needsEnvOverride = profileId && profileId !== previousProfileId; debugLog('[ClaudeIntegration:invokeClaude] Environment override check:', { @@ -250,30 +389,30 @@ export function invokeClaude( }); if (token) { - const tempFile = path.join(os.tmpdir(), `.claude-token-${Date.now()}`); + const nonce = crypto.randomBytes(8).toString('hex'); + const tempFile = path.join(os.tmpdir(), `.claude-token-${Date.now()}-${nonce}`); + const escapedTempFile = escapeShellArg(tempFile); debugLog('[ClaudeIntegration:invokeClaude] Writing token to temp file:', tempFile); - fs.writeFileSync(tempFile, `export CLAUDE_CODE_OAUTH_TOKEN="${token}"\n`, { mode: 0o600 }); - - // Clear terminal and run command without adding to shell history: - // - HISTFILE= disables history file writing for the current command - // - HISTCONTROL=ignorespace causes commands starting with space to be ignored - // - Leading space ensures the command is ignored even if HISTCONTROL was already set - // - Uses subshell (...) 
to isolate environment changes - // This prevents temp file paths from appearing in shell history - const command = `clear && ${cwdCommand} HISTFILE= HISTCONTROL=ignorespace bash -c 'source "${tempFile}" && rm -f "${tempFile}" && exec claude'\r`; + fs.writeFileSync( + tempFile, + `export CLAUDE_CODE_OAUTH_TOKEN=${escapeShellArg(token)}\n`, + { mode: 0o600 } + ); + + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'temp-file', escapedTempFile }); debugLog('[ClaudeIntegration:invokeClaude] Executing command (temp file method, history-safe)'); terminal.pty.write(command); + profileManager.markProfileUsed(activeProfile.id); + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); debugLog('[ClaudeIntegration:invokeClaude] ========== INVOKE CLAUDE COMPLETE (temp file) =========='); return; } else if (activeProfile.configDir) { - // Clear terminal and run command without adding to shell history: - // Same history-disabling technique as temp file method above - // SECURITY: Use escapeShellArg for configDir to prevent command injection - // Set CLAUDE_CONFIG_DIR as env var before bash -c to avoid embedding user input in the command string const escapedConfigDir = escapeShellArg(activeProfile.configDir); - const command = `clear && ${cwdCommand}HISTFILE= HISTCONTROL=ignorespace CLAUDE_CONFIG_DIR=${escapedConfigDir} bash -c 'exec claude'\r`; + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'config-dir', escapedConfigDir }); debugLog('[ClaudeIntegration:invokeClaude] Executing command (configDir method, history-safe)'); terminal.pty.write(command); + profileManager.markProfileUsed(activeProfile.id); + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); debugLog('[ClaudeIntegration:invokeClaude] ========== INVOKE CLAUDE COMPLETE (configDir) =========='); return; } else { @@ -285,7 +424,7 @@ export function invokeClaude( debugLog('[ClaudeIntegration:invokeClaude] Using terminal environment for non-default profile:', activeProfile.name); } - const command = `${cwdCommand}claude\r`; + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'default' }); debugLog('[ClaudeIntegration:invokeClaude] Executing command (default method):', command); terminal.pty.write(command); @@ -293,50 +432,228 @@ export function invokeClaude( profileManager.markProfileUsed(activeProfile.id); } + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); + debugLog('[ClaudeIntegration:invokeClaude] ========== INVOKE CLAUDE COMPLETE (default) =========='); +} + +/** + * Resume Claude session in the current directory + * + * Uses `claude --continue` which resumes the most recent conversation in the + * current directory. This is simpler and more reliable than tracking session IDs, + * since Auto Claude already restores terminals to their correct cwd/projectPath. + * + * Note: The sessionId parameter is kept for backwards compatibility but is ignored. + * Claude Code's --resume flag expects user-named sessions (set via /rename), not + * internal session file IDs. 
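+ *
+ * Hypothetical call site (the window getter is illustrative):
+ *   resumeClaude(terminal, undefined, () => mainWindow);
+ *   // terminal receives: PATH='...' '/path/to/claude' --continue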
+ */ +export function resumeClaude( + terminal: TerminalProcess, + _sessionId: string | undefined, + getWindow: WindowGetter +): void { + terminal.isClaudeMode = true; + SessionHandler.releaseSessionId(terminal.id); + + const { command: claudeCmd, env: claudeEnv } = getClaudeCliInvocation(); + const escapedClaudeCmd = escapeShellArg(claudeCmd); + const pathPrefix = claudeEnv.PATH + ? `PATH=${escapeShellArg(normalizePathForBash(claudeEnv.PATH))} ` + : ''; + + // Always use --continue which resumes the most recent session in the current directory. + // This is more reliable than --resume with session IDs since Auto Claude already restores + // terminals to their correct cwd/projectPath. + // + // Note: We clear claudeSessionId because --continue doesn't track specific sessions, + // and we don't want stale IDs persisting through SessionHandler.persistSession(). + terminal.claudeSessionId = undefined; + + // Deprecation warning for callers still passing sessionId + if (_sessionId) { + console.warn('[ClaudeIntegration:resumeClaude] sessionId parameter is deprecated and ignored; using claude --continue instead'); + } + + const command = `${pathPrefix}${escapedClaudeCmd} --continue`; + + terminal.pty.write(`${command}\r`); + + // Update terminal title in main process and notify renderer + terminal.title = 'Claude'; const win = getWindow(); if (win) { - const title = activeProfile && !activeProfile.isDefault - ? `Claude (${activeProfile.name})` - : 'Claude'; - win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, terminal.id, title); + win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, terminal.id, 'Claude'); } + // Persist session with updated title if (terminal.projectPath) { SessionHandler.persistSession(terminal); } +} - if (projectPath) { - onSessionCapture(terminal.id, projectPath, startTime); +// ============================================================================ +// ASYNC VERSIONS - Non-blocking alternatives for Electron main process +// ============================================================================ + +/** + * Invoke Claude asynchronously (non-blocking) + * + * Safe to call from Electron main process without blocking the event loop. + * Uses async CLI detection which doesn't block on subprocess calls. + */ +export async function invokeClaudeAsync( + terminal: TerminalProcess, + cwd: string | undefined, + profileId: string | undefined, + getWindow: WindowGetter, + onSessionCapture: (terminalId: string, projectPath: string, startTime: number) => void +): Promise<void> { + debugLog('[ClaudeIntegration:invokeClaudeAsync] ========== INVOKE CLAUDE START (async) =========='); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Terminal ID:', terminal.id); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Requested profile ID:', profileId); + debugLog('[ClaudeIntegration:invokeClaudeAsync] CWD:', cwd); + + terminal.isClaudeMode = true; + SessionHandler.releaseSessionId(terminal.id); + terminal.claudeSessionId = undefined; + + const startTime = Date.now(); + const projectPath = cwd || terminal.projectPath || terminal.cwd; + + // Ensure profile manager is initialized (async, yields to event loop) + const profileManager = await initializeClaudeProfileManager(); + const activeProfile = profileId + ?
profileManager.getProfile(profileId) + : profileManager.getActiveProfile(); + + const previousProfileId = terminal.claudeProfileId; + terminal.claudeProfileId = activeProfile?.id; + + debugLog('[ClaudeIntegration:invokeClaudeAsync] Profile resolution:', { + previousProfileId, + newProfileId: activeProfile?.id, + profileName: activeProfile?.name, + hasOAuthToken: !!activeProfile?.oauthToken, + isDefault: activeProfile?.isDefault + }); + + // Async CLI invocation - non-blocking + const cwdCommand = buildCdCommand(cwd); + const { command: claudeCmd, env: claudeEnv } = await getClaudeCliInvocationAsync(); + const escapedClaudeCmd = escapeShellArg(claudeCmd); + const pathPrefix = claudeEnv.PATH + ? `PATH=${escapeShellArg(normalizePathForBash(claudeEnv.PATH))} ` + : ''; + const needsEnvOverride = profileId && profileId !== previousProfileId; + + debugLog('[ClaudeIntegration:invokeClaudeAsync] Environment override check:', { + profileIdProvided: !!profileId, + previousProfileId, + needsEnvOverride + }); + + if (needsEnvOverride && activeProfile && !activeProfile.isDefault) { + const token = profileManager.getProfileToken(activeProfile.id); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Token retrieval:', { + hasToken: !!token, + tokenLength: token?.length + }); + + if (token) { + const nonce = crypto.randomBytes(8).toString('hex'); + const tempFile = path.join(os.tmpdir(), `.claude-token-${Date.now()}-${nonce}`); + const escapedTempFile = escapeShellArg(tempFile); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Writing token to temp file:', tempFile); + await fsPromises.writeFile( + tempFile, + `export CLAUDE_CODE_OAUTH_TOKEN=${escapeShellArg(token)}\n`, + { mode: 0o600 } + ); + + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'temp-file', escapedTempFile }); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Executing command (temp file method, history-safe)'); + terminal.pty.write(command); + profileManager.markProfileUsed(activeProfile.id); + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); + debugLog('[ClaudeIntegration:invokeClaudeAsync] ========== INVOKE CLAUDE COMPLETE (temp file) =========='); + return; + } else if (activeProfile.configDir) { + const escapedConfigDir = escapeShellArg(activeProfile.configDir); + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'config-dir', escapedConfigDir }); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Executing command (configDir method, history-safe)'); + terminal.pty.write(command); + profileManager.markProfileUsed(activeProfile.id); + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); + debugLog('[ClaudeIntegration:invokeClaudeAsync] ========== INVOKE CLAUDE COMPLETE (configDir) =========='); + return; + } else { + debugLog('[ClaudeIntegration:invokeClaudeAsync] WARNING: No token or configDir available for non-default profile'); + } } - debugLog('[ClaudeIntegration:invokeClaude] ========== INVOKE CLAUDE COMPLETE (default) =========='); + if (activeProfile && !activeProfile.isDefault) { + debugLog('[ClaudeIntegration:invokeClaudeAsync] Using terminal environment for non-default profile:', activeProfile.name); + } + + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'default' }); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Executing command (default method):', command); + terminal.pty.write(command); + + if 
(activeProfile) { + profileManager.markProfileUsed(activeProfile.id); + } + + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); + debugLog('[ClaudeIntegration:invokeClaudeAsync] ========== INVOKE CLAUDE COMPLETE (default) =========='); } /** - * Resume Claude with optional session ID + * Resume Claude asynchronously (non-blocking) + * + * Safe to call from Electron main process without blocking the event loop. + * Uses async CLI detection which doesn't block on subprocess calls. */ -export function resumeClaude( +export async function resumeClaudeAsync( terminal: TerminalProcess, sessionId: string | undefined, getWindow: WindowGetter -): void { +): Promise<void> { terminal.isClaudeMode = true; + SessionHandler.releaseSessionId(terminal.id); + + // Async CLI invocation - non-blocking + const { command: claudeCmd, env: claudeEnv } = await getClaudeCliInvocationAsync(); + const escapedClaudeCmd = escapeShellArg(claudeCmd); + const pathPrefix = claudeEnv.PATH + ? `PATH=${escapeShellArg(normalizePathForBash(claudeEnv.PATH))} ` + : ''; + + // Always use --continue which resumes the most recent session in the current directory. + // This is more reliable than --resume with session IDs since Auto Claude already restores + // terminals to their correct cwd/projectPath. + // + // Note: We clear claudeSessionId because --continue doesn't track specific sessions, + // and we don't want stale IDs persisting through SessionHandler.persistSession(). + terminal.claudeSessionId = undefined; - let command: string; + // Deprecation warning for callers still passing sessionId if (sessionId) { - // SECURITY: Escape sessionId to prevent command injection - command = `claude --resume ${escapeShellArg(sessionId)}`; - terminal.claudeSessionId = sessionId; - } else { - command = 'claude --continue'; + console.warn('[ClaudeIntegration:resumeClaudeAsync] sessionId parameter is deprecated and ignored; using claude --continue instead'); } + const command = `${pathPrefix}${escapedClaudeCmd} --continue`; + terminal.pty.write(`${command}\r`); + terminal.title = 'Claude'; const win = getWindow(); if (win) { win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, terminal.id, 'Claude'); } + + if (terminal.projectPath) { + SessionHandler.persistSession(terminal); + } } /** @@ -443,7 +760,7 @@ export async function switchClaudeProfile( terminal: TerminalProcess, profileId: string, getWindow: WindowGetter, - invokeClaudeCallback: (terminalId: string, cwd: string | undefined, profileId: string) => void, + invokeClaudeCallback: (terminalId: string, cwd: string | undefined, profileId: string) => Promise<void>, clearRateLimitCallback: (terminalId: string) => void ): Promise<{ success: boolean; error?: string }> { // Always-on tracing @@ -461,7 +778,8 @@ cwd: terminal.cwd }); - const profileManager = getClaudeProfileManager(); + // Ensure profile manager is initialized (async, yields to event loop) + const profileManager = await initializeClaudeProfileManager(); const profile = profileManager.getProfile(profileId); console.warn('[ClaudeIntegration:switchClaudeProfile] Profile found:', profile?.name || 'NOT FOUND'); @@ -529,7 +847,7 @@ projectPath, profileId }); - invokeClaudeCallback(terminal.id, projectPath, profileId); + await invokeClaudeCallback(terminal.id, projectPath, profileId); debugLog('[ClaudeIntegration:switchClaudeProfile] Setting active profile in profile manager');
profileManager.setActiveProfile(profileId); diff --git a/apps/frontend/src/main/terminal/output-parser.ts b/apps/frontend/src/main/terminal/output-parser.ts index 72458ef254..e955935aaa 100644 --- a/apps/frontend/src/main/terminal/output-parser.ts +++ b/apps/frontend/src/main/terminal/output-parser.ts @@ -79,3 +79,83 @@ export function hasRateLimitMessage(data: string): boolean { export function hasOAuthToken(data: string): boolean { return OAUTH_TOKEN_PATTERN.test(data); } + +/** + * Patterns indicating Claude Code is busy/processing + * These appear when Claude is actively thinking or working + * + * IMPORTANT: These must be universal patterns that work for ALL users, + * not just custom terminal configurations with progress bars. + */ +const CLAUDE_BUSY_PATTERNS = [ + // Universal Claude Code indicators + /^●/m, // Claude's response bullet point (appears when Claude is responding) + /\u25cf/, // Unicode bullet point (●) + + // Tool execution indicators (Claude is running tools) + /^(Read|Write|Edit|Bash|Grep|Glob|Task|WebFetch|WebSearch|TodoWrite)\(/m, + /^\s*\d+\s*[β”‚|]\s*/m, // Line numbers in file output (Claude reading/showing files) + + // Streaming/thinking indicators + /Loading\.\.\./i, + /Thinking\.\.\./i, + /Analyzing\.\.\./i, + /Processing\.\.\./i, + /Working\.\.\./i, + /Searching\.\.\./i, + /Creating\.\.\./i, + /Updating\.\.\./i, + /Running\.\.\./i, + + // Custom progress bar patterns (for users who have them) + /\[Opus\s*\d*\.?\d*\].*\d+%/i, // Opus model progress + /\[Sonnet\s*\d*\.?\d*\].*\d+%/i, // Sonnet model progress + /\[Haiku\s*\d*\.?\d*\].*\d+%/i, // Haiku model progress + /\[Claude\s*\d*\.?\d*\].*\d+%/i, // Generic Claude progress + /β–‘+/, // Progress bar characters + /β–“+/, // Progress bar characters + /β–ˆ+/, // Progress bar characters (filled) +]; + +/** + * Patterns indicating Claude Code is idle/ready for input + * The prompt character at the start of a line indicates Claude is waiting + */ +const CLAUDE_IDLE_PATTERNS = [ + /^>\s*$/m, // Just "> " prompt on its own line + /\n>\s*$/, // "> " at end after newline + /^\s*>\s+$/m, // "> " with possible whitespace +]; + +/** + * Check if output indicates Claude is busy (processing) + */ +export function isClaudeBusyOutput(data: string): boolean { + return CLAUDE_BUSY_PATTERNS.some(pattern => pattern.test(data)); +} + +/** + * Check if output indicates Claude is idle (ready for input) + */ +export function isClaudeIdleOutput(data: string): boolean { + return CLAUDE_IDLE_PATTERNS.some(pattern => pattern.test(data)); +} + +/** + * Determine Claude busy state from output + * Returns: 'busy' | 'idle' | null (no change detected) + */ +export function detectClaudeBusyState(data: string): 'busy' | 'idle' | null { + // Check for busy indicators FIRST - they're more definitive + // Progress bars and "Loading..." 
mean Claude is definitely working, + // even if there's a ">" prompt visible elsewhere in the output + if (isClaudeBusyOutput(data)) { + return 'busy'; + } + // Only check for idle if no busy indicators found + // The ">" prompt alone at end of output means Claude is waiting for input + if (isClaudeIdleOutput(data)) { + return 'idle'; + } + return null; +} diff --git a/apps/frontend/src/main/terminal/pty-manager.ts b/apps/frontend/src/main/terminal/pty-manager.ts index d118dca73c..bd38c07a5c 100644 --- a/apps/frontend/src/main/terminal/pty-manager.ts +++ b/apps/frontend/src/main/terminal/pty-manager.ts @@ -5,9 +5,65 @@ import * as pty from '@lydell/node-pty'; import * as os from 'os'; +import { existsSync } from 'fs'; import type { TerminalProcess, WindowGetter } from './types'; import { IPC_CHANNELS } from '../../shared/constants'; import { getClaudeProfileManager } from '../claude-profile-manager'; +import { readSettingsFile } from '../settings-utils'; +import type { SupportedTerminal } from '../../shared/types/settings'; + +/** + * Windows shell paths for different terminal preferences + */ +const WINDOWS_SHELL_PATHS: Record<string, string[]> = { + powershell: [ + 'C:\\Program Files\\PowerShell\\7\\pwsh.exe', // PowerShell 7 (Core) + 'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe', // Windows PowerShell 5.1 + ], + windowsterminal: [ + 'C:\\Program Files\\PowerShell\\7\\pwsh.exe', // Prefer PowerShell Core in Windows Terminal + 'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe', + ], + cmd: [ + 'C:\\Windows\\System32\\cmd.exe', + ], + gitbash: [ + 'C:\\Program Files\\Git\\bin\\bash.exe', + 'C:\\Program Files (x86)\\Git\\bin\\bash.exe', + ], + cygwin: [ + 'C:\\cygwin64\\bin\\bash.exe', + 'C:\\cygwin\\bin\\bash.exe', + ], + msys2: [ + 'C:\\msys64\\usr\\bin\\bash.exe', + 'C:\\msys32\\usr\\bin\\bash.exe', + ], +}; + +/** + * Get the Windows shell executable based on preferred terminal setting + */ +function getWindowsShell(preferredTerminal: SupportedTerminal | undefined): string { + // If no preference or 'system', use COMSPEC (usually cmd.exe) + if (!preferredTerminal || preferredTerminal === 'system') { + return process.env.COMSPEC || 'cmd.exe'; + } + + // Check if we have paths defined for this terminal type + const paths = WINDOWS_SHELL_PATHS[preferredTerminal]; + if (paths) { + // Find the first existing shell + for (const shellPath of paths) { + if (existsSync(shellPath)) { + return shellPath; + } + } + } + + // Fallback to COMSPEC for unrecognized terminals + return process.env.COMSPEC || 'cmd.exe'; +} /** * Spawn a new PTY process with appropriate shell and environment @@ -18,13 +74,25 @@ export function spawnPtyProcess( cols: number, rows: number, profileEnv?: Record<string, string> ): pty.IPty { + // Read user's preferred terminal setting + const settings = readSettingsFile(); + const preferredTerminal = settings?.preferredTerminal as SupportedTerminal | undefined; + const shell = process.platform === 'win32' - ? process.env.COMSPEC || 'cmd.exe' + ? getWindowsShell(preferredTerminal) : process.env.SHELL || '/bin/zsh'; const shellArgs = process.platform === 'win32' ? [] : ['-l']; - console.warn('[PtyManager] Spawning shell:', shell, shellArgs); + console.warn('[PtyManager] Spawning shell:', shell, shellArgs, '(preferred:', preferredTerminal || 'system', ')'); + + // Create a clean environment without DEBUG to prevent Claude Code from + // enabling debug mode when the Electron app is run in development mode.
+ // Also remove ANTHROPIC_API_KEY to ensure Claude Code uses OAuth tokens + // (CLAUDE_CODE_OAUTH_TOKEN from profileEnv) instead of API keys that may + // be present in the shell environment. Without this, Claude Code would + // show "Claude API" instead of "Claude Max" when ANTHROPIC_API_KEY is set. + const { DEBUG: _DEBUG, ANTHROPIC_API_KEY: _ANTHROPIC_API_KEY, ...cleanEnv } = process.env; return pty.spawn(shell, shellArgs, { name: 'xterm-256color', @@ -32,7 +100,7 @@ rows, cwd: cwd || os.homedir(), env: { - ...process.env, + ...cleanEnv, ...profileEnv, TERM: 'xterm-256color', COLORTERM: 'truecolor', diff --git a/apps/frontend/src/main/terminal/session-handler.ts b/apps/frontend/src/main/terminal/session-handler.ts index 9ac08fe5a7..38edfa1e1a 100644 --- a/apps/frontend/src/main/terminal/session-handler.ts +++ b/apps/frontend/src/main/terminal/session-handler.ts @@ -11,6 +11,48 @@ import { getTerminalSessionStore, type TerminalSession } from '../terminal-sessi import { IPC_CHANNELS } from '../../shared/constants'; import { debugLog, debugError } from '../../shared/utils/debug-logger'; +/** + * Track session IDs that have been claimed by terminals to prevent race conditions. + * When multiple terminals invoke Claude simultaneously, this prevents them from + * all capturing the same session ID. + * + * Key: sessionId, Value: terminalId that claimed it + */ +const claimedSessionIds: Map<string, string> = new Map(); + +/** + * Claim a session ID for a terminal. Returns true if successful, false if already claimed. + */ +export function claimSessionId(sessionId: string, terminalId: string): boolean { + const existingClaim = claimedSessionIds.get(sessionId); + if (existingClaim && existingClaim !== terminalId) { + debugLog('[SessionHandler] Session ID already claimed:', sessionId, 'by terminal:', existingClaim); + return false; + } + claimedSessionIds.set(sessionId, terminalId); + debugLog('[SessionHandler] Claimed session ID:', sessionId, 'for terminal:', terminalId); + return true; +} + +/** + * Release a session ID claim when a terminal is destroyed or session changes. + */ +export function releaseSessionId(terminalId: string): void { + for (const [sessionId, claimedBy] of claimedSessionIds.entries()) { + if (claimedBy === terminalId) { + claimedSessionIds.delete(sessionId); + debugLog('[SessionHandler] Released session ID:', sessionId, 'from terminal:', terminalId); + } + } +} + +/** + * Get all currently claimed session IDs (for exclusion during search). + */ +export function getClaimedSessionIds(): Set<string> { + return new Set(claimedSessionIds.keys()); +} + /** * Get the Claude project slug from a project path. * Claude uses the full path with forward slashes replaced by dashes. @@ -56,9 +98,19 @@ } /** - * Find a Claude session created/modified after a given timestamp + * Find a Claude session created/modified after a given timestamp. + * Excludes session IDs that have already been claimed by other terminals + * to prevent race conditions when multiple terminals invoke Claude simultaneously.
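+ *
+ * A sketch of the exclusion semantics (IDs and timestamp invented):
+ *   findClaudeSessionAfter('/repo', t0)                       // newest session file after t0
+ *   findClaudeSessionAfter('/repo', t0, new Set(['sess-a']))  // newest after t0 that is not sess-a, else null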
+ + * @param projectPath - The project path to search sessions for + * @param afterTimestamp - Only consider sessions modified after this timestamp + * @param excludeSessionIds - Optional set of session IDs to exclude (already claimed) */ -export function findClaudeSessionAfter(projectPath: string, afterTimestamp: number): string | null { +export function findClaudeSessionAfter( + projectPath: string, + afterTimestamp: number, + excludeSessionIds?: Set<string> +): string | null { const slug = getClaudeProjectSlug(projectPath); const claudeProjectDir = path.join(os.homedir(), '.claude', 'projects', slug); @@ -71,17 +123,22 @@ .filter(f => f.endsWith('.jsonl')) .map(f => ({ name: f, + sessionId: f.replace('.jsonl', ''), path: path.join(claudeProjectDir, f), mtime: fs.statSync(path.join(claudeProjectDir, f)).mtime.getTime() })) .filter(f => f.mtime > afterTimestamp) + // Exclude already-claimed session IDs to prevent race conditions + .filter(f => !excludeSessionIds || !excludeSessionIds.has(f.sessionId)) .sort((a, b) => b.mtime - a.mtime); if (files.length === 0) { return null; } - return files[0].name.replace('.jsonl', ''); + const sessionId = files[0].sessionId; + debugLog('[SessionHandler] Found unclaimed session after timestamp:', sessionId, 'excluded:', excludeSessionIds?.size ?? 0); + return sessionId; } catch (error) { debugError('[SessionHandler] Error finding Claude session:', error); return null; @@ -106,7 +163,8 @@ export function persistSession(terminal: TerminalProcess): void { claudeSessionId: terminal.claudeSessionId, outputBuffer: terminal.outputBuffer, createdAt: new Date().toISOString(), - lastActiveAt: new Date().toISOString() + lastActiveAt: new Date().toISOString(), + worktreeConfig: terminal.worktreeConfig, }; store.saveSession(session); } @@ -183,7 +241,9 @@ export function getSessionsForDate(date: string, projectPath: string): TerminalS } /** - * Attempt to capture Claude session ID by polling the session directory + * Attempt to capture Claude session ID by polling the session directory. + * Uses the claim mechanism to prevent race conditions when multiple terminals + * invoke Claude simultaneously - each terminal will get a unique session ID.
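+ *
+ * The claim handshake that guarantees uniqueness (IDs hypothetical):
+ *   claimSessionId('sess-a', 'term-1'); // true  - term-1 now owns sess-a
+ *   claimSessionId('sess-a', 'term-2'); // false - term-2 keeps polling for another file
+ *   releaseSessionId('term-1');         // sess-a is free again once term-1 goes away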
/** - * Attempt to capture Claude session ID by polling the session directory + * Attempt to capture Claude session ID by polling the session directory. + * Uses the claim mechanism to prevent race conditions when multiple terminals + * invoke Claude simultaneously - each terminal will get a unique session ID. */ export function captureClaudeSessionId( terminalId: string, @@ -200,31 +260,44 @@ const terminal = terminals.get(terminalId); if (!terminal || !terminal.isClaudeMode) { + debugLog('[SessionHandler] Terminal no longer in Claude mode, stopping session capture:', terminalId); return; } if (terminal.claudeSessionId) { + debugLog('[SessionHandler] Terminal already has session ID, stopping capture:', terminalId); return; } - const sessionId = findClaudeSessionAfter(projectPath, startTime); + // Get currently claimed session IDs to exclude from search + const claimedIds = getClaimedSessionIds(); + const sessionId = findClaudeSessionAfter(projectPath, startTime, claimedIds); if (sessionId) { - terminal.claudeSessionId = sessionId; - debugLog('[SessionHandler] Captured Claude session ID from directory:', sessionId); - - if (terminal.projectPath) { - updateClaudeSessionId(terminal.projectPath, terminalId, sessionId); - } - - const win = getWindow(); - if (win) { - win.webContents.send(IPC_CHANNELS.TERMINAL_CLAUDE_SESSION, terminalId, sessionId); + // Try to claim this session ID - if another terminal beat us to it, keep searching + if (claimSessionId(sessionId, terminalId)) { + terminal.claudeSessionId = sessionId; + debugLog('[SessionHandler] Captured and claimed Claude session ID:', sessionId, 'for terminal:', terminalId); + + if (terminal.projectPath) { + updateClaudeSessionId(terminal.projectPath, terminalId, sessionId); + } + + const win = getWindow(); + if (win) { + win.webContents.send(IPC_CHANNELS.TERMINAL_CLAUDE_SESSION, terminalId, sessionId); + } + } else { + // Session was claimed by another terminal, keep polling for a different one + debugLog('[SessionHandler] Session ID was claimed by another terminal, continuing to poll:', sessionId); + if (attempts < maxAttempts) { + setTimeout(checkForSession, 1000); + } } } else if (attempts < maxAttempts) { setTimeout(checkForSession, 1000); } else { - debugLog('[SessionHandler] Could not capture Claude session ID after', maxAttempts, 'attempts'); + debugLog('[SessionHandler] Could not capture Claude session ID after', maxAttempts, 'attempts for terminal:', terminalId); } }; diff --git a/apps/frontend/src/main/terminal/terminal-event-handler.ts b/apps/frontend/src/main/terminal/terminal-event-handler.ts index 79a5b07387..7f8b061dfc 100644 --- a/apps/frontend/src/main/terminal/terminal-event-handler.ts +++ b/apps/frontend/src/main/terminal/terminal-event-handler.ts @@ -6,6 +6,7 @@ import * as OutputParser from './output-parser'; import * as ClaudeIntegration from './claude-integration-handler'; import type { TerminalProcess, WindowGetter } from './types'; +import { IPC_CHANNELS } from '../../shared/constants'; /** * Event handler callbacks @@ -14,8 +15,12 @@ export interface EventHandlerCallbacks { onClaudeSessionId: (terminal: TerminalProcess, sessionId: string) => void; onRateLimit: (terminal: TerminalProcess, data: string) => void; onOAuthToken: (terminal: TerminalProcess, data: string) => void; + onClaudeBusyChange: (terminal: TerminalProcess, isBusy: boolean) => void; } +// Track the last known busy state per terminal to avoid duplicate events +const lastBusyState = new Map<string, boolean>(); + /** * Handle terminal data output */ @@ -39,6 +44,28 @@ export function handleTerminalData( // Check for OAuth token callbacks.onOAuthToken(terminal, data); + + // Detect Claude busy state changes (only when in Claude mode) + if (terminal.isClaudeMode) { + const busyState = OutputParser.detectClaudeBusyState(data); + if 
(busyState !== null) { + const isBusy = busyState === 'busy'; + const lastState = lastBusyState.get(terminal.id); + + // Only emit if state actually changed + if (lastState !== isBusy) { + lastBusyState.set(terminal.id, isBusy); + callbacks.onClaudeBusyChange(terminal, isBusy); + } + } + } +} + +/** + * Clear busy state tracking for a terminal (call on terminal destruction) + */ +export function clearBusyState(terminalId: string): void { + lastBusyState.delete(terminalId); } /** @@ -64,6 +91,12 @@ export function createEventCallbacks( }, onOAuthToken: (terminal, data) => { ClaudeIntegration.handleOAuthToken(terminal, data, getWindow); + }, + onClaudeBusyChange: (terminal, isBusy) => { + const win = getWindow(); + if (win) { + win.webContents.send(IPC_CHANNELS.TERMINAL_CLAUDE_BUSY, terminal.id, isBusy); + } } }; } diff --git a/apps/frontend/src/main/terminal/terminal-lifecycle.ts b/apps/frontend/src/main/terminal/terminal-lifecycle.ts index d0ee85fbf3..c366ed7e81 100644 --- a/apps/frontend/src/main/terminal/terminal-lifecycle.ts +++ b/apps/frontend/src/main/terminal/terminal-lifecycle.ts @@ -4,6 +4,7 @@ */ import * as os from 'os'; +import { existsSync } from 'fs'; import type { TerminalCreateOptions } from '../../shared/types'; import { IPC_CHANNELS } from '../../shared/constants'; import type { TerminalSession } from '../terminal-session-store'; @@ -22,6 +23,9 @@ import { debugLog, debugError } from '../../shared/utils/debug-logger'; export interface RestoreOptions { resumeClaudeSession: boolean; captureSessionId: (terminalId: string, projectPath: string, startTime: number) => void; + /** Callback triggered when a Claude session needs to be resumed. + * Note: sessionId is deprecated and ignored - resumeClaude uses --continue */ + onResumeNeeded?: (terminalId: string, sessionId: string | undefined) => void; } /** @@ -54,8 +58,16 @@ export async function createTerminal( debugLog('[TerminalLifecycle] Injecting OAuth token from active profile'); } + // Validate cwd exists - if the directory doesn't exist (e.g., worktree removed), + // fall back to project path to prevent shell exit with code 1 + let effectiveCwd = cwd; + if (cwd && !existsSync(cwd)) { + debugLog('[TerminalLifecycle] Terminal cwd does not exist, falling back:', cwd, '->', projectPath || os.homedir()); + effectiveCwd = projectPath || os.homedir(); + } + const ptyProcess = PtyManager.spawnPtyProcess( - cwd || os.homedir(), + effectiveCwd || os.homedir(), cols, rows, profileEnv @@ -63,7 +75,7 @@ export async function createTerminal( debugLog('[TerminalLifecycle] PTY process spawned, pid:', ptyProcess.pid); - const terminalCwd = cwd || os.homedir(); + const terminalCwd = effectiveCwd || os.homedir(); const terminal: TerminalProcess = { id, pty: ptyProcess, @@ -111,12 +123,31 @@ export async function restoreTerminal( cols = 80, rows = 24 ): Promise { - debugLog('[TerminalLifecycle] Restoring terminal session:', session.id, 'Claude mode:', session.isClaudeMode); + // Look up the stored session to get the correct isClaudeMode value + // The renderer may pass isClaudeMode: false (by design), but we need the stored value + // to determine whether to auto-resume Claude + const storedSessions = SessionHandler.getSavedSessions(session.projectPath); + const storedSession = storedSessions.find(s => s.id === session.id); + const storedIsClaudeMode = storedSession?.isClaudeMode ?? session.isClaudeMode; + const storedClaudeSessionId = storedSession?.claudeSessionId ?? 
session.claudeSessionId; + + debugLog('[TerminalLifecycle] Restoring terminal session:', session.id, + 'Passed Claude mode:', session.isClaudeMode, + 'Stored Claude mode:', storedIsClaudeMode, + 'Stored session ID:', storedClaudeSessionId); + + // Validate cwd exists - if the directory was deleted (e.g., worktree removed), + // fall back to project path to prevent shell exit with code 1 + let effectiveCwd = session.cwd; + if (!existsSync(session.cwd)) { + debugLog('[TerminalLifecycle] Session cwd does not exist, falling back to project path:', session.cwd, '->', session.projectPath); + effectiveCwd = session.projectPath || os.homedir(); + } const result = await createTerminal( { id: session.id, - cwd: session.cwd, + cwd: effectiveCwd, cols, rows, projectPath: session.projectPath @@ -135,19 +166,55 @@ export async function restoreTerminal( return { success: false, error: 'Terminal not found after creation' }; } + // Restore title and worktree config from session terminal.title = session.title; + // Only restore worktree config if the worktree directory still exists + // (effectiveCwd matching session.cwd means no fallback was needed) + if (effectiveCwd === session.cwd) { + terminal.worktreeConfig = session.worktreeConfig; + } else { + // Worktree was deleted, clear the config and update terminal's cwd + terminal.worktreeConfig = undefined; + terminal.cwd = effectiveCwd; + debugLog('[TerminalLifecycle] Cleared worktree config for terminal with deleted worktree:', session.id); + } - // Restore Claude mode state without sending resume commands - // The PTY daemon keeps processes alive, so we just need to reconnect to the existing session - if (session.isClaudeMode) { - terminal.isClaudeMode = true; - terminal.claudeSessionId = session.claudeSessionId; + // Send title change event for all restored terminals so renderer updates + const win = getWindow(); + if (win) { + win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, session.id, session.title); + } - debugLog('[TerminalLifecycle] Restored Claude mode state for session:', session.id, 'sessionId:', session.claudeSessionId); + // Auto-resume Claude if session was in Claude mode + // Use storedIsClaudeMode which comes from the persisted store, + // not the renderer-passed values (renderer always passes isClaudeMode: false) + // + // Note: We no longer require storedClaudeSessionId because resumeClaude uses + // `claude --continue` which resumes the most recent session in the directory + // automatically. Session IDs are deprecated and ignored. 
+ if (options.resumeClaudeSession && storedIsClaudeMode) { + terminal.isClaudeMode = true; + // Don't set claudeSessionId - it's deprecated and --continue doesn't use it + debugLog('[TerminalLifecycle] Auto-resuming Claude session using --continue'); - const win = getWindow(); + // Notify renderer that we're in Claude mode (session ID is deprecated) + // This prevents the renderer from also trying to resume (duplicate command) if (win) { - win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, session.id, session.title); + win.webContents.send(IPC_CHANNELS.TERMINAL_CLAUDE_SESSION, terminal.id, storedClaudeSessionId); + } + + // Persist the restored Claude mode state immediately to avoid data loss + // if app closes before the 30-second periodic save + if (terminal.projectPath) { + SessionHandler.persistSession(terminal); + } + + // Small delay to ensure PTY is ready before sending resume command + // Note: sessionId parameter is deprecated and ignored by resumeClaude + if (options.onResumeNeeded) { + setTimeout(() => { + options.onResumeNeeded!(terminal.id, storedClaudeSessionId); + }, 500); } } @@ -172,6 +239,8 @@ try { SessionHandler.removePersistedSession(terminal); + // Release any claimed session ID for this terminal + SessionHandler.releaseSessionId(id); onCleanup(id); PtyManager.killPty(terminal); terminals.delete(id); diff --git a/apps/frontend/src/main/terminal/terminal-manager.ts b/apps/frontend/src/main/terminal/terminal-manager.ts index f2ab44a7e2..2f86c218bc 100644 --- a/apps/frontend/src/main/terminal/terminal-manager.ts +++ b/apps/frontend/src/main/terminal/terminal-manager.ts @@ -80,6 +80,12 @@ this.terminals, this.getWindow ); + }, + onResumeNeeded: (terminalId, sessionId) => { + // Use async version to avoid blocking main process + this.resumeClaudeAsync(terminalId, sessionId).catch((error) => { + console.error('[terminal-manager] Failed to resume Claude session:', error); + }); } }, cols, @@ -130,8 +136,35 @@ } } + /** + * Invoke Claude in a terminal with optional profile override (async - non-blocking) + */ + async invokeClaudeAsync(id: string, cwd?: string, profileId?: string): Promise<void> { + const terminal = this.terminals.get(id); + if (!terminal) { + return; + } + + await ClaudeIntegration.invokeClaudeAsync( + terminal, + cwd, + profileId, + this.getWindow, + (terminalId, projectPath, startTime) => { + SessionHandler.captureClaudeSessionId( + terminalId, + projectPath, + startTime, + this.terminals, + this.getWindow + ); + } + ); + } + /** * Invoke Claude in a terminal with optional profile override + * @deprecated Use invokeClaudeAsync for non-blocking behavior */ invokeClaude(id: string, cwd?: string, profileId?: string): void { const terminal = this.terminals.get(id); @@ -169,13 +202,26 @@ terminal, profileId, this.getWindow, - (terminalId, cwd, profileId) => this.invokeClaude(terminalId, cwd, profileId), + async (terminalId, cwd, profileId) => this.invokeClaudeAsync(terminalId, cwd, profileId), (terminalId) => this.lastNotifiedRateLimitReset.delete(terminalId) ); } + /** + * Resume Claude in a terminal asynchronously (non-blocking) + */ + async resumeClaudeAsync(id: string, sessionId?: string): Promise<void> { + const terminal = this.terminals.get(id); + if (!terminal) { + return; + } + + await ClaudeIntegration.resumeClaudeAsync(terminal, sessionId, this.getWindow); + }
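ClaudeIntegration.resumeClaudeAsync itself is not part of this diff. Per the restore comments above, it resumes with `claude --continue` rather than a session ID; a hedged sketch of that shape (the pty write is an assumption, not the actual implementation):

// Assumed shape only: `claude --continue` resumes the most recent session in
// the terminal's cwd by itself, which is why sessionId is deprecated and ignored.
async function resumeViaContinue(terminal: { pty: { write(data: string): void } }): Promise<void> {
  terminal.pty.write('claude --continue\r');
}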
+ /** * Resume Claude in a terminal with a specific session ID + @deprecated Use resumeClaudeAsync for non-blocking behavior */ resumeClaude(id: string, sessionId?: string): void { const terminal = this.terminals.get(id); @@ -239,6 +285,12 @@ this.terminals, this.getWindow ); + }, + onResumeNeeded: (terminalId, sessionId) => { + // Use async version to avoid blocking main process + this.resumeClaudeAsync(terminalId, sessionId).catch((error) => { + console.error('[terminal-manager] Failed to resume Claude session:', error); + }); } }, cols, @@ -279,6 +331,20 @@ } } + /** + * Update terminal worktree config + */ + setWorktreeConfig(id: string, config: import('../../shared/types').TerminalWorktreeConfig | undefined): void { + const terminal = this.terminals.get(id); + if (terminal) { + terminal.worktreeConfig = config; + // Persist immediately when worktree config changes + if (terminal.projectPath) { + SessionHandler.persistSession(terminal); + } + } + } + /** * Check if a terminal's PTY process is alive */ diff --git a/apps/frontend/src/main/terminal/types.ts b/apps/frontend/src/main/terminal/types.ts index 7a3618909d..f203973f56 100644 --- a/apps/frontend/src/main/terminal/types.ts +++ b/apps/frontend/src/main/terminal/types.ts @@ -1,5 +1,6 @@ import type * as pty from '@lydell/node-pty'; import type { BrowserWindow } from 'electron'; +import type { TerminalWorktreeConfig } from '../../shared/types'; /** * Terminal process tracking @@ -14,6 +15,8 @@ export interface TerminalProcess { claudeProfileId?: string; outputBuffer: string; title: string; + /** Associated worktree configuration (persisted across restarts) */ + worktreeConfig?: TerminalWorktreeConfig; } /** diff --git a/apps/frontend/src/main/updater/config.ts b/apps/frontend/src/main/updater/config.ts deleted file mode 100644 index 982042a66d..0000000000 --- a/apps/frontend/src/main/updater/config.ts +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Configuration for Auto Claude updater - */ - -/** - * GitHub repository configuration - */ -export const GITHUB_CONFIG = { - owner: 'AndyMik90', - repo: 'Auto-Claude', - autoBuildPath: 'apps/backend' // Path within repo where auto-claude backend lives -} as const; - -/** - * Files and directories to preserve during updates - */ -export const PRESERVE_FILES = ['.env', 'specs'] as const; - -/** - * Files and directories to skip when copying - */ -export const SKIP_FILES = ['__pycache__', '.DS_Store', '.git', 'specs', '.env'] as const; - -/** - * Update-related timeouts (in milliseconds) - */ -export const TIMEOUTS = { - requestTimeout: 10000, - downloadTimeout: 60000 -} as const; diff --git a/apps/frontend/src/main/updater/file-operations.ts b/apps/frontend/src/main/updater/file-operations.ts deleted file mode 100644 index b948631ddc..0000000000 --- a/apps/frontend/src/main/updater/file-operations.ts +++ /dev/null @@ -1,135 +0,0 @@ -/** - * File operation utilities for updates - */ - -import { existsSync, mkdirSync, readdirSync, statSync, copyFileSync, readFileSync, writeFileSync, rmSync } from 'fs'; -import path from 'path'; -import { exec } from 'child_process'; -import { promisify } from 'util'; -import { SKIP_FILES } from './config'; - -const execAsync = promisify(exec); - -/** - * Extract a .tar.gz file - * Uses system tar command on Unix or PowerShell on Windows - */ -export async function extractTarball(tarballPath: string, destPath: string): Promise<void> { - try { - if (process.platform === 'win32') { - // On Windows, try multiple approaches: - // 1. Modern Windows 10/11 has built-in tar - // 2. 
Fall back to PowerShell's Expand-Archive for .zip (but .tar.gz needs tar) - // 3. Use PowerShell to extract via .NET - try { - // First try native tar (available on Windows 10 1803+) - await execAsync(`tar -xzf "${tarballPath}" -C "${destPath}"`); - } catch { - // Fall back to PowerShell with .NET for gzip decompression - // This is more complex but works on older Windows versions - const psScript = ` - $tarball = "${tarballPath.replace(/\\/g, '\\\\')}" - $dest = "${destPath.replace(/\\/g, '\\\\')}" - $tempTar = Join-Path $env:TEMP "auto-claude-update.tar" - - # Decompress gzip - $gzipStream = [System.IO.File]::OpenRead($tarball) - $decompressedStream = New-Object System.IO.Compression.GZipStream($gzipStream, [System.IO.Compression.CompressionMode]::Decompress) - $tarStream = [System.IO.File]::Create($tempTar) - $decompressedStream.CopyTo($tarStream) - $tarStream.Close() - $decompressedStream.Close() - $gzipStream.Close() - - # Extract tar using tar command (should work even if gzip didn't) - tar -xf $tempTar -C $dest - Remove-Item $tempTar -Force - `; - await execAsync(`powershell -NoProfile -Command "${psScript.replace(/"/g, '\\"').replace(/\n/g, ' ')}"`); - } - } else { - // Unix systems - use native tar - await execAsync(`tar -xzf "${tarballPath}" -C "${destPath}"`); - } - } catch (error) { - throw new Error(`Failed to extract tarball: ${error instanceof Error ? error.message : 'Unknown error'}`); - } -} - -/** - * Recursively copy directory - */ -export function copyDirectoryRecursive( - src: string, - dest: string, - preserveExisting: boolean = false -): void { - if (!existsSync(dest)) { - mkdirSync(dest, { recursive: true }); - } - - const entries = readdirSync(src, { withFileTypes: true }); - - for (const entry of entries) { - const srcPath = path.join(src, entry.name); - const destPath = path.join(dest, entry.name); - - // Skip certain files/directories - if (SKIP_FILES.includes(entry.name as (typeof SKIP_FILES)[number])) { - continue; - } - - // In preserve mode, skip existing files - if (preserveExisting && existsSync(destPath)) { - if (entry.isDirectory()) { - copyDirectoryRecursive(srcPath, destPath, preserveExisting); - } - continue; - } - - if (entry.isDirectory()) { - copyDirectoryRecursive(srcPath, destPath, preserveExisting); - } else { - copyFileSync(srcPath, destPath); - } - } -} - -/** - * Preserve specified files before update - */ -export function preserveFiles(targetPath: string, filesToPreserve: readonly string[]): Record<string, Buffer> { - const preservedContent: Record<string, Buffer> = {}; - - for (const file of filesToPreserve) { - const filePath = path.join(targetPath, file); - if (existsSync(filePath)) { - if (!statSync(filePath).isDirectory()) { - preservedContent[file] = readFileSync(filePath); - } - } - } - - return preservedContent; -} - -/** - * Restore preserved files after update - */ -export function restoreFiles(targetPath: string, preservedContent: Record<string, Buffer>): void { - for (const [file, content] of Object.entries(preservedContent)) { - writeFileSync(path.join(targetPath, file), content); - } -} - -/** - * Clean target directory while preserving specified files - */ -export function cleanTargetDirectory(targetPath: string, preserveFiles: readonly string[]): void { - const items = readdirSync(targetPath); - for (const item of items) { - if (!preserveFiles.includes(item)) { - rmSync(path.join(targetPath, item), { recursive: true, force: true }); - } - } -}
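For orientation, applyUpdate (removed further down in this diff) consumed these helpers in a fixed order; condensed from its body:

// Snapshot preserved file contents, wipe everything else, copy the new tree
// (preserveExisting = true), then restore the snapshot in case it was overwritten.
const preserved = preserveFiles(targetPath, PRESERVE_FILES); // ['.env', 'specs']
cleanTargetDirectory(targetPath, PRESERVE_FILES);
copyDirectoryRecursive(sourcePath, targetPath, true);
restoreFiles(targetPath, preserved);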
diff --git a/apps/frontend/src/main/updater/http-client.ts b/apps/frontend/src/main/updater/http-client.ts deleted file mode 100644 index ada5f5d41a..0000000000 --- a/apps/frontend/src/main/updater/http-client.ts +++ /dev/null @@ -1,189 +0,0 @@ -/** - * HTTP client utilities for fetching updates - */ - -import https from 'https'; -import { createWriteStream } from 'fs'; -import { TIMEOUTS, GITHUB_CONFIG } from './config'; - -/** - * Fetch JSON from a URL using https - */ -export function fetchJson<T>(url: string): Promise<T> { - return new Promise((resolve, reject) => { - const headers = { - 'User-Agent': 'Auto-Claude-UI', - 'Accept': 'application/vnd.github+json' - }; - - const request = https.get(url, { headers }, (response) => { - // Handle redirects - if (response.statusCode === 301 || response.statusCode === 302) { - const redirectUrl = response.headers.location; - if (redirectUrl) { - fetchJson<T>(redirectUrl).then(resolve).catch(reject); - return; - } - } - - // Handle HTTP 300 Multiple Choices (branch/tag name collision) - if (response.statusCode === 300) { - let data = ''; - response.on('data', chunk => data += chunk); - response.on('end', () => { - console.error('[HTTP] Multiple choices for resource:', { - url, - statusCode: 300, - response: data - }); - reject(new Error( - `Multiple resources found for ${url}. ` + - `This usually means a branch and tag have the same name. ` + - `Please report this issue at https://github.com/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/issues` - )); - }); - response.on('error', reject); - return; - } - - if (response.statusCode !== 200) { - // Collect response body for error details (limit to 10KB) - const maxErrorSize = 10 * 1024; - let errorData = ''; - response.on('data', chunk => { - if (errorData.length < maxErrorSize) { - errorData += chunk.toString().slice(0, maxErrorSize - errorData.length); - } - }); - response.on('end', () => { - const errorMsg = `HTTP ${response.statusCode}: ${errorData || response.statusMessage || 'No error details'}`; - reject(new Error(errorMsg)); - }); - response.on('error', reject); - return; - } - - let data = ''; - response.on('data', chunk => data += chunk); - response.on('end', () => { - try { - resolve(JSON.parse(data) as T); - } catch (_e) { - reject(new Error('Failed to parse JSON response')); - } - }); - response.on('error', reject); - }); - - request.on('error', reject); - request.setTimeout(TIMEOUTS.requestTimeout, () => { - request.destroy(); - reject(new Error('Request timeout')); - }); - }); -} - -/** - * Download a file with progress tracking - */ -export function downloadFile( - url: string, - destPath: string, - onProgress?: (percent: number) => void -): Promise<void> { - return new Promise((resolve, reject) => { - const file = createWriteStream(destPath); - - // GitHub API URLs need the GitHub Accept header to get a redirect to the actual file - // Non-API URLs (CDN, direct downloads) use octet-stream - const isGitHubApi = url.includes('api.github.com'); - const headers = { - 'User-Agent': 'Auto-Claude-UI', - 'Accept': isGitHubApi ? 
'application/vnd.github+json' : 'application/octet-stream' - }; - - const request = https.get(url, { headers }, (response) => { - // Handle redirects - if (response.statusCode === 301 || response.statusCode === 302) { - file.close(); - const redirectUrl = response.headers.location; - if (redirectUrl) { - downloadFile(redirectUrl, destPath, onProgress).then(resolve).catch(reject); - return; - } - } - - // Handle HTTP 300 Multiple Choices (branch/tag name collision) - if (response.statusCode === 300) { - file.close(); - let data = ''; - response.on('data', chunk => data += chunk); - response.on('end', () => { - console.error('[HTTP] Multiple choices for resource:', { - url, - statusCode: 300, - response: data - }); - reject(new Error( - `Multiple resources found for ${url}. ` + - `This usually means a branch and tag have the same name. ` + - `Please download the latest version manually from: ` + - `https://github.com/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/releases/latest` - )); - }); - response.on('error', reject); - return; - } - - if (response.statusCode !== 200) { - file.close(); - // Collect response body for error details (limit to 10KB) - const maxErrorSize = 10 * 1024; - let errorData = ''; - response.on('data', chunk => { - if (errorData.length < maxErrorSize) { - errorData += chunk.toString().slice(0, maxErrorSize - errorData.length); - } - }); - response.on('end', () => { - const errorMsg = `HTTP ${response.statusCode}: ${errorData || response.statusMessage || 'No error details'}`; - reject(new Error(errorMsg)); - }); - response.on('error', reject); - return; - } - - const totalSize = parseInt(response.headers['content-length'] || '0', 10); - let downloadedSize = 0; - - response.on('data', (chunk) => { - downloadedSize += chunk.length; - if (totalSize > 0 && onProgress) { - onProgress(Math.round((downloadedSize / totalSize) * 100)); - } - }); - - response.pipe(file); - - file.on('finish', () => { - file.close(); - resolve(); - }); - - file.on('error', (err) => { - file.close(); - reject(err); - }); - }); - - request.on('error', (err) => { - file.close(); - reject(err); - }); - - request.setTimeout(TIMEOUTS.downloadTimeout, () => { - request.destroy(); - reject(new Error('Download timeout')); - }); - }); -} diff --git a/apps/frontend/src/main/updater/types.ts b/apps/frontend/src/main/updater/types.ts deleted file mode 100644 index d1e0b2c5f7..0000000000 --- a/apps/frontend/src/main/updater/types.ts +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Type definitions for Auto Claude updater system - */ - -/** - * GitHub Release API response (partial) - */ -export interface GitHubRelease { - tag_name: string; - name: string; - body: string; - html_url: string; - tarball_url: string; - published_at: string; - prerelease: boolean; - draft: boolean; -} - -/** - * Result of checking for updates - */ -export interface AutoBuildUpdateCheck { - updateAvailable: boolean; - currentVersion: string; - latestVersion?: string; - releaseNotes?: string; - releaseUrl?: string; - error?: string; -} - -/** - * Result of applying an update - */ -export interface AutoBuildUpdateResult { - success: boolean; - version?: string; - error?: string; -} - -/** - * Update progress stages - */ -export type UpdateStage = 'checking' | 'downloading' | 'extracting' | 'complete' | 'error'; - -/** - * Progress callback for download - */ -export type UpdateProgressCallback = (progress: { - stage: UpdateStage; - percent?: number; - message: string; -}) => void; - -/** - * Update metadata stored after successful update - */ 
-export interface UpdateMetadata { - version: string; - updatedAt: string; - source: string; - releaseTag: string; - releaseName: string; -} diff --git a/apps/frontend/src/main/updater/update-checker.ts b/apps/frontend/src/main/updater/update-checker.ts deleted file mode 100644 index 2f04d93348..0000000000 --- a/apps/frontend/src/main/updater/update-checker.ts +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Update checking functionality - */ - -import { GITHUB_CONFIG } from './config'; -import { fetchJson } from './http-client'; -import { getEffectiveVersion, parseVersionFromTag, compareVersions } from './version-manager'; -import { GitHubRelease, AutoBuildUpdateCheck } from './types'; -import { debugLog } from '../../shared/utils/debug-logger'; - -// Cache for the latest release info (used by download) -let cachedLatestRelease: GitHubRelease | null = null; - -/** - * Get cached release (if available) - */ -export function getCachedRelease(): GitHubRelease | null { - return cachedLatestRelease; -} - -/** - * Set cached release - */ -export function setCachedRelease(release: GitHubRelease | null): void { - cachedLatestRelease = release; -} - -/** - * Clear cached release - */ -export function clearCachedRelease(): void { - cachedLatestRelease = null; -}
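compareVersions' body is outside this diff; its call sites here and in update-status.ts treat a positive return as "first argument is newer". A rough standalone model under that contract (simplified pre-release handling, not the project's actual implementation):

function cmpVersions(a: string, b: string): number {
  const split = (v: string) => {
    const [core, ...rest] = v.split('-');
    return { nums: core.split('.').map(Number), pre: rest.join('-') || undefined };
  };
  const pa = split(a);
  const pb = split(b);
  for (let i = 0; i < Math.max(pa.nums.length, pb.nums.length); i++) {
    const d = (pa.nums[i] ?? 0) - (pb.nums[i] ?? 0);
    if (d !== 0) return d;
  }
  if (pa.pre === pb.pre) return 0;
  if (pa.pre === undefined) return 1;  // "2.7.2" is newer than "2.7.2-beta.6"
  if (pb.pre === undefined) return -1;
  return pa.pre < pb.pre ? -1 : 1;     // coarse lexicographic tiebreak
}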
- -/** - * Check GitHub Releases for the latest version - */ -export async function checkForUpdates(): Promise<AutoBuildUpdateCheck> { - // Use effective version which accounts for source updates - const currentVersion = getEffectiveVersion(); - debugLog('[UpdateCheck] Current effective version:', currentVersion); - - try { - // Fetch latest release from GitHub Releases API - const releaseUrl = `https://api.github.com/repos/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/releases/latest`; - const release = await fetchJson<GitHubRelease>(releaseUrl); - - // Cache for download function - setCachedRelease(release); - - // Parse version from tag (e.g., "v1.2.0" -> "1.2.0") - const latestVersion = parseVersionFromTag(release.tag_name); - debugLog('[UpdateCheck] Latest version:', latestVersion); - - // Compare versions - const updateAvailable = compareVersions(latestVersion, currentVersion) > 0; - debugLog('[UpdateCheck] Update available:', updateAvailable); - - return { - updateAvailable, - currentVersion, - latestVersion, - releaseNotes: release.body || undefined, - releaseUrl: release.html_url || undefined - }; - } catch (error) { - // Clear cache on error - clearCachedRelease(); - debugLog('[UpdateCheck] Error:', error instanceof Error ? error.message : error); - - return { - updateAvailable: false, - currentVersion, - error: error instanceof Error ? error.message : 'Failed to check for updates' - }; - } -} diff --git a/apps/frontend/src/main/updater/update-installer.ts b/apps/frontend/src/main/updater/update-installer.ts deleted file mode 100644 index a4e2d350db..0000000000 --- a/apps/frontend/src/main/updater/update-installer.ts +++ /dev/null @@ -1,224 +0,0 @@ -/** - * Update installation and application - */ - -import { existsSync, mkdirSync, writeFileSync, rmSync, readdirSync } from 'fs'; -import path from 'path'; -import { app } from 'electron'; -import { GITHUB_CONFIG, PRESERVE_FILES } from './config'; -import { downloadFile, fetchJson } from './http-client'; -import { parseVersionFromTag } from './version-manager'; -import { getUpdateCachePath, getUpdateTargetPath } from './path-resolver'; -import { extractTarball, copyDirectoryRecursive, preserveFiles, restoreFiles, cleanTargetDirectory } from './file-operations'; -import { getCachedRelease, setCachedRelease, clearCachedRelease } from './update-checker'; -import { GitHubRelease, AutoBuildUpdateResult, UpdateProgressCallback, UpdateMetadata } from './types'; -import { debugLog } from '../../shared/utils/debug-logger'; - -/** - * Download and apply the latest auto-claude update from GitHub Releases - * - * Note: In production, this updates the bundled source in userData. - * For packaged apps, we can't modify resourcesPath directly, - * so we use a "source override" system. - */ -export async function downloadAndApplyUpdate( - onProgress?: UpdateProgressCallback -): Promise<AutoBuildUpdateResult> { - const cachePath = getUpdateCachePath(); - - debugLog('[Update] Starting update process...'); - debugLog('[Update] Cache path:', cachePath); - - try { - onProgress?.({ - stage: 'checking', - message: 'Fetching release info...' - }); - - // Ensure cache directory exists - if (!existsSync(cachePath)) { - mkdirSync(cachePath, { recursive: true }); - debugLog('[Update] Created cache directory'); - } - - // Get release info (use cache or fetch fresh) - let release = getCachedRelease(); - if (!release) { - const releaseUrl = `https://api.github.com/repos/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/releases/latest`; - debugLog('[Update] Fetching release info from:', releaseUrl); - release = await fetchJson<GitHubRelease>(releaseUrl); - setCachedRelease(release); - } else { - debugLog('[Update] Using cached release info'); - } - - // Use explicit tag reference URL to avoid HTTP 300 when branch/tag names collide - // See: https://github.com/AndyMik90/Auto-Claude/issues/78 - const tarballUrl = `https://api.github.com/repos/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/tarball/refs/tags/${release.tag_name}`; - const releaseVersion = parseVersionFromTag(release.tag_name); - debugLog('[Update] Release version:', releaseVersion); - debugLog('[Update] Tarball URL:', tarballUrl); - - const tarballPath = path.join(cachePath, 'auto-claude-update.tar.gz'); - const extractPath = path.join(cachePath, 'extracted'); - - // Clean up previous extraction - if (existsSync(extractPath)) { - rmSync(extractPath, { recursive: true, force: true }); - } - mkdirSync(extractPath, { recursive: true }); - - onProgress?.({ - stage: 'downloading', - percent: 0, - message: 'Downloading update...' - }); - - debugLog('[Update] Starting download to:', tarballPath); - - // Download the tarball - await downloadFile(tarballUrl, tarballPath, (percent) => { - onProgress?.({ - stage: 'downloading', - percent, - message: `Downloading... 
${percent}%` - }); - }); - - debugLog('[Update] Download complete'); - - onProgress?.({ - stage: 'extracting', - message: 'Extracting update...' - }); - - debugLog('[Update] Extracting to:', extractPath); - - // Extract the tarball - await extractTarball(tarballPath, extractPath); - - debugLog('[Update] Extraction complete'); - - // Find the auto-claude folder in extracted content - // GitHub tarballs have a root folder like "owner-repo-hash/" - const extractedDirs = readdirSync(extractPath); - if (extractedDirs.length === 0) { - throw new Error('Empty tarball'); - } - - const rootDir = path.join(extractPath, extractedDirs[0]); - const autoBuildSource = path.join(rootDir, GITHUB_CONFIG.autoBuildPath); - - if (!existsSync(autoBuildSource)) { - throw new Error('auto-claude folder not found in download'); - } - - // Determine where to install the update - const targetPath = getUpdateTargetPath(); - debugLog('[Update] Target install path:', targetPath); - - // Backup existing source (if in dev mode) - const backupPath = path.join(cachePath, 'backup'); - if (!app.isPackaged && existsSync(targetPath)) { - if (existsSync(backupPath)) { - rmSync(backupPath, { recursive: true, force: true }); - } - // Simple copy for backup - debugLog('[Update] Creating backup at:', backupPath); - copyDirectoryRecursive(targetPath, backupPath); - } - - // Apply the update - debugLog('[Update] Applying update...'); - await applyUpdate(targetPath, autoBuildSource); - debugLog('[Update] Update applied successfully'); - - // Write update metadata - const metadata: UpdateMetadata = { - version: releaseVersion, - updatedAt: new Date().toISOString(), - source: 'github-release', - releaseTag: release.tag_name, - releaseName: release.name - }; - writeUpdateMetadata(targetPath, metadata); - - // Clear the cache after successful update - clearCachedRelease(); - - // Cleanup - rmSync(tarballPath, { force: true }); - rmSync(extractPath, { recursive: true, force: true }); - - onProgress?.({ - stage: 'complete', - message: `Updated to version ${releaseVersion}` - }); - - debugLog('[Update] ============================================'); - debugLog('[Update] UPDATE SUCCESSFUL'); - debugLog('[Update] New version:', releaseVersion); - debugLog('[Update] Target path:', targetPath); - debugLog('[Update] ============================================'); - - return { - success: true, - version: releaseVersion - }; - } catch (error) { - const errorMessage = error instanceof Error ? error.message : 'Update failed'; - debugLog('[Update] ============================================'); - debugLog('[Update] UPDATE FAILED'); - debugLog('[Update] Error:', errorMessage); - debugLog('[Update] ============================================'); - - // Provide user-friendly error message for HTTP 300 errors - let displayMessage = errorMessage; - if (errorMessage.includes('Multiple resources found')) { - displayMessage = - `Update failed due to repository configuration issue (HTTP 300). 
` + - `Please download the latest version manually from: ` + - `https://github.com/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/releases/latest`; - } - - onProgress?.({ - stage: 'error', - message: displayMessage - }); - - return { - success: false, - error: displayMessage - }; - } -} - -/** - * Apply update to target directory - */ -async function applyUpdate(targetPath: string, sourcePath: string): Promise<void> { - if (existsSync(targetPath)) { - // Preserve important files - const preservedContent = preserveFiles(targetPath, PRESERVE_FILES); - - // Clean target but preserve certain files - cleanTargetDirectory(targetPath, PRESERVE_FILES); - - // Copy new files - copyDirectoryRecursive(sourcePath, targetPath, true); - - // Restore preserved files that might have been overwritten - restoreFiles(targetPath, preservedContent); - } else { - mkdirSync(targetPath, { recursive: true }); - copyDirectoryRecursive(sourcePath, targetPath, false); - } -} - -/** - * Write update metadata to disk - */ -function writeUpdateMetadata(targetPath: string, metadata: UpdateMetadata): void { - const metadataPath = path.join(targetPath, '.update-metadata.json'); - writeFileSync(metadataPath, JSON.stringify(metadata, null, 2)); -} diff --git a/apps/frontend/src/main/updater/update-status.ts b/apps/frontend/src/main/updater/update-status.ts deleted file mode 100644 index 93ec5e29c0..0000000000 --- a/apps/frontend/src/main/updater/update-status.ts +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Update status checking utilities - */ - -import { existsSync, readFileSync } from 'fs'; -import path from 'path'; -import { app } from 'electron'; -import { getBundledVersion, compareVersions } from './version-manager'; -import { UpdateMetadata } from './types'; - -/** - * Check if there's a pending source update that requires restart - */ -export function hasPendingSourceUpdate(): boolean { - if (!app.isPackaged) { - return false; - } - - const overridePath = path.join(app.getPath('userData'), 'auto-claude-source'); - const metadataPath = path.join(overridePath, '.update-metadata.json'); - - if (!existsSync(metadataPath)) { - return false; - } - - try { - const metadata = JSON.parse(readFileSync(metadataPath, 'utf-8')) as UpdateMetadata; - const bundledVersion = getBundledVersion(); - return compareVersions(metadata.version, bundledVersion) > 0; - } catch { - return false; - } -} - -/** - * Get update metadata if available - */ -export function getUpdateMetadata(): UpdateMetadata | null { - const overridePath = path.join(app.getPath('userData'), 'auto-claude-source'); - const metadataPath = path.join(overridePath, '.update-metadata.json'); - - if (!existsSync(metadataPath)) { - return null; - } - - try { - return JSON.parse(readFileSync(metadataPath, 'utf-8')) as UpdateMetadata; - } catch { - return null; - } -} diff --git a/apps/frontend/src/main/updater/version-manager.ts b/apps/frontend/src/main/updater/version-manager.ts index 92edcb8bd7..0924bd7e92 100644 --- a/apps/frontend/src/main/updater/version-manager.ts +++ b/apps/frontend/src/main/updater/version-manager.ts @@ -1,96 +1,22 @@ /** * Version management utilities + * + * Simplified version that uses only the bundled app version. + * The "source updater" system has been removed since the backend + * is bundled with the app and updates via electron-updater. 
*/ import { app } from 'electron'; -import { existsSync, readFileSync } from 'fs'; -import path from 'path'; -import type { UpdateMetadata } from './types'; /** * Get the current app/framework version from package.json * - * Uses app.getVersion() (from package.json) as the base version. + * Uses app.getVersion() (from package.json) as the version. */ export function getBundledVersion(): string { return app.getVersion(); } -/** - * Get the effective version - accounts for source updates - * - * Returns the updated source version if an update has been applied, - * otherwise returns the bundled version. - */ -export function getEffectiveVersion(): string { - const isDebug = process.env.DEBUG === 'true'; - - // Build list of paths to check for update metadata - const metadataPaths: string[] = []; - - if (app.isPackaged) { - // Production: check userData override path - metadataPaths.push( - path.join(app.getPath('userData'), 'auto-claude-source', '.update-metadata.json') - ); - } else { - // Development: check the actual source paths where updates are written - const possibleSourcePaths = [ - // Apps structure: apps/backend - path.join(app.getAppPath(), '..', 'backend'), - path.join(process.cwd(), 'apps', 'backend'), - path.resolve(__dirname, '..', '..', '..', 'backend') - ]; - - for (const sourcePath of possibleSourcePaths) { - metadataPaths.push(path.join(sourcePath, '.update-metadata.json')); - } - } - - if (isDebug) { - console.log('[Version] Checking metadata paths:', metadataPaths); - } - - // Check each path for metadata - for (const metadataPath of metadataPaths) { - const exists = existsSync(metadataPath); - if (isDebug) { - console.log(`[Version] Checking ${metadataPath}: ${exists ? 'EXISTS' : 'not found'}`); - } - if (exists) { - try { - const metadata = JSON.parse(readFileSync(metadataPath, 'utf-8')) as UpdateMetadata; - if (metadata.version) { - if (isDebug) { - console.log(`[Version] Found metadata version: ${metadata.version}`); - } - return metadata.version; - } - } catch (e) { - if (isDebug) { - console.log(`[Version] Error reading metadata: ${e}`); - } - // Continue to next path - } - } - } - - const bundledVersion = app.getVersion(); - if (isDebug) { - console.log(`[Version] No metadata found, using bundled version: ${bundledVersion}`); - } - return bundledVersion; -} - -/** - * Parse version from GitHub release tag - * Handles tags like "v1.2.0", "1.2.0", "v1.2.0-beta" - */ -export function parseVersionFromTag(tag: string): string { - // Remove leading 'v' if present - return tag.replace(/^v/, ''); -} - /** * Parse a version string into its components * Handles versions like "2.7.2", "2.7.2-beta.6", "2.7.2-alpha.1" diff --git a/apps/frontend/src/main/utils/profile-manager.test.ts b/apps/frontend/src/main/utils/profile-manager.test.ts new file mode 100644 index 0000000000..a0e3aef370 --- /dev/null +++ b/apps/frontend/src/main/utils/profile-manager.test.ts @@ -0,0 +1,199 @@ +/** + * Tests for profile-manager.ts + * + * Red phase - write failing tests first + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { promises as fsPromises } from 'fs'; +import path from 'path'; +import { app } from 'electron'; +import { + loadProfilesFile, + saveProfilesFile, + generateProfileId, + validateFilePermissions +} from './profile-manager'; +import type { ProfilesFile } from '../../shared/types/profile'; + +// Mock Electron app.getPath +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') { + return 
'/mock/userdata'; + } + return '/mock/path'; + }) + } +})); + +// Mock fs module - mock the promises export which is used by profile-manager.ts +vi.mock('fs', () => { + const promises = { + readFile: vi.fn(), + writeFile: vi.fn(), + mkdir: vi.fn(), + chmod: vi.fn() + }; + return { + default: { promises }, // Default export contains promises + promises, // Named export for promises + existsSync: vi.fn(), + constants: { + O_RDONLY: 0, + S_IRUSR: 0o400 + } + }; +}); + +describe('profile-manager', () => { + const mockProfilesPath = '/mock/userdata/profiles.json'; + + beforeEach(() => { + vi.clearAllMocks(); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('loadProfilesFile', () => { + it('should return default profiles file when file does not exist', async () => { + vi.mocked(fsPromises.readFile).mockRejectedValue(new Error('ENOENT')); + + const result = await loadProfilesFile(); + + expect(result).toEqual({ + profiles: [], + activeProfileId: null, + version: 1 + }); + }); + + it('should return default profiles file when file is corrupted JSON', async () => { + vi.mocked(fsPromises.readFile).mockResolvedValue(Buffer.from('invalid json{')); + + const result = await loadProfilesFile(); + + expect(result).toEqual({ + profiles: [], + activeProfileId: null, + version: 1 + }); + }); + + it('should load valid profiles file', async () => { + const mockData: ProfilesFile = { + profiles: [ + { + id: 'test-id-1', + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-test-key', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'test-id-1', + version: 1 + }; + + vi.mocked(fsPromises.readFile).mockResolvedValue( + Buffer.from(JSON.stringify(mockData)) + ); + + const result = await loadProfilesFile(); + + expect(result).toEqual(mockData); + }); + + it('should use auto-claude directory for profiles.json path', async () => { + vi.mocked(fsPromises.readFile).mockRejectedValue(new Error('ENOENT')); + + await loadProfilesFile(); + + // Verify the file path includes auto-claude + const readFileCalls = vi.mocked(fsPromises.readFile).mock.calls; + const filePath = readFileCalls[0]?.[0]; + expect(filePath).toContain('auto-claude'); + expect(filePath).toContain('profiles.json'); + }); + }); + + describe('saveProfilesFile', () => { + it('should write profiles file to disk', async () => { + const mockData: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + vi.mocked(fsPromises.writeFile).mockResolvedValue(undefined); + + await saveProfilesFile(mockData); + + expect(fsPromises.writeFile).toHaveBeenCalled(); + const writeFileCall = vi.mocked(fsPromises.writeFile).mock.calls[0]; + const filePath = writeFileCall?.[0]; + const content = writeFileCall?.[1]; + + expect(filePath).toContain('auto-claude'); + expect(filePath).toContain('profiles.json'); + expect(content).toBe(JSON.stringify(mockData, null, 2)); + }); + + it('should throw error when write fails', async () => { + const mockData: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + vi.mocked(fsPromises.writeFile).mockRejectedValue(new Error('Write failed')); + + await expect(saveProfilesFile(mockData)).rejects.toThrow('Write failed'); + }); + }); + + describe('generateProfileId', () => { + it('should generate unique UUID v4 format IDs', () => { + const id1 = generateProfileId(); + const id2 = generateProfileId(); + + // UUID v4 format: xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx + 
expect(id1).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/); + expect(id2).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/); + + // IDs should be unique + expect(id1).not.toBe(id2); + }); + + it('should generate different IDs on consecutive calls', () => { + const ids = new Set<string>(); + for (let i = 0; i < 100; i++) { + ids.add(generateProfileId()); + } + expect(ids.size).toBe(100); + }); + }); + + describe('validateFilePermissions', () => { + it('should validate user-readable only file permissions', async () => { + // Mock successful chmod + vi.mocked(fsPromises.chmod).mockResolvedValue(undefined); + + const result = await validateFilePermissions('/mock/path/to/file.json'); + + expect(result).toBe(true); + }); + + it('should return false if chmod fails', async () => { + vi.mocked(fsPromises.chmod).mockRejectedValue(new Error('Permission denied')); + + const result = await validateFilePermissions('/mock/path/to/file.json'); + + expect(result).toBe(false); + }); + }); +}); diff --git a/apps/frontend/src/main/utils/profile-manager.ts b/apps/frontend/src/main/utils/profile-manager.ts new file mode 100644 index 0000000000..2d6deb8c59 --- /dev/null +++ b/apps/frontend/src/main/utils/profile-manager.ts @@ -0,0 +1,90 @@ +/** + * Profile Manager - File I/O for API profiles + * + * Handles loading and saving profiles.json from the auto-claude directory. + * Provides graceful handling for missing or corrupted files. + */ + +import { promises as fs } from 'fs'; +import path from 'path'; +import { app } from 'electron'; +import type { ProfilesFile } from '../../shared/types/profile'; + +/** + * Get the path to profiles.json in the auto-claude directory + */ +export function getProfilesFilePath(): string { + const userDataPath = app.getPath('userData'); + return path.join(userDataPath, 'auto-claude', 'profiles.json'); +} + +/** + * Load profiles.json from disk + * Returns default empty profiles file if file doesn't exist or is corrupted + */ +export async function loadProfilesFile(): Promise<ProfilesFile> { + const filePath = getProfilesFilePath(); + + try { + const content = await fs.readFile(filePath, 'utf-8'); + const data = JSON.parse(content) as ProfilesFile; + return data; + } catch (error) { + // File doesn't exist or is corrupted - return default + return { + profiles: [], + activeProfileId: null, + version: 1 + }; + } +} + +/** + * Save profiles.json to disk + * Creates the auto-claude directory if it doesn't exist + */ +export async function saveProfilesFile(data: ProfilesFile): Promise<void> { + const filePath = getProfilesFilePath(); + const dir = path.dirname(filePath); + + // Ensure directory exists + try { + await fs.mkdir(dir, { recursive: true }); + } catch (error) { + // Only ignore EEXIST errors (directory already exists) + // Rethrow other errors (e.g., permission issues) + if ((error as NodeJS.ErrnoException).code !== 'EEXIST') { + throw error; + } + } + + // Write file with formatted JSON + const content = JSON.stringify(data, null, 2); + await fs.writeFile(filePath, content, 'utf-8'); +}
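A hypothetical round-trip through the two helpers above (addProfile and its defaults are illustrative; generateProfileId is defined just below, and the field set mirrors the test fixture):

async function addProfile(name: string, apiKey: string): Promise<void> {
  const file = await loadProfilesFile(); // tolerates a missing or corrupted profiles.json
  const now = Date.now();
  file.profiles.push({
    id: generateProfileId(),
    name,
    baseUrl: 'https://api.anthropic.com', // assumed default
    apiKey,
    createdAt: now,
    updatedAt: now
  });
  file.activeProfileId ??= file.profiles[0].id; // first profile becomes active
  await saveProfilesFile(file);
}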
+ +/** + * Generate a unique UUID v4 for a new profile + */ +export function generateProfileId(): string { + // Generate UUID v4 + return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { + const r = (Math.random() * 16) | 0; + const v = c === 'x' ? r : (r & 0x3) | 0x8; + return v.toString(16); + }); +} + +/** + * Validate and set file permissions to user-readable only + * Returns true if successful, false otherwise + */ +export async function validateFilePermissions(filePath: string): Promise<boolean> { + try { + // Set file permissions to user-readable only (0600) + await fs.chmod(filePath, 0o600); + return true; + } catch { + return false; + } +} diff --git a/apps/frontend/src/main/utils/spec-number-lock.ts b/apps/frontend/src/main/utils/spec-number-lock.ts index d7a57bea10..b33fc455cc 100644 --- a/apps/frontend/src/main/utils/spec-number-lock.ts +++ b/apps/frontend/src/main/utils/spec-number-lock.ts @@ -154,7 +154,7 @@ export class SpecNumberLock { maxNumber = Math.max(maxNumber, this.scanSpecsDir(mainSpecsDir)); // 2. Scan all worktree specs - const worktreesDir = path.join(this.projectDir, '.worktrees'); + const worktreesDir = path.join(this.projectDir, '.auto-claude', 'worktrees', 'tasks'); if (existsSync(worktreesDir)) { try { const worktrees = readdirSync(worktreesDir, { withFileTypes: true }); diff --git a/apps/frontend/src/main/utils/windows-paths.ts b/apps/frontend/src/main/utils/windows-paths.ts new file mode 100644 index 0000000000..355640ac01 --- /dev/null +++ b/apps/frontend/src/main/utils/windows-paths.ts @@ -0,0 +1,283 @@ +/** + * Windows Executable Path Discovery Utility + * + * Provides reusable logic for finding Windows executables in common installation + * locations. Handles environment variable expansion and security validation. + * + * Used by cli-tool-manager.ts for Git, GitHub CLI, Claude CLI, etc. + * Follows the same pattern as homebrew-python.ts for platform-specific detection. + */ + +import { existsSync } from 'fs'; +import { access, constants } from 'fs/promises'; +import { execFileSync, execFile } from 'child_process'; +import { promisify } from 'util'; +import path from 'path'; +import os from 'os'; + +const execFileAsync = promisify(execFile); + +export interface WindowsToolPaths { + toolName: string; + executable: string; + patterns: string[]; +} + +export const WINDOWS_GIT_PATHS: WindowsToolPaths = { + toolName: 'Git', + executable: 'git.exe', + patterns: [ + '%PROGRAMFILES%\\Git\\cmd', + '%PROGRAMFILES(X86)%\\Git\\cmd', + '%LOCALAPPDATA%\\Programs\\Git\\cmd', + '%USERPROFILE%\\scoop\\apps\\git\\current\\cmd', + '%PROGRAMFILES%\\Git\\bin', + '%PROGRAMFILES(X86)%\\Git\\bin', + '%PROGRAMFILES%\\Git\\mingw64\\bin', + ], +}; + +function isSecurePath(pathStr: string): boolean { + const dangerousPatterns = [ + /[;&|`$(){}[\]<>!]/, // Shell metacharacters + /\.\.\//, // Unix directory traversal + /\.\.\\/, // Windows directory traversal + /[\r\n]/, // Newlines (command injection) + ]; + + for (const pattern of dangerousPatterns) { + if (pattern.test(pathStr)) { + return false; + } + } + + return true; +} + +export function expandWindowsPath(pathPattern: string): string | null { + const envVars: Record<string, string | undefined> = { + '%PROGRAMFILES%': process.env.ProgramFiles || 'C:\\Program Files', + '%PROGRAMFILES(X86)%': process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)', + '%LOCALAPPDATA%': process.env.LOCALAPPDATA, + '%APPDATA%': process.env.APPDATA, + '%USERPROFILE%': process.env.USERPROFILE || os.homedir(), + }; + + let expandedPath = pathPattern; + + for (const [placeholder, value] of Object.entries(envVars)) { + if (expandedPath.includes(placeholder)) { + if (!value) { + return null; + } + expandedPath = expandedPath.replace(placeholder, value); + } + } + + // Verify no unexpanded placeholders remain (indicates unknown 
variable) + if (/%[^%]+%/.test(expandedPath)) { + return null; + } + + // Normalize the path (resolve double backslashes, etc.) + return path.normalize(expandedPath); +} + +export function getWindowsExecutablePaths( + toolPaths: WindowsToolPaths, + logPrefix: string = '[Windows Paths]' +): string[] { + // Only run on Windows + if (process.platform !== 'win32') { + return []; + } + + const validPaths: string[] = []; + + for (const pattern of toolPaths.patterns) { + const expandedDir = expandWindowsPath(pattern); + + if (!expandedDir) { + console.warn(`${logPrefix} Could not expand path pattern: ${pattern}`); + continue; + } + + const fullPath = path.join(expandedDir, toolPaths.executable); + + // Security validation - reject potentially dangerous paths + if (!isSecurePath(fullPath)) { + console.warn(`${logPrefix} Path failed security validation: ${fullPath}`); + continue; + } + + if (existsSync(fullPath)) { + validPaths.push(fullPath); + } + } + + return validPaths; +} + +/** + * Find a Windows executable using the `where` command. + * This is the most reliable method as it searches: + * - All directories in PATH + * - App Paths registry entries + * - Current directory + * + * Works regardless of where the tool is installed (custom paths, different drives, etc.) + * + * @param executable - The executable name (e.g., 'git', 'gh', 'python') + * @param logPrefix - Prefix for console logging + * @returns The full path to the executable, or null if not found + */ +export function findWindowsExecutableViaWhere( + executable: string, + logPrefix: string = '[Windows Where]' +): string | null { + if (process.platform !== 'win32') { + return null; + } + + // Security: Only allow simple executable names (alphanumeric, dash, underscore, dot) + if (!/^[\w.-]+$/.test(executable)) { + console.warn(`${logPrefix} Invalid executable name: ${executable}`); + return null; + } + + try { + // Use 'where' command to find the executable + // where.exe is a built-in Windows command that finds executables + const result = execFileSync('where.exe', [executable], { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + }).trim(); + + // 'where' returns multiple paths separated by newlines if found in multiple locations + // We take the first one (highest priority in PATH) + const paths = result.split(/\r?\n/).filter(p => p.trim()); + + if (paths.length > 0) { + const foundPath = paths[0].trim(); + + // Validate the path exists and is secure + if (existsSync(foundPath) && isSecurePath(foundPath)) { + console.log(`${logPrefix} Found via where: ${foundPath}`); + return foundPath; + } + } + + return null; + } catch { + // 'where' returns exit code 1 if not found, which throws an error + return null; + } +} + +/** + * Async version of getWindowsExecutablePaths. + * Use this in async contexts to avoid blocking the main process. 
+ */ +export async function getWindowsExecutablePathsAsync( + toolPaths: WindowsToolPaths, + logPrefix: string = '[Windows Paths]' +): Promise<string[]> { + // Only run on Windows + if (process.platform !== 'win32') { + return []; + } + + const validPaths: string[] = []; + + for (const pattern of toolPaths.patterns) { + const expandedDir = expandWindowsPath(pattern); + + if (!expandedDir) { + console.warn(`${logPrefix} Could not expand path pattern: ${pattern}`); + continue; + } + + const fullPath = path.join(expandedDir, toolPaths.executable); + + // Security validation - reject potentially dangerous paths + if (!isSecurePath(fullPath)) { + console.warn(`${logPrefix} Path failed security validation: ${fullPath}`); + continue; + } + + try { + await access(fullPath, constants.F_OK); + validPaths.push(fullPath); + } catch { + // File doesn't exist, skip + } + } + + return validPaths; +} + +/** + * Async version of findWindowsExecutableViaWhere. + * Use this in async contexts to avoid blocking the main process. + * + * Find a Windows executable using the `where` command. + * This is the most reliable method as it searches: + * - All directories in PATH + * - App Paths registry entries + * - Current directory + * + * Works regardless of where the tool is installed (custom paths, different drives, etc.) + * + * @param executable - The executable name (e.g., 'git', 'gh', 'python') + * @param logPrefix - Prefix for console logging + * @returns The full path to the executable, or null if not found + */ +export async function findWindowsExecutableViaWhereAsync( + executable: string, + logPrefix: string = '[Windows Where]' +): Promise<string | null> { + if (process.platform !== 'win32') { + return null; + } + + // Security: Only allow simple executable names (alphanumeric, dash, underscore, dot) + if (!/^[\w.-]+$/.test(executable)) { + console.warn(`${logPrefix} Invalid executable name: ${executable}`); + return null; + } + + try { + // Use 'where' command to find the executable + // where.exe is a built-in Windows command that finds executables + const { stdout } = await execFileAsync('where.exe', [executable], { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + }); + + // 'where' returns multiple paths separated by newlines if found in multiple locations + // We take the first one (highest priority in PATH) + const paths = stdout.trim().split(/\r?\n/).filter(p => p.trim()); + + if (paths.length > 0) { + const foundPath = paths[0].trim(); + + // Validate the path exists and is secure + try { + await access(foundPath, constants.F_OK); + if (isSecurePath(foundPath)) { + console.log(`${logPrefix} Found via where: ${foundPath}`); + return foundPath; + } + } catch { + // Path doesn't exist + } + } + + return null; + } catch { + // 'where' returns exit code 1 if not found, which throws an error + return null; + } +} diff --git a/apps/frontend/src/main/worktree-paths.ts b/apps/frontend/src/main/worktree-paths.ts new file mode 100644 index 0000000000..b446562aa0 --- /dev/null +++ b/apps/frontend/src/main/worktree-paths.ts @@ -0,0 +1,76 @@ +/** + * Shared worktree path utilities + * + * Centralizes all worktree path constants and helper functions to avoid duplication + * and ensure consistent path handling across the application. 
+ */ + +import path from 'path'; +import { existsSync } from 'fs'; + +// Path constants for worktree directories +export const TASK_WORKTREE_DIR = '.auto-claude/worktrees/tasks'; +export const TERMINAL_WORKTREE_DIR = '.auto-claude/worktrees/terminal'; + +// Legacy path for backwards compatibility +export const LEGACY_WORKTREE_DIR = '.worktrees'; + +/** + * Get the task worktrees directory path + */ +export function getTaskWorktreeDir(projectPath: string): string { + return path.join(projectPath, TASK_WORKTREE_DIR); +} + +/** + * Get the full path for a specific task worktree + */ +export function getTaskWorktreePath(projectPath: string, specId: string): string { + return path.join(projectPath, TASK_WORKTREE_DIR, specId); +} + +/** + * Find a task worktree path, checking new location first then legacy + * Returns the path if found, null otherwise + */ +export function findTaskWorktree(projectPath: string, specId: string): string | null { + // Check new path first + const newPath = path.join(projectPath, TASK_WORKTREE_DIR, specId); + if (existsSync(newPath)) return newPath; + + // Legacy fallback + const legacyPath = path.join(projectPath, LEGACY_WORKTREE_DIR, specId); + if (existsSync(legacyPath)) return legacyPath; + + return null; +} + +/** + * Get the terminal worktrees directory path + */ +export function getTerminalWorktreeDir(projectPath: string): string { + return path.join(projectPath, TERMINAL_WORKTREE_DIR); +} + +/** + * Get the full path for a specific terminal worktree + */ +export function getTerminalWorktreePath(projectPath: string, name: string): string { + return path.join(projectPath, TERMINAL_WORKTREE_DIR, name); +} + +/** + * Find a terminal worktree path, checking new location first then legacy + * Returns the path if found, null otherwise + */ +export function findTerminalWorktree(projectPath: string, name: string): string | null { + // Check new path first + const newPath = path.join(projectPath, TERMINAL_WORKTREE_DIR, name); + if (existsSync(newPath)) return newPath; + + // Legacy fallback (terminal worktrees used terminal-{name} prefix) + const legacyPath = path.join(projectPath, LEGACY_WORKTREE_DIR, `terminal-${name}`); + if (existsSync(legacyPath)) return legacyPath; + + return null; +} diff --git a/apps/frontend/src/preload/api/agent-api.ts b/apps/frontend/src/preload/api/agent-api.ts index c4ae68ff15..f9af4fadfb 100644 --- a/apps/frontend/src/preload/api/agent-api.ts +++ b/apps/frontend/src/preload/api/agent-api.ts @@ -8,7 +8,6 @@ * - Changelog operations * - Linear integration * - GitHub integration - * - Auto-build source updates * - Shell operations */ @@ -19,7 +18,6 @@ import { createChangelogAPI, ChangelogAPI } from './modules/changelog-api'; import { createLinearAPI, LinearAPI } from './modules/linear-api'; import { createGitHubAPI, GitHubAPI } from './modules/github-api'; import { createGitLabAPI, GitLabAPI } from './modules/gitlab-api'; -import { createAutoBuildAPI, AutoBuildAPI } from './modules/autobuild-api'; import { createShellAPI, ShellAPI } from './modules/shell-api'; /** @@ -34,7 +32,6 @@ export interface AgentAPI extends LinearAPI, GitHubAPI, GitLabAPI, - AutoBuildAPI, ShellAPI {} /** @@ -50,7 +47,6 @@ export const createAgentAPI = (): AgentAPI => { const linearAPI = createLinearAPI(); const githubAPI = createGitHubAPI(); const gitlabAPI = createGitLabAPI(); - const autobuildAPI = createAutoBuildAPI(); const shellAPI = createShellAPI(); return { @@ -75,9 +71,6 @@ export const createAgentAPI = (): AgentAPI => { // GitLab Integration API 
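The new-then-legacy fallback in worktree-paths.ts is the piece callers rely on: prefer an existing checkout in either layout, and only mint the canonical new path when neither exists. A minimal sketch (import path illustrative):

import { findTaskWorktree, getTaskWorktreePath } from './worktree-paths';

// Resolve a task worktree, honoring a legacy .worktrees/<specId> checkout
// if one is still on disk, otherwise returning the canonical new location.
function resolveTaskWorktree(projectPath: string, specId: string): string {
  const existing = findTaskWorktree(projectPath, specId); // new dir first, then legacy
  return existing ?? getTaskWorktreePath(projectPath, specId);
}

// resolveTaskWorktree('/repo', 'spec-42')
// -> '/repo/.auto-claude/worktrees/tasks/spec-42' unless a legacy
//    '/repo/.worktrees/spec-42' checkout already exists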
...gitlabAPI, - // Auto-Build Source Update API - ...autobuildAPI, - // Shell Operations API ...shellAPI }; @@ -92,6 +85,5 @@ export type { LinearAPI, GitHubAPI, GitLabAPI, - AutoBuildAPI, ShellAPI }; diff --git a/apps/frontend/src/preload/api/app-update-api.ts b/apps/frontend/src/preload/api/app-update-api.ts index 3ba20af7d6..313c16eded 100644 --- a/apps/frontend/src/preload/api/app-update-api.ts +++ b/apps/frontend/src/preload/api/app-update-api.ts @@ -16,6 +16,7 @@ export interface AppUpdateAPI { // Operations checkAppUpdate: () => Promise>; downloadAppUpdate: () => Promise; + downloadStableUpdate: () => Promise; installAppUpdate: () => void; getAppVersion: () => Promise; @@ -29,6 +30,9 @@ export interface AppUpdateAPI { onAppUpdateProgress: ( callback: (progress: AppUpdateProgress) => void ) => IpcListenerCleanup; + onAppUpdateStableDowngrade: ( + callback: (info: AppUpdateInfo) => void + ) => IpcListenerCleanup; } /** @@ -42,6 +46,9 @@ export const createAppUpdateAPI = (): AppUpdateAPI => ({ downloadAppUpdate: (): Promise => invokeIpc(IPC_CHANNELS.APP_UPDATE_DOWNLOAD), + downloadStableUpdate: (): Promise => + invokeIpc(IPC_CHANNELS.APP_UPDATE_DOWNLOAD_STABLE), + installAppUpdate: (): void => { invokeIpc(IPC_CHANNELS.APP_UPDATE_INSTALL); }, @@ -63,5 +70,10 @@ export const createAppUpdateAPI = (): AppUpdateAPI => ({ onAppUpdateProgress: ( callback: (progress: AppUpdateProgress) => void ): IpcListenerCleanup => - createIpcListener(IPC_CHANNELS.APP_UPDATE_PROGRESS, callback) + createIpcListener(IPC_CHANNELS.APP_UPDATE_PROGRESS, callback), + + onAppUpdateStableDowngrade: ( + callback: (info: AppUpdateInfo) => void + ): IpcListenerCleanup => + createIpcListener(IPC_CHANNELS.APP_UPDATE_STABLE_DOWNGRADE, callback) }); diff --git a/apps/frontend/src/preload/api/index.ts b/apps/frontend/src/preload/api/index.ts index 51e28c76ae..25b72a241d 100644 --- a/apps/frontend/src/preload/api/index.ts +++ b/apps/frontend/src/preload/api/index.ts @@ -9,9 +9,11 @@ import { InsightsAPI, createInsightsAPI } from './modules/insights-api'; import { AppUpdateAPI, createAppUpdateAPI } from './app-update-api'; import { GitHubAPI, createGitHubAPI } from './modules/github-api'; import { GitLabAPI, createGitLabAPI } from './modules/gitlab-api'; +import { ADOAPI, createADOAPI } from './modules/ado-api'; import { DebugAPI, createDebugAPI } from './modules/debug-api'; import { ClaudeCodeAPI, createClaudeCodeAPI } from './modules/claude-code-api'; import { McpAPI, createMcpAPI } from './modules/mcp-api'; +import { ProfileAPI, createProfileAPI } from './profile-api'; export interface ElectronAPI extends ProjectAPI, @@ -26,8 +28,10 @@ export interface ElectronAPI extends GitLabAPI, DebugAPI, ClaudeCodeAPI, - McpAPI { + McpAPI, + ProfileAPI { github: GitHubAPI; + ado: ADOAPI; } export const createElectronAPI = (): ElectronAPI => ({ @@ -44,7 +48,9 @@ export const createElectronAPI = (): ElectronAPI => ({ ...createDebugAPI(), ...createClaudeCodeAPI(), ...createMcpAPI(), - github: createGitHubAPI() + ...createProfileAPI(), + github: createGitHubAPI(), + ado: createADOAPI() }); // Export individual API creators for potential use in tests or specialized contexts @@ -58,8 +64,10 @@ export { createIdeationAPI, createInsightsAPI, createAppUpdateAPI, + createProfileAPI, createGitHubAPI, createGitLabAPI, + createADOAPI, createDebugAPI, createClaudeCodeAPI, createMcpAPI @@ -75,8 +83,10 @@ export type { IdeationAPI, InsightsAPI, AppUpdateAPI, + ProfileAPI, GitHubAPI, GitLabAPI, + ADOAPI, DebugAPI, ClaudeCodeAPI, McpAPI diff --git 
a/apps/frontend/src/preload/api/modules/ado-api.ts b/apps/frontend/src/preload/api/modules/ado-api.ts new file mode 100644 index 0000000000..872210af59 --- /dev/null +++ b/apps/frontend/src/preload/api/modules/ado-api.ts @@ -0,0 +1,268 @@ +import { IPC_CHANNELS } from '../../../shared/constants'; +import type { IPCResult } from '../../../shared/types'; +import { invokeIpc, createIpcListener, IpcListenerCleanup } from './ipc-utils'; + +/** + * Azure DevOps Work Item (normalized format) + */ +export interface ADOWorkItem { + id: number; + number: number; + title: string; + body?: string; + state: 'open' | 'closed'; + workItemType: string; + tags: string[]; + assignees: Array<{ + login: string; + displayName: string; + avatarUrl?: string; + }>; + author: { + login: string; + displayName: string; + avatarUrl?: string; + }; + priority?: number; + iteration?: string; + areaPath?: string; + createdAt: string; + updatedAt: string; + closedAt?: string; + url: string; + htmlUrl: string; +} + +/** + * Azure DevOps Pull Request (normalized format) + */ +export interface ADOPullRequest { + id: number; + number: number; + title: string; + body?: string; + state: 'open' | 'closed' | 'merged'; + author: { + login: string; + displayName: string; + avatarUrl?: string; + }; + sourceBranch: string; + targetBranch: string; + isDraft: boolean; + mergeStatus?: string; + reviewers: Array<{ + login: string; + displayName: string; + avatarUrl?: string; + vote: number; + }>; + labels: string[]; + createdAt: string; + updatedAt: string; + closedAt?: string; + url: string; + htmlUrl: string; +} + +/** + * Azure DevOps Comment + */ +export interface ADOComment { + id: number; + content: string; + author: { + displayName: string; + uniqueName: string; + imageUrl?: string; + }; + publishedDate: string; + lastUpdatedDate: string; +} + +// Work Item Operations + +export async function checkADOConnection(projectId: string): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_CHECK_CONNECTION, projectId); +} + +export async function testADOConnection(credentials: { + organization: string; + project: string; + repoName: string; + pat: string; + instanceUrl: string; +}): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_TEST_CONNECTION, credentials); +} + +export async function getADOWorkItems( + projectId: string, + state: 'open' | 'closed' | 'all' = 'open' +): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_GET_WORK_ITEMS, projectId, state); +} + +export async function getADOWorkItem( + projectId: string, + workItemId: number +): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_GET_WORK_ITEM, projectId, workItemId); +} + +export async function createADOWorkItem( + projectId: string, + workItemType: string, + title: string, + body?: string, + tags?: string[] +): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_CREATE_WORK_ITEM, projectId, workItemType, title, body, tags); +} + +export async function updateADOWorkItem( + projectId: string, + workItemId: number, + updates: { title?: string; body?: string; state?: string; tags?: string[] } +): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_UPDATE_WORK_ITEM, projectId, workItemId, updates); +} + +export async function getADOWorkItemComments( + projectId: string, + workItemId: number +): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_GET_WORK_ITEM_COMMENTS, projectId, workItemId); +} + +// Pull Request Operations + +export async function getADOPullRequests( + projectId: string, + status: 'active' | 'completed' | 'abandoned' | 'all' = 'active' +): Promise> { + return 
invokeIpc(IPC_CHANNELS.ADO_PR_LIST, projectId, status); +} + +export async function getADOPullRequest( + projectId: string, + prId: number +): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_PR_GET, projectId, prId); +} + +export async function getADOPullRequestDiff( + projectId: string, + prId: number +): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_PR_GET_DIFF, projectId, prId); +} + +export async function postADOPRReview( + projectId: string, + prId: number, + comment: string, + vote?: number +): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_PR_POST_REVIEW, projectId, prId, comment, vote); +} + +export async function mergeADOPullRequest( + projectId: string, + prId: number, + mergeStrategy?: 'squash' | 'rebase' | 'noFastForward', + deleteSourceBranch?: boolean +): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_PR_MERGE, projectId, prId, mergeStrategy, deleteSourceBranch); +} + +export async function abandonADOPullRequest( + projectId: string, + prId: number +): Promise> { + return invokeIpc(IPC_CHANNELS.ADO_PR_ABANDON, projectId, prId); +} + +// Event Listeners + +export function onADOInvestigationProgress( + callback: (progress: { phase: string; progress: number; message: string }) => void +): IpcListenerCleanup { + return createIpcListener(IPC_CHANNELS.ADO_INVESTIGATION_PROGRESS, callback); +} + +export function onADOInvestigationComplete( + callback: (result: unknown) => void +): IpcListenerCleanup { + return createIpcListener(IPC_CHANNELS.ADO_INVESTIGATION_COMPLETE, callback); +} + +export function onADOInvestigationError( + callback: (error: string) => void +): IpcListenerCleanup { + return createIpcListener(IPC_CHANNELS.ADO_INVESTIGATION_ERROR, callback); +} + +export function onADOPRReviewProgress( + callback: (progress: { phase: string; progress: number; message: string }) => void +): IpcListenerCleanup { + return createIpcListener(IPC_CHANNELS.ADO_PR_REVIEW_PROGRESS, callback); +} + +export function onADOPRReviewComplete( + callback: (result: unknown) => void +): IpcListenerCleanup { + return createIpcListener(IPC_CHANNELS.ADO_PR_REVIEW_COMPLETE, callback); +} + +export function onADOPRReviewError( + callback: (error: string) => void +): IpcListenerCleanup { + return createIpcListener(IPC_CHANNELS.ADO_PR_REVIEW_ERROR, callback); +} + +// API Interface and Factory + +export interface ADOAPI { + checkADOConnection: (projectId: string) => Promise>; + testADOConnection: (credentials: { organization: string; project: string; repoName: string; pat: string; instanceUrl: string }) => Promise>; + getADOWorkItems: (projectId: string, state?: 'open' | 'closed' | 'all') => Promise>; + getADOWorkItem: (projectId: string, workItemId: number) => Promise>; + createADOWorkItem: (projectId: string, workItemType: string, title: string, body?: string, tags?: string[]) => Promise>; + updateADOWorkItem: (projectId: string, workItemId: number, updates: { title?: string; body?: string; state?: string; tags?: string[] }) => Promise>; + getADOWorkItemComments: (projectId: string, workItemId: number) => Promise>; + getADOPullRequests: (projectId: string, status?: 'active' | 'completed' | 'abandoned' | 'all') => Promise>; + getADOPullRequest: (projectId: string, prId: number) => Promise>; + getADOPullRequestDiff: (projectId: string, prId: number) => Promise>; + postADOPRReview: (projectId: string, prId: number, comment: string, vote?: number) => Promise>; + mergeADOPullRequest: (projectId: string, prId: number, mergeStrategy?: 'squash' | 'rebase' | 'noFastForward', deleteSourceBranch?: boolean) => 
Promise>; + abandonADOPullRequest: (projectId: string, prId: number) => Promise>; + onADOInvestigationProgress: (callback: (progress: { phase: string; progress: number; message: string }) => void) => IpcListenerCleanup; + onADOInvestigationComplete: (callback: (result: unknown) => void) => IpcListenerCleanup; + onADOInvestigationError: (callback: (error: string) => void) => IpcListenerCleanup; + onADOPRReviewProgress: (callback: (progress: { phase: string; progress: number; message: string }) => void) => IpcListenerCleanup; + onADOPRReviewComplete: (callback: (result: unknown) => void) => IpcListenerCleanup; + onADOPRReviewError: (callback: (error: string) => void) => IpcListenerCleanup; +} + +export const createADOAPI = (): ADOAPI => ({ + checkADOConnection, + testADOConnection, + getADOWorkItems, + getADOWorkItem, + createADOWorkItem, + updateADOWorkItem, + getADOWorkItemComments, + getADOPullRequests, + getADOPullRequest, + getADOPullRequestDiff, + postADOPRReview, + mergeADOPullRequest, + abandonADOPullRequest, + onADOInvestigationProgress, + onADOInvestigationComplete, + onADOInvestigationError, + onADOPRReviewProgress, + onADOPRReviewComplete, + onADOPRReviewError, +}); diff --git a/apps/frontend/src/preload/api/modules/autobuild-api.ts b/apps/frontend/src/preload/api/modules/autobuild-api.ts deleted file mode 100644 index e0e7aca6a6..0000000000 --- a/apps/frontend/src/preload/api/modules/autobuild-api.ts +++ /dev/null @@ -1,43 +0,0 @@ -import { IPC_CHANNELS } from '../../../shared/constants'; -import type { - AutoBuildSourceUpdateCheck, - AutoBuildSourceUpdateProgress, - IPCResult -} from '../../../shared/types'; -import { createIpcListener, invokeIpc, sendIpc, IpcListenerCleanup } from './ipc-utils'; - -/** - * Auto-Build Source Update API operations - */ -export interface AutoBuildAPI { - // Operations - checkAutoBuildSourceUpdate: () => Promise>; - downloadAutoBuildSourceUpdate: () => void; - getAutoBuildSourceVersion: () => Promise>; - - // Event Listeners - onAutoBuildSourceUpdateProgress: ( - callback: (progress: AutoBuildSourceUpdateProgress) => void - ) => IpcListenerCleanup; -} - -/** - * Creates the Auto-Build Source Update API implementation - */ -export const createAutoBuildAPI = (): AutoBuildAPI => ({ - // Operations - checkAutoBuildSourceUpdate: (): Promise> => - invokeIpc(IPC_CHANNELS.AUTOBUILD_SOURCE_CHECK), - - downloadAutoBuildSourceUpdate: (): void => - sendIpc(IPC_CHANNELS.AUTOBUILD_SOURCE_DOWNLOAD), - - getAutoBuildSourceVersion: (): Promise> => - invokeIpc(IPC_CHANNELS.AUTOBUILD_SOURCE_VERSION), - - // Event Listeners - onAutoBuildSourceUpdateProgress: ( - callback: (progress: AutoBuildSourceUpdateProgress) => void - ): IpcListenerCleanup => - createIpcListener(IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, callback) -}); diff --git a/apps/frontend/src/preload/api/modules/github-api.ts b/apps/frontend/src/preload/api/modules/github-api.ts index 7436f87345..c08c834a35 100644 --- a/apps/frontend/src/preload/api/modules/github-api.ts +++ b/apps/frontend/src/preload/api/modules/github-api.ts @@ -125,6 +125,26 @@ export interface AnalyzePreviewResult { error?: string; } +/** + * Workflow run awaiting approval (for fork PRs) + */ +export interface WorkflowAwaitingApproval { + id: number; + name: string; + html_url: string; + workflow_name: string; +} + +/** + * Workflows awaiting approval result + */ +export interface WorkflowsAwaitingApprovalResult { + awaiting_approval: number; + workflow_runs: WorkflowAwaitingApproval[]; + can_approve: boolean; + error?: string; +} + /** 
* GitHub Integration API operations */ @@ -234,23 +254,30 @@ export interface GitHubAPI { ) => IpcListenerCleanup; // PR operations - listPRs: (projectId: string) => Promise; + listPRs: (projectId: string, page?: number) => Promise; + getPR: (projectId: string, prNumber: number) => Promise; runPRReview: (projectId: string, prNumber: number) => void; cancelPRReview: (projectId: string, prNumber: number) => Promise; - postPRReview: (projectId: string, prNumber: number, selectedFindingIds?: string[]) => Promise; + postPRReview: (projectId: string, prNumber: number, selectedFindingIds?: string[], options?: { forceApprove?: boolean }) => Promise; deletePRReview: (projectId: string, prNumber: number) => Promise; postPRComment: (projectId: string, prNumber: number, body: string) => Promise; mergePR: (projectId: string, prNumber: number, mergeMethod?: 'merge' | 'squash' | 'rebase') => Promise; assignPR: (projectId: string, prNumber: number, username: string) => Promise; getPRReview: (projectId: string, prNumber: number) => Promise; + getPRReviewsBatch: (projectId: string, prNumbers: number[]) => Promise>; // Follow-up review operations checkNewCommits: (projectId: string, prNumber: number) => Promise; + checkMergeReadiness: (projectId: string, prNumber: number) => Promise; runFollowupReview: (projectId: string, prNumber: number) => void; // PR logs getPRLogs: (projectId: string, prNumber: number) => Promise; + // Workflow approval (for fork PRs) + getWorkflowsAwaitingApproval: (projectId: string, prNumber: number) => Promise; + approveWorkflow: (projectId: string, runId: number) => Promise; + // PR event listeners onPRReviewProgress: ( callback: (projectId: string, progress: PRReviewProgress) => void @@ -320,6 +347,7 @@ export interface PRReviewResult { error?: string; // Follow-up review fields reviewedCommitSha?: string; + reviewedFileBlobs?: Record<string, string>; // filename → blob SHA for rebase-resistant follow-ups isFollowupReview?: boolean; previousReviewId?: number; resolvedFindings?: string[]; @@ -343,6 +371,21 @@ export interface NewCommitsCheck { hasCommitsAfterPosting?: boolean; } +/** + * Lightweight merge readiness check result + * Used for real-time validation of AI verdict freshness + */ +export interface MergeReadiness { + /** PR is in draft mode */ + isDraft: boolean; + /** GitHub's mergeable status */ + mergeable: 'MERGEABLE' | 'CONFLICTING' | 'UNKNOWN'; + /** Simplified CI status */ + ciStatus: 'passing' | 'failing' | 'pending' | 'none'; + /** List of blockers that contradict a "ready to merge" verdict */ + blockers: string[]; +} + /** * Review progress status */ @@ -585,8 +628,11 @@ export const createGitHubAPI = (): GitHubAPI => ({ createIpcListener(IPC_CHANNELS.GITHUB_AUTOFIX_ANALYZE_PREVIEW_ERROR, callback), // PR operations - listPRs: (projectId: string): Promise => - invokeIpc(IPC_CHANNELS.GITHUB_PR_LIST, projectId), + listPRs: (projectId: string, page: number = 1): Promise => + invokeIpc(IPC_CHANNELS.GITHUB_PR_LIST, projectId, page), + + getPR: (projectId: string, prNumber: number): Promise => + invokeIpc(IPC_CHANNELS.GITHUB_PR_GET, projectId, prNumber), runPRReview: (projectId: string, prNumber: number): void => sendIpc(IPC_CHANNELS.GITHUB_PR_REVIEW, projectId, prNumber), @@ -594,8 +640,8 @@ export const createGitHubAPI = (): GitHubAPI => ({ cancelPRReview: (projectId: string, prNumber: number): Promise => invokeIpc(IPC_CHANNELS.GITHUB_PR_REVIEW_CANCEL, projectId, prNumber), - postPRReview: (projectId: string, prNumber: number, selectedFindingIds?: string[]): Promise => -
invokeIpc(IPC_CHANNELS.GITHUB_PR_POST_REVIEW, projectId, prNumber, selectedFindingIds), + postPRReview: (projectId: string, prNumber: number, selectedFindingIds?: string[], options?: { forceApprove?: boolean }): Promise => + invokeIpc(IPC_CHANNELS.GITHUB_PR_POST_REVIEW, projectId, prNumber, selectedFindingIds, options), deletePRReview: (projectId: string, prNumber: number): Promise => invokeIpc(IPC_CHANNELS.GITHUB_PR_DELETE_REVIEW, projectId, prNumber), @@ -612,10 +658,16 @@ export const createGitHubAPI = (): GitHubAPI => ({ getPRReview: (projectId: string, prNumber: number): Promise => invokeIpc(IPC_CHANNELS.GITHUB_PR_GET_REVIEW, projectId, prNumber), + getPRReviewsBatch: (projectId: string, prNumbers: number[]): Promise> => + invokeIpc(IPC_CHANNELS.GITHUB_PR_GET_REVIEWS_BATCH, projectId, prNumbers), + // Follow-up review operations checkNewCommits: (projectId: string, prNumber: number): Promise => invokeIpc(IPC_CHANNELS.GITHUB_PR_CHECK_NEW_COMMITS, projectId, prNumber), + checkMergeReadiness: (projectId: string, prNumber: number): Promise => + invokeIpc(IPC_CHANNELS.GITHUB_PR_CHECK_MERGE_READINESS, projectId, prNumber), + runFollowupReview: (projectId: string, prNumber: number): void => sendIpc(IPC_CHANNELS.GITHUB_PR_FOLLOWUP_REVIEW, projectId, prNumber), @@ -623,6 +675,13 @@ export const createGitHubAPI = (): GitHubAPI => ({ getPRLogs: (projectId: string, prNumber: number): Promise => invokeIpc(IPC_CHANNELS.GITHUB_PR_GET_LOGS, projectId, prNumber), + // Workflow approval (for fork PRs) + getWorkflowsAwaitingApproval: (projectId: string, prNumber: number): Promise => + invokeIpc(IPC_CHANNELS.GITHUB_WORKFLOWS_AWAITING_APPROVAL, projectId, prNumber), + + approveWorkflow: (projectId: string, runId: number): Promise => + invokeIpc(IPC_CHANNELS.GITHUB_WORKFLOW_APPROVE, projectId, runId), + // PR event listeners onPRReviewProgress: ( callback: (projectId: string, progress: PRReviewProgress) => void diff --git a/apps/frontend/src/preload/api/modules/index.ts b/apps/frontend/src/preload/api/modules/index.ts index 48b4f8b2cf..e99c5c7c5b 100644 --- a/apps/frontend/src/preload/api/modules/index.ts +++ b/apps/frontend/src/preload/api/modules/index.ts @@ -11,6 +11,6 @@ export * from './insights-api'; export * from './changelog-api'; export * from './linear-api'; export * from './github-api'; -export * from './autobuild-api'; +export * from './ado-api'; export * from './shell-api'; export * from './debug-api'; diff --git a/apps/frontend/src/preload/api/profile-api.ts b/apps/frontend/src/preload/api/profile-api.ts new file mode 100644 index 0000000000..e285c6f10a --- /dev/null +++ b/apps/frontend/src/preload/api/profile-api.ts @@ -0,0 +1,144 @@ +import { ipcRenderer } from 'electron'; +import { IPC_CHANNELS } from '../../shared/constants'; +import type { IPCResult } from '../../shared/types'; +import type { + APIProfile, + ProfileFormData, + ProfilesFile, + TestConnectionResult, + DiscoverModelsResult +} from '@shared/types/profile'; + +export interface ProfileAPI { + // Get all profiles + getAPIProfiles: () => Promise>; + + // Save/create a profile + saveAPIProfile: ( + profile: ProfileFormData + ) => Promise>; + + // Update an existing profile + updateAPIProfile: ( + profile: APIProfile + ) => Promise>; + + // Delete a profile + deleteAPIProfile: (profileId: string) => Promise; + + // Set active profile (null to switch to OAuth) + setActiveAPIProfile: (profileId: string | null) => Promise; + + // Test API profile connection + testConnection: ( + baseUrl: string, + apiKey: string, + signal?: 
AbortSignal + ) => Promise>; + + // Discover available models from API + discoverModels: ( + baseUrl: string, + apiKey: string, + signal?: AbortSignal + ) => Promise>; +} + +let testConnectionRequestId = 0; +let discoverModelsRequestId = 0; + +export const createProfileAPI = (): ProfileAPI => ({ + // Get all profiles + getAPIProfiles: (): Promise> => + ipcRenderer.invoke(IPC_CHANNELS.PROFILES_GET), + + // Save/create a profile + saveAPIProfile: ( + profile: ProfileFormData + ): Promise> => + ipcRenderer.invoke(IPC_CHANNELS.PROFILES_SAVE, profile), + + // Update an existing profile + updateAPIProfile: ( + profile: APIProfile + ): Promise> => + ipcRenderer.invoke(IPC_CHANNELS.PROFILES_UPDATE, profile), + + // Delete a profile + deleteAPIProfile: (profileId: string): Promise => + ipcRenderer.invoke(IPC_CHANNELS.PROFILES_DELETE, profileId), + + // Set active profile (null to switch to OAuth) + setActiveAPIProfile: (profileId: string | null): Promise => + ipcRenderer.invoke(IPC_CHANNELS.PROFILES_SET_ACTIVE, profileId), + + // Test API profile connection + testConnection: ( + baseUrl: string, + apiKey: string, + signal?: AbortSignal + ): Promise> => { + const requestId = ++testConnectionRequestId; + + // Check if already aborted before initiating request + if (signal && signal.aborted) { + return Promise.reject(new DOMException('The operation was aborted.', 'AbortError')); + } + + // Setup abort listener AFTER checking aborted status to avoid race condition + if (signal && typeof signal.addEventListener === 'function') { + try { + signal.addEventListener('abort', () => { + ipcRenderer.send(IPC_CHANNELS.PROFILES_TEST_CONNECTION_CANCEL, requestId); + }, { once: true }); + } catch (err) { + console.error('[preload/profile-api] Error adding abort listener:', err); + } + } else if (signal) { + console.warn('[preload/profile-api] signal provided but addEventListener not available - signal may have been serialized'); + } + + return ipcRenderer.invoke(IPC_CHANNELS.PROFILES_TEST_CONNECTION, baseUrl, apiKey, requestId); + }, + + // Discover available models from API + discoverModels: ( + baseUrl: string, + apiKey: string, + signal?: AbortSignal + ): Promise> => { + console.log('[preload/profile-api] discoverModels START'); + console.log('[preload/profile-api] baseUrl, apiKey:', baseUrl, apiKey?.slice(-4)); + + const requestId = ++discoverModelsRequestId; + console.log('[preload/profile-api] Request ID:', requestId); + + // Check if already aborted before initiating request + if (signal && signal.aborted) { + console.log('[preload/profile-api] Already aborted, rejecting'); + return Promise.reject(new DOMException('The operation was aborted.', 'AbortError')); + } + + // Setup abort listener AFTER checking aborted status to avoid race condition + if (signal && typeof signal.addEventListener === 'function') { + console.log('[preload/profile-api] Setting up abort listener...'); + try { + signal.addEventListener('abort', () => { + console.log('[preload/profile-api] Abort signal received for request:', requestId); + ipcRenderer.send(IPC_CHANNELS.PROFILES_DISCOVER_MODELS_CANCEL, requestId); + }, { once: true }); + console.log('[preload/profile-api] Abort listener added successfully'); + } catch (err) { + console.error('[preload/profile-api] Error adding abort listener:', err); + } + } else if (signal) { + console.warn('[preload/profile-api] signal provided but addEventListener not available - signal may have been serialized'); + } + + const channel = 'profiles:discover-models'; + 
console.log('[preload/profile-api] About to invoke IPC channel:', channel); + const promise = ipcRenderer.invoke(channel, baseUrl, apiKey, requestId); + console.log('[preload/profile-api] IPC invoke called, promise returned'); + return promise; + } +}); diff --git a/apps/frontend/src/preload/api/settings-api.ts b/apps/frontend/src/preload/api/settings-api.ts index 263c32d084..1c1f8752f9 100644 --- a/apps/frontend/src/preload/api/settings-api.ts +++ b/apps/frontend/src/preload/api/settings-api.ts @@ -28,6 +28,11 @@ export interface SettingsAPI { getSourceEnv: () => Promise>; updateSourceEnv: (config: { claudeOAuthToken?: string }) => Promise; checkSourceToken: () => Promise>; + + // Sentry error reporting + notifySentryStateChanged: (enabled: boolean) => void; + getSentryDsn: () => Promise; + getSentryConfig: () => Promise<{ dsn: string; tracesSampleRate: number; profilesSampleRate: number }>; } export const createSettingsAPI = (): SettingsAPI => ({ @@ -59,5 +64,17 @@ export const createSettingsAPI = (): SettingsAPI => ({ ipcRenderer.invoke(IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_UPDATE, config), checkSourceToken: (): Promise> => - ipcRenderer.invoke(IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_CHECK_TOKEN) + ipcRenderer.invoke(IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_CHECK_TOKEN), + + // Sentry error reporting - notify main process when setting changes + notifySentryStateChanged: (enabled: boolean): void => + ipcRenderer.send(IPC_CHANNELS.SENTRY_STATE_CHANGED, enabled), + + // Get Sentry DSN from main process (loaded from environment variable) + getSentryDsn: (): Promise => + ipcRenderer.invoke(IPC_CHANNELS.GET_SENTRY_DSN), + + // Get full Sentry config from main process (DSN + sample rates) + getSentryConfig: (): Promise<{ dsn: string; tracesSampleRate: number; profilesSampleRate: number }> => + ipcRenderer.invoke(IPC_CHANNELS.GET_SENTRY_CONFIG) }); diff --git a/apps/frontend/src/preload/api/terminal-api.ts b/apps/frontend/src/preload/api/terminal-api.ts index 14aaa3e507..e7b509f81b 100644 --- a/apps/frontend/src/preload/api/terminal-api.ts +++ b/apps/frontend/src/preload/api/terminal-api.ts @@ -1,12 +1,21 @@ import { ipcRenderer } from 'electron'; import { IPC_CHANNELS } from '../../shared/constants'; + +// Increase max listeners to accommodate 12 terminals with multiple event types +// Each terminal can have listeners for: output, exit, titleChange, claudeSession, etc. 
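The request-id plumbing above exists because an AbortSignal cannot travel over IPC: the preload listens for 'abort' locally and sends a cancel message carrying the id instead (and, as the warnings note, a signal that was serialized across a context bridge loses addEventListener). A hedged renderer-side sketch, assuming the profile API is spread onto window.electronAPI as the preload index suggests:

async function discoverWithTimeout(baseUrl: string, apiKey: string) {
  const controller = new AbortController();
  // Abort automatically after 10s; the preload shim translates the local
  // 'abort' event into a PROFILES_DISCOVER_MODELS_CANCEL send keyed by
  // request id, since the signal itself never crosses the IPC boundary.
  const timer = setTimeout(() => controller.abort(), 10_000);
  try {
    return await window.electronAPI.discoverModels(baseUrl, apiKey, controller.signal);
  } finally {
    clearTimeout(timer);
  }
}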
+// Default is 10, but with 12 terminals we need more headroom +ipcRenderer.setMaxListeners(50); + import type { IPCResult, TerminalCreateOptions, RateLimitInfo, ClaudeProfile, ClaudeProfileSettings, - ClaudeUsageSnapshot + ClaudeUsageSnapshot, + CreateTerminalWorktreeRequest, + TerminalWorktreeConfig, + TerminalWorktreeResult, } from '../../shared/types'; /** Type for proactive swap notification events */ @@ -25,6 +34,8 @@ export interface TerminalAPI { resizeTerminal: (id: string, cols: number, rows: number) => void; invokeClaudeInTerminal: (id: string, cwd?: string) => void; generateTerminalName: (command: string, cwd?: string) => Promise>; + setTerminalTitle: (id: string, title: string) => void; + setTerminalWorktreeConfig: (id: string, config: TerminalWorktreeConfig | undefined) => void; // Terminal Session Management getTerminalSessions: (projectPath: string) => Promise>; @@ -48,6 +59,11 @@ export interface TerminalAPI { ) => Promise>; checkTerminalPtyAlive: (terminalId: string) => Promise>; + // Terminal Worktree Operations (isolated development) + createTerminalWorktree: (request: CreateTerminalWorktreeRequest) => Promise; + listTerminalWorktrees: (projectPath: string) => Promise>; + removeTerminalWorktree: (projectPath: string, name: string, deleteBranch?: boolean) => Promise; + // Terminal Event Listeners onTerminalOutput: (callback: (id: string, data: string) => void) => () => void; onTerminalExit: (callback: (id: string, exitCode: number) => void) => () => void; @@ -57,6 +73,10 @@ export interface TerminalAPI { onTerminalOAuthToken: ( callback: (info: { terminalId: string; profileId?: string; email?: string; success: boolean; message?: string; detectedAt: string }) => void ) => () => void; + onTerminalAuthCreated: ( + callback: (info: { terminalId: string; profileId: string; profileName: string }) => void + ) => () => void; + onTerminalClaudeBusy: (callback: (id: string, isBusy: boolean) => void) => () => void; // Claude Profile Management getClaudeProfiles: () => Promise>; @@ -100,6 +120,12 @@ export const createTerminalAPI = (): TerminalAPI => ({ generateTerminalName: (command: string, cwd?: string): Promise> => ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_GENERATE_NAME, command, cwd), + setTerminalTitle: (id: string, title: string): void => + ipcRenderer.send(IPC_CHANNELS.TERMINAL_SET_TITLE, id, title), + + setTerminalWorktreeConfig: (id: string, config: TerminalWorktreeConfig | undefined): void => + ipcRenderer.send(IPC_CHANNELS.TERMINAL_SET_WORKTREE_CONFIG, id, config), + // Terminal Session Management getTerminalSessions: (projectPath: string): Promise> => ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_GET_SESSIONS, projectPath), @@ -137,6 +163,16 @@ export const createTerminalAPI = (): TerminalAPI => ({ checkTerminalPtyAlive: (terminalId: string): Promise> => ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_CHECK_PTY_ALIVE, terminalId), + // Terminal Worktree Operations (isolated development) + createTerminalWorktree: (request: CreateTerminalWorktreeRequest): Promise => + ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_WORKTREE_CREATE, request), + + listTerminalWorktrees: (projectPath: string): Promise> => + ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_WORKTREE_LIST, projectPath), + + removeTerminalWorktree: (projectPath: string, name: string, deleteBranch: boolean = false): Promise => + ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_WORKTREE_REMOVE, projectPath, name, deleteBranch), + // Terminal Event Listeners onTerminalOutput: ( callback: (id: string, data: string) => void @@ -232,6 +268,37 @@ export 
const createTerminalAPI = (): TerminalAPI => ({ }; }, + onTerminalAuthCreated: ( + callback: (info: { terminalId: string; profileId: string; profileName: string }) => void + ): (() => void) => { + const handler = ( + _event: Electron.IpcRendererEvent, + info: { terminalId: string; profileId: string; profileName: string } + ): void => { + callback(info); + }; + ipcRenderer.on(IPC_CHANNELS.TERMINAL_AUTH_CREATED, handler); + return () => { + ipcRenderer.removeListener(IPC_CHANNELS.TERMINAL_AUTH_CREATED, handler); + }; + }, + + onTerminalClaudeBusy: ( + callback: (id: string, isBusy: boolean) => void + ): (() => void) => { + const handler = ( + _event: Electron.IpcRendererEvent, + id: string, + isBusy: boolean + ): void => { + callback(id, isBusy); + }; + ipcRenderer.on(IPC_CHANNELS.TERMINAL_CLAUDE_BUSY, handler); + return () => { + ipcRenderer.removeListener(IPC_CHANNELS.TERMINAL_CLAUDE_BUSY, handler); + }; + }, + // Claude Profile Management getClaudeProfiles: (): Promise> => ipcRenderer.invoke(IPC_CHANNELS.CLAUDE_PROFILES_GET), diff --git a/apps/frontend/src/renderer/App.tsx b/apps/frontend/src/renderer/App.tsx index e8a9289b56..9399461033 100644 --- a/apps/frontend/src/renderer/App.tsx +++ b/apps/frontend/src/renderer/App.tsx @@ -16,6 +16,7 @@ import { } from '@dnd-kit/sortable'; import { TooltipProvider } from './components/ui/tooltip'; import { Button } from './components/ui/button'; +import { Toaster } from './components/ui/toaster'; import { Dialog, DialogContent, @@ -39,6 +40,7 @@ import { GitHubIssues } from './components/GitHubIssues'; import { GitLabIssues } from './components/GitLabIssues'; import { GitHubPRs } from './components/github-prs'; import { GitLabMergeRequests } from './components/gitlab-merge-requests'; +import { ADOWorkItems } from './components/ADOWorkItems'; import { Changelog } from './components/Changelog'; import { Worktrees } from './components/Worktrees'; import { AgentTools } from './components/AgentTools'; @@ -51,7 +53,8 @@ import { ProactiveSwapListener } from './components/ProactiveSwapListener'; import { GitHubSetupModal } from './components/GitHubSetupModal'; import { useProjectStore, loadProjects, addProject, initializeProject, removeProject } from './stores/project-store'; import { useTaskStore, loadTasks } from './stores/task-store'; -import { useSettingsStore, loadSettings } from './stores/settings-store'; +import { useSettingsStore, loadSettings, loadProfiles } from './stores/settings-store'; +import { useClaudeProfileStore } from './stores/claude-profile-store'; import { useTerminalStore, restoreTerminalSessions } from './stores/terminal-store'; import { initializeGitHubListeners } from './stores/github'; import { initDownloadProgressListener } from './stores/download-store'; @@ -61,10 +64,9 @@ import { COLOR_THEMES, UI_SCALE_MIN, UI_SCALE_MAX, UI_SCALE_DEFAULT } from '../s import type { Task, Project, ColorTheme } from '../shared/types'; import { ProjectTabBar } from './components/ProjectTabBar'; import { AddProjectModal } from './components/AddProjectModal'; -import { ViewStateProvider, useViewState } from './contexts/ViewStateContext'; +import { ViewStateProvider } from './contexts/ViewStateContext'; -// Wrapper component that connects ProjectTabBar to ViewStateContext -// (needed because App renders the Provider and can't use useViewState directly) +// Wrapper component for ProjectTabBar interface ProjectTabBarWithContextProps { projects: Project[]; activeProjectId: string | null; @@ -72,7 +74,6 @@ interface ProjectTabBarWithContextProps { 
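Both new listeners follow the preload convention of returning an unsubscribe function that removes the underlying ipcRenderer handler, which is what keeps the raised setMaxListeners(50) ceiling from being exhausted. A hedged sketch of consuming one from React, assuming the terminal API is exposed on window.electronAPI like the other modules:

import { useEffect, useState } from 'react';

function useClaudeBusy(terminalId: string): boolean {
  const [busy, setBusy] = useState(false);

  useEffect(() => {
    // Returning the cleanup function from the effect unregisters the
    // ipcRenderer listener when the terminal unmounts or changes id.
    const unsubscribe = window.electronAPI.onTerminalClaudeBusy((id, isBusy) => {
      if (id === terminalId) setBusy(isBusy);
    });
    return unsubscribe;
  }, [terminalId]);

  return busy;
}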
onProjectClose: (projectId: string) => void; onAddProject: () => void; onSettingsClick: () => void; - tasks: Task[]; } function ProjectTabBarWithContext({ @@ -81,12 +82,8 @@ function ProjectTabBarWithContext({ onProjectSelect, onProjectClose, onAddProject, - onSettingsClick, - tasks + onSettingsClick }: ProjectTabBarWithContextProps) { - const { showArchived, toggleShowArchived } = useViewState(); - const archivedCount = tasks.filter(t => t.metadata?.archivedAt).length; - return ( ); } @@ -119,6 +113,13 @@ export function App() { const settings = useSettingsStore((state) => state.settings); const settingsLoading = useSettingsStore((state) => state.isLoading); + // API Profile state + const profiles = useSettingsStore((state) => state.profiles); + const activeProfileId = useSettingsStore((state) => state.activeProfileId); + + // Claude Profile state (OAuth) + const claudeProfiles = useClaudeProfileStore((state) => state.profiles); + // UI State const [selectedTask, setSelectedTask] = useState(null); const [isNewTaskDialogOpen, setIsNewTaskDialogOpen] = useState(false); @@ -167,6 +168,7 @@ export function App() { useEffect(() => { loadProjects(); loadSettings(); + loadProfiles(); // Initialize global GitHub listeners (PR reviews, etc.) so they persist across navigation initializeGitHubListeners(); // Initialize global download progress listener for Ollama model downloads @@ -239,10 +241,21 @@ export function App() { // First-run detection - show onboarding wizard if not completed // Only check AFTER settings have been loaded from disk to avoid race condition useEffect(() => { - if (settingsHaveLoaded && settings.onboardingCompleted === false) { + // Check if either auth method is configured + // API profiles: if profiles exist, auth is configured (user has gone through setup) + const hasAPIProfileConfigured = profiles.length > 0; + const hasOAuthConfigured = claudeProfiles.some(p => + p.oauthToken || (p.isDefault && p.configDir) + ); + const hasAnyAuth = hasAPIProfileConfigured || hasOAuthConfigured; + + // Only show wizard if onboarding not completed AND no auth is configured + if (settingsHaveLoaded && + settings.onboardingCompleted === false && + !hasAnyAuth) { setIsOnboardingWizardOpen(true); } - }, [settingsHaveLoaded, settings.onboardingCompleted]); + }, [settingsHaveLoaded, settings.onboardingCompleted, profiles, claudeProfiles]); // Sync i18n language with settings const { t, i18n } = useTranslation('dialogs'); @@ -700,7 +713,6 @@ export function App() { onProjectClose={handleProjectTabClose} onAddProject={handleAddProject} onSettingsClick={() => setIsSettingsDialogOpen(true)} - tasks={tasks} /> @@ -790,6 +802,19 @@ export function App() { }} /> )} + {activeView === 'ado-work-items' && (activeProjectId || selectedProjectId) && ( + { + setSettingsInitialProjectSection('integrations'); + setIsSettingsDialogOpen(true); + }} + /> + )} + {activeView === 'ado-pull-requests' && (activeProjectId || selectedProjectId) && ( +
+ ADO Pull Requests view coming soon +
+ )} {activeView === 'changelog' && (activeProjectId || selectedProjectId) && ( )} @@ -1001,6 +1026,9 @@ export function App() { {/* Global Download Indicator - shows Ollama model download progress */} + + {/* Toast notifications */} + diff --git a/apps/frontend/src/renderer/components/ADOSetupModal.tsx b/apps/frontend/src/renderer/components/ADOSetupModal.tsx new file mode 100644 index 0000000000..958300c96d --- /dev/null +++ b/apps/frontend/src/renderer/components/ADOSetupModal.tsx @@ -0,0 +1,390 @@ +import { useState, useEffect } from 'react'; +import { + Cloud, + Key, + Loader2, + CheckCircle2, + AlertCircle, + ExternalLink, + Building2, + FolderGit2, + GitBranch, +} from 'lucide-react'; +import { Button } from './ui/button'; +import { + Dialog, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogTitle, +} from './ui/dialog'; +import { Label } from './ui/label'; +import { Input } from './ui/input'; +import type { Project } from '../../shared/types'; + +interface ADOSetupModalProps { + open: boolean; + onOpenChange: (open: boolean) => void; + project: Project; + onComplete: (settings: { + adoOrganization: string; + adoProject: string; + adoRepoName: string; + adoPat: string; + adoInstanceUrl: string; + }) => void; + onSkip?: () => void; +} + +type SetupStep = 'credentials' | 'testing' | 'complete'; + +/** + * Azure DevOps Setup Modal + * + * Allows users to configure their ADO connection with: + * 1. Organization name + * 2. Project name + * 3. Repository name (optional, defaults to project) + * 4. Personal Access Token (PAT) + * 5. Instance URL (for on-prem, defaults to dev.azure.com) + */ +export function ADOSetupModal({ + open, + onOpenChange, + project, + onComplete, + onSkip, +}: ADOSetupModalProps) { + const [step, setStep] = useState('credentials'); + const [organization, setOrganization] = useState(''); + const [adoProject, setAdoProject] = useState(''); + const [repoName, setRepoName] = useState(''); + const [pat, setPat] = useState(''); + const [instanceUrl, setInstanceUrl] = useState('https://dev.azure.com'); + const [isOnPrem, setIsOnPrem] = useState(false); + const [isTesting, setIsTesting] = useState(false); + const [error, setError] = useState(null); + const [testSuccess, setTestSuccess] = useState(false); + + // Reset state when modal opens + useEffect(() => { + if (open) { + setStep('credentials'); + setOrganization(''); + setAdoProject(''); + setRepoName(''); + setPat(''); + setInstanceUrl('https://dev.azure.com'); + setIsOnPrem(false); + setError(null); + setTestSuccess(false); + setIsTesting(false); + } + }, [open]); + + // Test the connection + const testConnection = async () => { + if (!organization || !adoProject || !pat) { + setError('Please fill in all required fields'); + return; + } + + setIsTesting(true); + setError(null); + setStep('testing'); + + try { + // Call the ADO test connection IPC with credentials + const result = await window.electronAPI.ado.testADOConnection({ + organization, + project: adoProject, + repoName: repoName || adoProject, + pat, + instanceUrl, + }); + + if (result.success) { + setTestSuccess(true); + setStep('complete'); + } else { + setError(result.error || 'Failed to connect to Azure DevOps'); + setStep('credentials'); + } + } catch (err) { + setError(err instanceof Error ? 
err.message : 'Connection test failed'); + setStep('credentials'); + } finally { + setIsTesting(false); + } + }; + + // Save and complete setup + const handleComplete = () => { + onComplete({ + adoOrganization: organization, + adoProject: adoProject, + adoRepoName: repoName || adoProject, + adoPat: pat, + adoInstanceUrl: instanceUrl, + }); + }; + + // Save without running the connection test (stores the credentials as entered) + const handleSaveWithoutTest = () => { + if (!organization || !adoProject || !pat) { + setError('Please fill in all required fields'); + return; + } + handleComplete(); + }; + + const renderCredentialsStep = () => ( + <> + + + + Connect to Azure DevOps + + + Configure your Azure DevOps connection to sync work items and pull requests. + + +
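For orientation, a hedged sketch of the renderer-side flow this modal drives: verify the credentials over IPC, then pull open work items. The window.electronAPI.ado surface is registered in the preload index above; the credential values and projectId are placeholders:

async function connectAndListWorkItems(projectId: string, userEnteredPat: string) {
  const creds = {
    organization: 'myorganization',
    project: 'MyProject',
    repoName: 'MyProject',                 // defaults to the project name
    pat: userEnteredPat,                   // collected from the form; never hard-code
    instanceUrl: 'https://dev.azure.com',  // on-prem servers supply their own URL
  };

  const test = await window.electronAPI.ado.testADOConnection(creds);
  if (!test.success) return [];

  // Returns the normalized ADOWorkItem[] shape defined in ado-api.ts
  const items = await window.electronAPI.ado.getADOWorkItems(projectId, 'open');
  return items.success && items.data ? items.data : [];
}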
+ {/* PAT Help Link */} +
+
+ +
+

+ You'll need a Personal Access Token (PAT) +

+

+ Create one at your ADO organization settings with{' '} + Code (Read & Write) and{' '} + Work Items (Read & Write) scopes. +

+ + Create PAT on Azure DevOps + + +
+
+
+ + {/* Organization */} +
+ + setOrganization(e.target.value)} + placeholder="myorganization" + disabled={isTesting} + /> +

+ Your ADO organization name (from dev.azure.com/org-name) +

+
+ + {/* Project */} +
+ + setAdoProject(e.target.value)} + placeholder="MyProject" + disabled={isTesting} + /> +
+ + {/* Repository (optional) */} +
+ + setRepoName(e.target.value)} + placeholder={adoProject || 'Same as project name'} + disabled={isTesting} + /> +

+ Leave empty to use the project name as repo name +

+
+ + {/* PAT */} +
+ + setPat(e.target.value)} + placeholder="Enter your PAT" + disabled={isTesting} + /> +
+ + {/* On-prem toggle */} +
+
+ { + setIsOnPrem(e.target.checked); + if (!e.target.checked) { + setInstanceUrl('https://dev.azure.com'); + } else { + setInstanceUrl(''); + } + }} + className="rounded border-gray-300" + disabled={isTesting} + /> + +
+ + {isOnPrem && ( +
+ setInstanceUrl(e.target.value)} + placeholder="https://devops.yourcompany.com" + disabled={isTesting} + /> +

+ Your on-premises Azure DevOps Server URL +

+
+ )} +
+ + {error && ( +
+ + {error} +
+ )} +
+ + + {onSkip && ( + + )} + + + + + ); + + const renderTestingStep = () => ( + <> + + + + Testing Connection + + + +
+
+ +
+

+ Connecting to Azure DevOps... +

+

+ {organization}/{adoProject} +

+
+ + ); + + const renderCompleteStep = () => ( + <> + + + + Connection Successful + + + +
+
+ +
+

+ Successfully connected to Azure DevOps! +

+

+ {organization}/{adoProject} +

+
+ + + + + + ); + + return ( + + + {step === 'credentials' && renderCredentialsStep()} + {step === 'testing' && renderTestingStep()} + {step === 'complete' && renderCompleteStep()} + + + ); +} diff --git a/apps/frontend/src/renderer/components/ADOWorkItems.tsx b/apps/frontend/src/renderer/components/ADOWorkItems.tsx new file mode 100644 index 0000000000..cdfb1a2526 --- /dev/null +++ b/apps/frontend/src/renderer/components/ADOWorkItems.tsx @@ -0,0 +1,483 @@ +import { useState, useCallback, useMemo, useEffect } from 'react'; +import { + RefreshCw, + AlertCircle, + CheckCircle2, + Circle, + Search, + Settings, + Loader2, + Bug, + BookOpen, + Zap, + ChevronRight, + Tag, + User, + Calendar, + ExternalLink, + Cloud, +} from 'lucide-react'; +import { Button } from './ui/button'; +import { Input } from './ui/input'; +import { Badge } from './ui/badge'; +import { ScrollArea } from './ui/scroll-area'; +import { useProjectStore } from '../stores/project-store'; +import { ADOSetupModal } from './ADOSetupModal'; +import type { ADOWorkItem } from '../../preload/api/modules/ado-api'; + +// Work item type icons +const workItemTypeIcons: Record> = { + Bug: Bug, + Task: CheckCircle2, + 'User Story': BookOpen, + Feature: Zap, + Epic: Zap, +}; + +interface ADOWorkItemsProps { + onOpenSettings?: () => void; +} + +export function ADOWorkItems({ onOpenSettings }: ADOWorkItemsProps) { + const projects = useProjectStore((state) => state.projects); + const selectedProjectId = useProjectStore((state) => state.selectedProjectId); + const selectedProject = projects.find((p) => p.id === selectedProjectId); + + const [workItems, setWorkItems] = useState([]); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState(null); + const [isConnected, setIsConnected] = useState(null); + const [selectedWorkItem, setSelectedWorkItem] = useState(null); + const [searchQuery, setSearchQuery] = useState(''); + const [filterState, setFilterState] = useState<'open' | 'closed' | 'all'>('open'); + const [showSetupModal, setShowSetupModal] = useState(false); + + // Check connection status + const checkConnection = useCallback(async () => { + if (!selectedProject?.id) return; + + try { + const result = await window.electronAPI.ado.checkADOConnection(selectedProject.id); + setIsConnected(result.success); + if (!result.success) { + setError(result.error || 'Not connected to Azure DevOps'); + } + } catch (err) { + setIsConnected(false); + setError(err instanceof Error ? err.message : 'Failed to check connection'); + } + }, [selectedProject?.id]); + + // Fetch work items + const fetchWorkItems = useCallback(async () => { + if (!selectedProject?.id || !isConnected) return; + + setIsLoading(true); + setError(null); + + try { + const result = await window.electronAPI.ado.getADOWorkItems(selectedProject.id, filterState); + if (result.success && result.data) { + setWorkItems(result.data); + } else { + setError(result.error || 'Failed to fetch work items'); + } + } catch (err) { + setError(err instanceof Error ? 
err.message : 'Failed to fetch work items'); + } finally { + setIsLoading(false); + } + }, [selectedProject?.id, isConnected, filterState]); + + // Initial load + useEffect(() => { + checkConnection(); + }, [checkConnection]); + + // Fetch work items when connected or filter changes + useEffect(() => { + if (isConnected) { + fetchWorkItems(); + } + }, [fetchWorkItems, isConnected]); + + // Filter work items by search query + const filteredWorkItems = useMemo(() => { + if (!searchQuery) return workItems; + const query = searchQuery.toLowerCase(); + return workItems.filter( + (wi) => + wi.title.toLowerCase().includes(query) || + wi.id.toString().includes(query) || + wi.workItemType.toLowerCase().includes(query) || + wi.tags.some((tag) => tag.toLowerCase().includes(query)) + ); + }, [workItems, searchQuery]); + + // Handle refresh + const handleRefresh = useCallback(() => { + checkConnection(); + if (isConnected) { + fetchWorkItems(); + } + }, [checkConnection, fetchWorkItems, isConnected]); + + // Handle setup complete + const handleSetupComplete = useCallback(async (settings: { + adoOrganization: string; + adoProject: string; + adoRepoName: string; + adoPat: string; + adoInstanceUrl: string; + }) => { + if (!selectedProject?.id) return; + + try { + // Save ADO settings to project .env file + await window.electronAPI.updateProjectEnv(selectedProject.id, { + adoEnabled: true, + adoOrganization: settings.adoOrganization, + adoProject: settings.adoProject, + adoRepoName: settings.adoRepoName, + adoPat: settings.adoPat, + adoInstanceUrl: settings.adoInstanceUrl, + }); + + setShowSetupModal(false); + // Re-check connection after settings are saved + setTimeout(() => { + handleRefresh(); + }, 100); + } catch (error) { + console.error('Failed to save ADO settings:', error); + } + }, [selectedProject?.id, handleRefresh]); + + // Not connected state + if (isConnected === false) { + return ( +
+
+ +
+

Connect to Azure DevOps

+

+ {error || 'Configure your Azure DevOps connection to view and manage work items.'} +

+
+ + {onOpenSettings && ( + + )} +
+ + {selectedProject && ( + setShowSetupModal(false)} + /> + )} +
+ ); + } + + // Loading initial connection + if (isConnected === null) { + return ( +
+ +
+ ); + } + + return ( +
+ {/* Header */} +
+
+
+ +

Azure DevOps Work Items

+ + {filteredWorkItems.length} items + +
+
+ + +
+
+ + {/* Search and Filter */} +
+
+ + setSearchQuery(e.target.value)} + className="pl-8" + /> +
+
+ {(['open', 'closed', 'all'] as const).map((state) => ( + + ))} +
+
+
+ + {/* Content */} +
+ {/* Work Items List */} +
+ {error ? ( +
+
+ +

{error}

+ +
+
+ ) : filteredWorkItems.length === 0 ? ( +
+
+ +

+ {searchQuery ? 'No matching work items' : 'No work items found'} +

+
+
+ ) : ( + +
+ {filteredWorkItems.map((wi) => { + const TypeIcon = workItemTypeIcons[wi.workItemType] || Circle; + const isSelected = selectedWorkItem?.id === wi.id; + + return ( + + ); + })} +
+
+ )} +
+ + {/* Work Item Detail */} +
+ {selectedWorkItem ? ( + + ) : ( +
+
+ +

Select a work item to view details

+
+
+ )} +
+
+ + {/* Setup Modal */} + {selectedProject && ( + setShowSetupModal(false)} + /> + )} +
+ ); +} + +// Work Item Detail Component +function WorkItemDetail({ workItem }: { workItem: ADOWorkItem }) { + const TypeIcon = workItemTypeIcons[workItem.workItemType] || Circle; + + return ( + +
+ {/* Header */} +
+
+
+ + + {workItem.workItemType} #{workItem.id} + + + {workItem.state} + +
+

{workItem.title}

+
+ +
+ + {/* Metadata */} +
+
+ + Author: + {workItem.author.displayName} +
+ {workItem.assignees.length > 0 && ( +
+ + Assigned: + {workItem.assignees.map((a) => a.displayName).join(', ')} +
+ )} +
+ + Created: + {new Date(workItem.createdAt).toLocaleDateString()} +
+ {workItem.iteration && ( +
+ + Iteration: + {workItem.iteration.split('\\').pop()} +
+ )} +
+ + {/* Tags */} + {workItem.tags.length > 0 && ( +
+
+ + Tags: +
+
+ {workItem.tags.map((tag) => ( + + {tag} + + ))} +
+
+ )} + + {/* Description */} + {workItem.body && ( +
+

Description

+
+
+ )} +
+ + ); +} diff --git a/apps/frontend/src/renderer/components/AddFeatureDialog.tsx b/apps/frontend/src/renderer/components/AddFeatureDialog.tsx index d29e2b977e..d139298b93 100644 --- a/apps/frontend/src/renderer/components/AddFeatureDialog.tsx +++ b/apps/frontend/src/renderer/components/AddFeatureDialog.tsx @@ -208,6 +208,7 @@ export function AddFeatureDialog({ value={title} onChange={(e) => setTitle(e.target.value)} disabled={isSaving} + aria-required="true" />
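The aria-required additions in this and the following dialogs apply one pattern: announce the field as mandatory to assistive technology without opting into native browser validation, which the `required` attribute would trigger and which would bypass the dialogs' custom error handling. A minimal standalone illustration, with a plain input standing in for the project's Input component:

import { useState } from 'react';

export function TitleField({ disabled }: { disabled: boolean }) {
  const [title, setTitle] = useState('');
  return (
    <input
      value={title}
      onChange={(e) => setTitle(e.target.value)}
      disabled={disabled}
      aria-required="true" // announced by screen readers; no native validation UI
    />
  );
}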
@@ -223,6 +224,7 @@ export function AddFeatureDialog({ onChange={(e) => setDescription(e.target.value)} rows={3} disabled={isSaving} + aria-required="true" /> @@ -253,7 +255,7 @@ export function AddFeatureDialog({ onValueChange={setPhaseId} disabled={isSaving} > - + @@ -338,7 +340,7 @@ export function AddFeatureDialog({ {/* Error */} {error && ( -
+
{error}
diff --git a/apps/frontend/src/renderer/components/AddProjectModal.tsx b/apps/frontend/src/renderer/components/AddProjectModal.tsx index fa8db82c41..852f3febcd 100644 --- a/apps/frontend/src/renderer/components/AddProjectModal.tsx +++ b/apps/frontend/src/renderer/components/AddProjectModal.tsx @@ -167,6 +167,7 @@ export function AddProjectModal({ open, onOpenChange, onProjectAdded }: AddProje 'bg-card hover:bg-accent hover:border-accent transition-all duration-200', 'text-left group' )} + aria-label={t('addProject.openExistingAriaLabel')} >
@@ -188,6 +189,7 @@ export function AddProjectModal({ open, onOpenChange, onProjectAdded }: AddProje 'bg-card hover:bg-accent hover:border-accent transition-all duration-200', 'text-left group' )} + aria-label={t('addProject.createNewAriaLabel')} >
@@ -203,7 +205,7 @@ export function AddProjectModal({ open, onOpenChange, onProjectAdded }: AddProje
{error && ( -
+
{error}
)} @@ -272,7 +274,7 @@ export function AddProjectModal({ open, onOpenChange, onProjectAdded }: AddProje
{error && ( -
+
{error}
)} diff --git a/apps/frontend/src/renderer/components/AgentProfileSelector.tsx b/apps/frontend/src/renderer/components/AgentProfileSelector.tsx index 6d23cb97ad..fa74affa25 100644 --- a/apps/frontend/src/renderer/components/AgentProfileSelector.tsx +++ b/apps/frontend/src/renderer/components/AgentProfileSelector.tsx @@ -96,23 +96,18 @@ export function AgentProfileSelector({ if (selectedId === 'custom') { // Keep current model/thinking level, just mark as custom onProfileChange('custom', model as ModelType || 'sonnet', thinkingLevel as ThinkingLevel || 'medium'); - } else if (selectedId === 'auto') { - // Auto profile - set defaults - const autoProfile = DEFAULT_AGENT_PROFILES.find(p => p.id === 'auto'); - if (autoProfile) { - onProfileChange('auto', autoProfile.model, autoProfile.thinkingLevel); - // Initialize phase configs with defaults if callback provided - if (onPhaseModelsChange && autoProfile.phaseModels) { - onPhaseModelsChange(autoProfile.phaseModels); - } - if (onPhaseThinkingChange && autoProfile.phaseThinking) { - onPhaseThinkingChange(autoProfile.phaseThinking); - } - } } else { + // Select preset profile - all profiles now have phase configs const profile = DEFAULT_AGENT_PROFILES.find(p => p.id === selectedId); if (profile) { onProfileChange(profile.id, profile.model, profile.thinkingLevel); + // Initialize phase configs with profile defaults if callbacks provided + if (onPhaseModelsChange && profile.phaseModels) { + onPhaseModelsChange(profile.phaseModels); + } + if (onPhaseThinkingChange && profile.phaseThinking) { + onPhaseThinkingChange(profile.phaseThinking); + } } } }; @@ -193,10 +188,7 @@ export function AgentProfileSelector({
{profile.name} - {profile.isAutoProfile - ? '(per-phase optimization)' - : `(${modelLabel} + ${profile.thinkingLevel})` - } + ({modelLabel} + {profile.thinkingLevel})
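The handler refactor above deletes the special-cased 'auto' branch: every preset profile is now expected to carry `phaseModels`/`phaseThinking`, so a single code path applies them. A condensed sketch mirroring the refactored handler, with stand-in types for the app's ModelType/ThinkingLevel/AgentProfile definitions:

```ts
// Stand-in types; the real definitions live in the app's shared types.
type ModelType = 'haiku' | 'sonnet' | 'opus';
type ThinkingLevel = 'low' | 'medium' | 'high';

interface AgentProfile {
  id: string;
  model: ModelType;
  thinkingLevel: ThinkingLevel;
  phaseModels?: Record<string, ModelType>;
  phaseThinking?: Record<string, ThinkingLevel>;
}

// One code path for all presets: apply the profile, then push its per-phase
// defaults through the optional callbacks.
function applyPreset(
  selectedId: string,
  profiles: AgentProfile[],
  onProfileChange: (id: string, model: ModelType, thinking: ThinkingLevel) => void,
  onPhaseModelsChange?: (models: Record<string, ModelType>) => void,
  onPhaseThinkingChange?: (thinking: Record<string, ThinkingLevel>) => void
): void {
  const profile = profiles.find((p) => p.id === selectedId);
  if (!profile) return;
  onProfileChange(profile.id, profile.model, profile.thinkingLevel);
  if (onPhaseModelsChange && profile.phaseModels) onPhaseModelsChange(profile.phaseModels);
  if (onPhaseThinkingChange && profile.phaseThinking) onPhaseThinkingChange(profile.phaseThinking);
}
```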
@@ -221,8 +213,8 @@ export function AgentProfileSelector({

- {/* Auto Profile - Phase Configuration */} - {isAuto && ( + {/* Phase Configuration - shown for all preset profiles */} + {!isCustom && (
{/* Clickable Header */} + + +
+
+ Authentication + {isOAuth ? 'OAuth' : 'API Profile'} +
+ {!isOAuth && authStatus.name && ( + <> +
+
+ Using profile: {authStatus.name} +
+ + )} +
+ + + + ); +} diff --git a/apps/frontend/src/renderer/components/ClaudeCodeStatusBadge.tsx b/apps/frontend/src/renderer/components/ClaudeCodeStatusBadge.tsx index 0674400602..726982faf5 100644 --- a/apps/frontend/src/renderer/components/ClaudeCodeStatusBadge.tsx +++ b/apps/frontend/src/renderer/components/ClaudeCodeStatusBadge.tsx @@ -294,9 +294,10 @@ export function ClaudeCodeStatusBadge({ className }: ClaudeCodeStatusBadgeProps) size="sm" className="w-full text-xs text-muted-foreground gap-1" onClick={() => window.electronAPI?.openExternal?.('https://claude.ai/code')} + aria-label={t('navigation:claudeCode.learnMoreAriaLabel', 'Learn more about Claude Code (opens in new window)')} > {t('navigation:claudeCode.learnMore', 'Learn more about Claude Code')} - +
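The badge's aria-label uses the two-argument form of `t`, where the second argument is a default value returned when the key is missing from the loaded namespace, so the label degrades gracefully before translations land. A minimal self-contained demonstration:

```ts
import i18next from 'i18next';

// Demonstrates the t(key, defaultValue) fallback used above: with no entry in
// the namespace, i18next returns the supplied default string.
async function demo(): Promise<void> {
  await i18next.init({
    lng: 'en',
    ns: ['navigation'],
    defaultNS: 'navigation',
    resources: { en: { navigation: {} } },
  });
  const label = i18next.t(
    'claudeCode.learnMoreAriaLabel',
    'Learn more about Claude Code (opens in new window)'
  );
  console.log(label); // -> the default string
}

void demo();
```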
diff --git a/apps/frontend/src/renderer/components/CompetitorAnalysisViewer.tsx b/apps/frontend/src/renderer/components/CompetitorAnalysisViewer.tsx index c71043d72b..53f47767f7 100644 --- a/apps/frontend/src/renderer/components/CompetitorAnalysisViewer.tsx +++ b/apps/frontend/src/renderer/components/CompetitorAnalysisViewer.tsx @@ -1,3 +1,4 @@ +import { useTranslation } from 'react-i18next'; import { TrendingUp, ExternalLink, AlertCircle } from 'lucide-react'; import { Dialog, @@ -21,6 +22,8 @@ export function CompetitorAnalysisViewer({ open, onOpenChange, }: CompetitorAnalysisViewerProps) { + const { t } = useTranslation('common'); + if (!analysis) return null; return ( @@ -66,9 +69,11 @@ export function CompetitorAnalysisViewer({ target="_blank" rel="noopener noreferrer" className="text-primary hover:underline flex items-center gap-1 text-sm ml-4" + aria-label={t('accessibility.visitExternalLink', { name: competitor.name })} > - +
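Here the accessible name is interpolated: the translation string carries a `{{name}}` placeholder and the call site passes `{ name: competitor.name }`. A runnable sketch — only the key and the options shape come from the diff; the English template is assumed:

```ts
import i18next from 'i18next';

// Interpolation sketch for accessibility.visitExternalLink.
async function demo(): Promise<void> {
  await i18next.init({
    lng: 'en',
    ns: ['common'],
    defaultNS: 'common',
    resources: {
      en: {
        common: {
          accessibility: {
            visitExternalLink: 'Visit {{name}} (opens in a new window)',
          },
        },
      },
    },
  });
  console.log(i18next.t('accessibility.visitExternalLink', { name: 'Acme Corp' }));
  // -> "Visit Acme Corp (opens in a new window)"
}

void demo();
```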
diff --git a/apps/frontend/src/renderer/components/EnvConfigModal.tsx b/apps/frontend/src/renderer/components/EnvConfigModal.tsx index f7c95bff4d..f35138b819 100644 --- a/apps/frontend/src/renderer/components/EnvConfigModal.tsx +++ b/apps/frontend/src/renderer/components/EnvConfigModal.tsx @@ -1,4 +1,4 @@ -import { useState, useEffect } from 'react'; +import { useState, useEffect, useCallback } from 'react'; import { AlertCircle, Key, @@ -13,6 +13,7 @@ import { ChevronDown, ChevronRight } from 'lucide-react'; +import { useSettingsStore } from '../stores/settings-store'; import { Dialog, DialogContent, @@ -592,35 +593,51 @@ export function EnvConfigModal({ /** * Hook to check if the Claude token is configured * Returns { hasToken, isLoading, checkToken } + * + * This combines two sources of authentication: + * 1. OAuth token from source .env (checked via checkSourceToken) + * 2. Active API profile (custom Anthropic-compatible endpoint) */ export function useClaudeTokenCheck() { const [hasToken, setHasToken] = useState(null); const [isLoading, setIsLoading] = useState(true); const [error, setError] = useState(null); - const checkToken = async () => { + // Get active API profile from settings store + const activeProfileId = useSettingsStore((state) => state.activeProfileId); + + const checkToken = useCallback(async () => { setIsLoading(true); setError(null); + // Compute once - activeProfileId is captured from closure + const hasAPIProfile = !!activeProfileId; + try { const result = await window.electronAPI.checkSourceToken(); - if (result.success && result.data) { - setHasToken(result.data.hasToken); - } else { - setHasToken(false); + const hasSourceOAuthToken = result.success && result.data?.hasToken; + + // Auth is valid if either OAuth token OR API profile exists + setHasToken(hasSourceOAuthToken || hasAPIProfile); + + // Set error if OAuth check failed and no API profile fallback + if (!result.success && !hasAPIProfile) { setError(result.error || 'Failed to check token'); } } catch (err) { - setHasToken(false); - setError(err instanceof Error ? err.message : 'Unknown error'); + // Even if OAuth check fails, API profile is still valid auth + setHasToken(hasAPIProfile); + if (!hasAPIProfile) { + setError(err instanceof Error ? 
err.message : 'Unknown error'); + } } finally { setIsLoading(false); } - }; + }, [activeProfileId]); useEffect(() => { checkToken(); - }, []); + }, [checkToken]); // Re-check when checkToken changes (i.e., when activeProfileId changes) return { hasToken, isLoading, error, checkToken }; } diff --git a/apps/frontend/src/renderer/components/FileExplorerPanel.tsx b/apps/frontend/src/renderer/components/FileExplorerPanel.tsx index e7c0b98042..5c598d842f 100644 --- a/apps/frontend/src/renderer/components/FileExplorerPanel.tsx +++ b/apps/frontend/src/renderer/components/FileExplorerPanel.tsx @@ -1,4 +1,5 @@ import { motion, AnimatePresence } from 'motion/react'; +import { useTranslation } from 'react-i18next'; import { X, FolderTree, RefreshCw } from 'lucide-react'; import { Button } from './ui/button'; import { ScrollArea } from './ui/scroll-area'; @@ -34,6 +35,7 @@ const contentVariants = { }; export function FileExplorerPanel({ projectPath }: FileExplorerPanelProps) { + const { t } = useTranslation('common'); const { isOpen, close, clearCache, loadDirectory } = useFileExplorerStore(); const handleRefresh = () => { @@ -80,17 +82,18 @@ export function FileExplorerPanel({ projectPath }: FileExplorerPanelProps) { size="icon" className="h-6 w-6" onClick={handleRefresh} - title="Refresh" + aria-label={t('buttons.refresh')} > - +
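The useClaudeTokenCheck refactor above is the classic stale-closure fix: `checkToken` is memoized with `useCallback` keyed on `activeProfileId`, and the effect depends on the callback, so switching API profiles re-runs the check automatically. (Note the hook returns `error` in addition to the fields its doc comment lists.) A condensed, self-contained sketch — `fetchOAuthToken` stands in for the `window.electronAPI.checkSourceToken` IPC call and its shape is an assumption:

```tsx
import { useState, useEffect, useCallback } from 'react';

// Stand-in for window.electronAPI.checkSourceToken (assumed shape).
declare function fetchOAuthToken(): Promise<{
  success: boolean;
  hasToken?: boolean;
  error?: string;
}>;

export function useAuthCheck(activeProfileId: string | null) {
  const [hasToken, setHasToken] = useState<boolean | null>(null);
  const [isLoading, setIsLoading] = useState(true);
  const [error, setError] = useState<string | null>(null);

  const checkToken = useCallback(async () => {
    setIsLoading(true);
    setError(null);
    const hasAPIProfile = !!activeProfileId; // fresh per memoized callback
    try {
      const result = await fetchOAuthToken();
      // Either auth source is sufficient.
      setHasToken((result.success && !!result.hasToken) || hasAPIProfile);
      if (!result.success && !hasAPIProfile) {
        setError(result.error ?? 'Failed to check token');
      }
    } catch (err) {
      setHasToken(hasAPIProfile); // an API profile is still valid auth
      if (!hasAPIProfile) {
        setError(err instanceof Error ? err.message : 'Unknown error');
      }
    } finally {
      setIsLoading(false);
    }
  }, [activeProfileId]);

  // New callback identity whenever activeProfileId changes -> effect re-runs.
  useEffect(() => {
    void checkToken();
  }, [checkToken]);

  return { hasToken, isLoading, error, checkToken };
}
```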
diff --git a/apps/frontend/src/renderer/components/FileTreeItem.tsx b/apps/frontend/src/renderer/components/FileTreeItem.tsx index e0af5b9486..d6273b5a2b 100644 --- a/apps/frontend/src/renderer/components/FileTreeItem.tsx +++ b/apps/frontend/src/renderer/components/FileTreeItem.tsx @@ -1,4 +1,5 @@ -import { useState, useRef, useEffect, type DragEvent } from 'react'; +import { useState, useRef, useEffect, type DragEvent, type KeyboardEvent } from 'react'; +import { useTranslation } from 'react-i18next'; import { ChevronRight, ChevronDown, Folder, File, FileCode, FileJson, FileText, FileImage, Loader2 } from 'lucide-react'; import { cn } from '../lib/utils'; import type { FileNode } from '../../shared/types'; @@ -70,6 +71,7 @@ export function FileTreeItem({ isLoading, onToggle, }: FileTreeItemProps) { + const { t } = useTranslation('common'); const [isDragging, setIsDragging] = useState(false); const dragImageRef = useRef(null); @@ -98,6 +100,16 @@ export function FileTreeItem({ } }; + const handleKeyDown = (e: KeyboardEvent) => { + if (e.key === 'Enter' || e.key === ' ') { + e.preventDefault(); + e.stopPropagation(); + if (node.isDirectory) { + onToggle(); + } + } + }; + const handleDragStart = (e: DragEvent) => { e.stopPropagation(); setIsDragging(true); @@ -147,37 +159,47 @@ export function FileTreeItem({ return (
{/* Expand/collapse chevron for directories */} {node.isDirectory ? ( ) : ( - +
) : ( -
+
{/* Personal account */} {githubUsername && ( diff --git a/apps/frontend/src/renderer/components/Insights.tsx b/apps/frontend/src/renderer/components/Insights.tsx index 72e01a9af8..3f3a9b5fe6 100644 --- a/apps/frontend/src/renderer/components/Insights.tsx +++ b/apps/frontend/src/renderer/components/Insights.tsx @@ -1,4 +1,5 @@ -import { useState, useEffect, useRef } from 'react'; +import { useState, useEffect, useRef, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; import { MessageSquare, Send, @@ -15,7 +16,7 @@ import { PanelLeftClose, PanelLeft } from 'lucide-react'; -import ReactMarkdown from 'react-markdown'; +import ReactMarkdown, { type Components } from 'react-markdown'; import remarkGfm from 'remark-gfm'; import { Button } from './ui/button'; import { Textarea } from './ui/textarea'; @@ -46,42 +47,40 @@ import { TASK_COMPLEXITY_COLORS } from '../../shared/constants'; -// Safe link renderer for ReactMarkdown to prevent phishing and ensure external links open safely -const SafeLink = ({ href, children, ...props }: React.AnchorHTMLAttributes) => { - // Validate URL - only allow http, https, and relative links - const isValidUrl = href && ( - href.startsWith('http://') || - href.startsWith('https://') || - href.startsWith('/') || - href.startsWith('#') - ); - - if (!isValidUrl) { - // For invalid or potentially malicious URLs, render as plain text - return {children}; - } - - // External links get security attributes - const isExternal = href?.startsWith('http://') || href?.startsWith('https://'); - - return ( - - {children} - - ); -}; +// createSafeLink - factory function that creates a SafeLink component with i18n support +const createSafeLink = (opensInNewWindowText: string) => { + return function SafeLink({ href, children, ...props }: React.AnchorHTMLAttributes) { + // Validate URL - only allow http, https, and relative links + const isValidUrl = href && ( + href.startsWith('http://') || + href.startsWith('https://') || + href.startsWith('/') || + href.startsWith('#') + ); + + if (!isValidUrl) { + // For invalid or potentially malicious URLs, render as plain text + return {children}; + } -// Markdown components with safe link rendering -const markdownComponents = { - a: SafeLink, + // External links get security attributes and accessibility indicator + const isExternal = href?.startsWith('http://') || href?.startsWith('https://'); + + return ( + + {children} + {isExternal && {opensInNewWindowText}} + + ); + }; }; interface InsightsProps { @@ -89,6 +88,7 @@ interface InsightsProps { } export function Insights({ projectId }: InsightsProps) { + const { t } = useTranslation('common'); const session = useInsightsStore((state) => state.session); const sessions = useInsightsStore((state) => state.sessions); const status = useInsightsStore((state) => state.status); @@ -96,6 +96,11 @@ export function Insights({ projectId }: InsightsProps) { const currentTool = useInsightsStore((state) => state.currentTool); const isLoadingSessions = useInsightsStore((state) => state.isLoadingSessions); + // Create markdown components with translated accessibility text + const markdownComponents = useMemo(() => ({ + a: createSafeLink(t('accessibility.opensInNewWindow')), + }), [t]); + const [inputValue, setInputValue] = useState(''); const [creatingTask, setCreatingTask] = useState(null); const [taskCreated, setTaskCreated] = useState>(new Set()); @@ -295,6 +300,7 @@ export function Insights({ projectId }: InsightsProps) { handleCreateTask(message)} isCreatingTask={creatingTask === 
message.id} taskCreated={taskCreated.has(message.id)} @@ -387,6 +393,7 @@ export function Insights({ projectId }: InsightsProps) { interface MessageBubbleProps { message: InsightsChatMessage; + markdownComponents: Components; onCreateTask: () => void; isCreatingTask: boolean; taskCreated: boolean; @@ -394,6 +401,7 @@ interface MessageBubbleProps { function MessageBubble({ message, + markdownComponents, onCreateTask, isCreatingTask, taskCreated diff --git a/apps/frontend/src/renderer/components/KanbanBoard.tsx b/apps/frontend/src/renderer/components/KanbanBoard.tsx index de2ad394f4..4eb3c134ab 100644 --- a/apps/frontend/src/renderer/components/KanbanBoard.tsx +++ b/apps/frontend/src/renderer/components/KanbanBoard.tsx @@ -22,6 +22,7 @@ import { import { Plus, Inbox, Loader2, Eye, CheckCircle2, Archive, RefreshCw } from 'lucide-react'; import { ScrollArea } from './ui/scroll-area'; import { Button } from './ui/button'; +import { Tooltip, TooltipContent, TooltipTrigger } from './ui/tooltip'; import { TaskCard } from './TaskCard'; import { SortableTaskCard } from './SortableTaskCard'; import { TASK_STATUS_COLUMNS, TASK_STATUS_LABELS } from '../../shared/constants'; @@ -41,9 +42,13 @@ interface DroppableColumnProps { status: TaskStatus; tasks: Task[]; onTaskClick: (task: Task) => void; + onStatusChange: (taskId: string, newStatus: TaskStatus) => unknown; isOver: boolean; onAddClick?: () => void; onArchiveAll?: () => void; + archivedCount?: number; + showArchived?: boolean; + onToggleArchived?: () => void; } /** @@ -81,8 +86,12 @@ function droppableColumnPropsAreEqual( if (prevProps.status !== nextProps.status) return false; if (prevProps.isOver !== nextProps.isOver) return false; if (prevProps.onTaskClick !== nextProps.onTaskClick) return false; + if (prevProps.onStatusChange !== nextProps.onStatusChange) return false; if (prevProps.onAddClick !== nextProps.onAddClick) return false; if (prevProps.onArchiveAll !== nextProps.onArchiveAll) return false; + if (prevProps.archivedCount !== nextProps.archivedCount) return false; + if (prevProps.showArchived !== nextProps.showArchived) return false; + if (prevProps.onToggleArchived !== nextProps.onToggleArchived) return false; // Deep compare tasks const tasksEqual = tasksAreEquivalent(prevProps.tasks, nextProps.tasks); @@ -136,8 +145,8 @@ const getEmptyStateContent = (status: TaskStatus, t: (key: string) => string): { } }; -const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskClick, isOver, onAddClick, onArchiveAll }: DroppableColumnProps) { - const { t } = useTranslation('tasks'); +const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskClick, onStatusChange, isOver, onAddClick, onArchiveAll, archivedCount, showArchived, onToggleArchived }: DroppableColumnProps) { + const { t } = useTranslation(['tasks', 'common']); const { setNodeRef } = useDroppable({ id: status }); @@ -154,6 +163,15 @@ const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskCli return handlers; }, [tasks, onTaskClick]); + // Create stable onStatusChange handlers for each task + const onStatusChangeHandlers = useMemo(() => { + const handlers = new Map unknown>(); + tasks.forEach((task) => { + handlers.set(task.id, (newStatus: TaskStatus) => onStatusChange(task.id, newStatus)); + }); + return handlers; + }, [tasks, onStatusChange]); + // Memoize task card elements to prevent recreation on every render const taskCards = useMemo(() => { if (tasks.length === 0) return null; @@ -162,9 +180,10 @@ const DroppableColumn = 
memo(function DroppableColumn({ status, tasks, onTaskCli key={task.id} task={task} onClick={onClickHandlers.get(task.id)!} + onStatusChange={onStatusChangeHandlers.get(task.id)} /> )); - }, [tasks, onClickHandlers]); + }, [tasks, onClickHandlers, onStatusChangeHandlers]); const getColumnBorderColor = (): string => { switch (status) { @@ -199,7 +218,7 @@ const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskCli

- {TASK_STATUS_LABELS[status]} + {t(TASK_STATUS_LABELS[status])}

{tasks.length} @@ -212,21 +231,48 @@ const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskCli size="icon" className="h-7 w-7 hover:bg-primary/10 hover:text-primary transition-colors" onClick={onAddClick} + aria-label={t('kanban.addTaskAriaLabel')} > )} - {status === 'done' && onArchiveAll && tasks.length > 0 && ( + {status === 'done' && onArchiveAll && tasks.length > 0 && !showArchived && ( )} + {status === 'done' && archivedCount !== undefined && archivedCount > 0 && onToggleArchived && ( + + + + + + {showArchived ? t('common:projectTab.hideArchived') : t('common:projectTab.showArchived')} + + + )}
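DroppableColumn builds a `Map` of per-task callbacks inside `useMemo` (see the onClick and onStatusChange handler maps earlier in this file), so each SortableTaskCard receives a referentially stable function and its memo comparator can bail out of re-renders. The pattern in isolation — the status names here are a subset of the app's real TaskStatus union:

```tsx
import { useMemo } from 'react';

// Subset of the app's TaskStatus union, enough for the sketch.
type TaskStatus = 'backlog' | 'in_progress' | 'done';
interface Task { id: string; status: TaskStatus; }

// Handlers are rebuilt only when `tasks` or `onStatusChange` change, so each
// card keeps the same function reference across unrelated parent renders.
export function usePerTaskStatusHandlers(
  tasks: Task[],
  onStatusChange: (taskId: string, next: TaskStatus) => unknown
) {
  return useMemo(() => {
    const handlers = new Map<string, (next: TaskStatus) => unknown>();
    for (const task of tasks) {
      handlers.set(task.id, (next) => onStatusChange(task.id, next));
    }
    return handlers;
  }, [tasks, onStatusChange]);
}
```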
@@ -277,11 +323,17 @@ const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskCli ); }, droppableColumnPropsAreEqual); -export function KanbanBoard({ tasks, onTaskClick, onNewTaskClick }: KanbanBoardProps) { +export function KanbanBoard({ tasks, onTaskClick, onNewTaskClick, onRefresh, isRefreshing }: KanbanBoardProps) { const { t } = useTranslation('tasks'); const [activeTask, setActiveTask] = useState(null); const [overColumnId, setOverColumnId] = useState(null); - const { showArchived } = useViewState(); + const { showArchived, toggleShowArchived } = useViewState(); + + // Calculate archived count for Done column button + const archivedCount = useMemo(() => + tasks.filter(t => t.metadata?.archivedAt).length, + [tasks] + ); // Filter tasks based on archive status const filteredTasks = useMemo(() => { @@ -412,6 +464,21 @@ export function KanbanBoard({ tasks, onTaskClick, onNewTaskClick }: KanbanBoardP return (
+ {/* Kanban header with refresh button */} + {onRefresh && ( +
+ +
+ )} {/* Kanban columns */} ))}
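KanbanBoard leans on `React.memo` with hand-written comparators (`droppableColumnPropsAreEqual`, `taskCardPropsAreEqual`, `sortableTaskCardPropsAreEqual`). The comparator returns `true` to skip a re-render, which is why this diff adds a check for every new prop: an uncompared prop silently freezes at its first value. The shape of the pattern, with illustrative props:

```tsx
import React, { memo } from 'react';

interface ColumnProps {
  isOver: boolean;
  archivedCount?: number;
  showArchived?: boolean;
  onToggleArchived?: () => void;
}

// Returning true means "equal, skip re-render" - every prop that can change
// must appear here, or updates to it will be ignored.
function propsAreEqual(prev: ColumnProps, next: ColumnProps): boolean {
  return (
    prev.isOver === next.isOver &&
    prev.archivedCount === next.archivedCount &&
    prev.showArchived === next.showArchived &&
    prev.onToggleArchived === next.onToggleArchived
  );
}

export const Column = memo(function Column({ archivedCount = 0 }: ColumnProps) {
  return <span>{archivedCount} archived</span>;
}, propsAreEqual);
```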
diff --git a/apps/frontend/src/renderer/components/ProjectTabBar.tsx b/apps/frontend/src/renderer/components/ProjectTabBar.tsx index ef6e34d25d..7836b8c77a 100644 --- a/apps/frontend/src/renderer/components/ProjectTabBar.tsx +++ b/apps/frontend/src/renderer/components/ProjectTabBar.tsx @@ -1,4 +1,5 @@ import { useEffect } from 'react'; +import { useTranslation } from 'react-i18next'; import { Plus } from 'lucide-react'; import { cn } from '../lib/utils'; import { Button } from './ui/button'; @@ -15,9 +16,6 @@ interface ProjectTabBarProps { className?: string; // Control props for active tab onSettingsClick?: () => void; - showArchived?: boolean; - archivedCount?: number; - onToggleArchived?: () => void; } export function ProjectTabBar({ @@ -27,11 +25,10 @@ export function ProjectTabBar({ onProjectClose, onAddProject, className, - onSettingsClick, - showArchived, - archivedCount, - onToggleArchived + onSettingsClick }: ProjectTabBarProps) { + const { t } = useTranslation('common'); + // Keyboard shortcuts for tab navigation useEffect(() => { const handleKeyDown = (e: KeyboardEvent) => { @@ -109,9 +106,6 @@ export function ProjectTabBar({ }} // Pass control props only for active tab onSettingsClick={isActiveTab ? onSettingsClick : undefined} - showArchived={isActiveTab ? showArchived : undefined} - archivedCount={isActiveTab ? archivedCount : undefined} - onToggleArchived={isActiveTab ? onToggleArchived : undefined} /> ); })} @@ -124,7 +118,7 @@ export function ProjectTabBar({ size="icon" className="h-8 w-8" onClick={onAddProject} - title="Add Project" + aria-label={t('projectTab.addProjectAriaLabel')} > diff --git a/apps/frontend/src/renderer/components/RateLimitModal.tsx b/apps/frontend/src/renderer/components/RateLimitModal.tsx index a364016595..b19c842afd 100644 --- a/apps/frontend/src/renderer/components/RateLimitModal.tsx +++ b/apps/frontend/src/renderer/components/RateLimitModal.tsx @@ -375,9 +375,11 @@ export function RateLimitModal() { size="sm" className="gap-2" onClick={handleUpgrade} + aria-label={t('accessibility.upgradeSubscriptionAriaLabel')} > - +
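With the archive toggle relocated into the Done column, ProjectTabBar and SortableProjectTab no longer thread `showArchived`/`archivedCount`/`onToggleArchived` through props; KanbanBoard reads the flag from the shared `useViewState` hook instead. A sketch of what that hook could look like, assuming a Zustand store — only the `showArchived` and `toggleShowArchived` names are attested in the diff:

```ts
import { create } from 'zustand';

// Hypothetical view-state store backing useViewState; everything beyond
// showArchived/toggleShowArchived is illustrative.
interface ViewState {
  showArchived: boolean;
  toggleShowArchived: () => void;
}

export const useViewState = create<ViewState>((set) => ({
  showArchived: false,
  toggleShowArchived: () => set((s) => ({ showArchived: !s.showArchived })),
}));
```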
diff --git a/apps/frontend/src/renderer/components/Sidebar.tsx b/apps/frontend/src/renderer/components/Sidebar.tsx index ce40585225..a2432479d3 100644 --- a/apps/frontend/src/renderer/components/Sidebar.tsx +++ b/apps/frontend/src/renderer/components/Sidebar.tsx @@ -20,7 +20,9 @@ import { Sparkles, GitBranch, HelpCircle, - Wrench + Wrench, + Cloud, + ListTodo } from 'lucide-react'; import { Button } from './ui/button'; import { ScrollArea } from './ui/scroll-area'; @@ -52,7 +54,7 @@ import { RateLimitIndicator } from './RateLimitIndicator'; import { ClaudeCodeStatusBadge } from './ClaudeCodeStatusBadge'; import type { Project, AutoBuildVersionInfo, GitStatus, ProjectEnvConfig } from '../../shared/types'; -export type SidebarView = 'kanban' | 'terminals' | 'roadmap' | 'context' | 'ideation' | 'github-issues' | 'gitlab-issues' | 'github-prs' | 'gitlab-merge-requests' | 'changelog' | 'insights' | 'worktrees' | 'agent-tools'; +export type SidebarView = 'kanban' | 'terminals' | 'roadmap' | 'context' | 'ideation' | 'github-issues' | 'gitlab-issues' | 'github-prs' | 'gitlab-merge-requests' | 'ado-work-items' | 'ado-pull-requests' | 'changelog' | 'insights' | 'worktrees' | 'agent-tools'; interface SidebarProps { onSettingsClick: () => void; @@ -93,6 +95,12 @@ const gitlabNavItems: NavItem[] = [ { id: 'gitlab-merge-requests', labelKey: 'navigation:items.gitlabMRs', icon: GitMerge, shortcut: 'R' } ]; +// Azure DevOps nav items shown when ADO is enabled +const adoNavItems: NavItem[] = [ + { id: 'ado-work-items', labelKey: 'navigation:items.adoWorkItems', icon: Cloud, shortcut: 'O' }, + { id: 'ado-pull-requests', labelKey: 'navigation:items.adoPRs', icon: ListTodo, shortcut: 'V' } +]; + export function Sidebar({ onSettingsClick, onNewTaskClick, @@ -136,7 +144,7 @@ export function Sidebar({ loadEnvConfig(); }, [selectedProject?.id, selectedProject?.autoBuildPath]); - // Compute visible nav items based on GitHub/GitLab enabled state + // Compute visible nav items based on GitHub/GitLab/ADO enabled state const visibleNavItems = useMemo(() => { const items = [...baseNavItems]; @@ -148,8 +156,12 @@ export function Sidebar({ items.push(...gitlabNavItems); } + if (envConfig?.adoEnabled) { + items.push(...adoNavItems); + } + return items; - }, [envConfig?.githubEnabled, envConfig?.gitlabEnabled]); + }, [envConfig?.githubEnabled, envConfig?.gitlabEnabled, envConfig?.adoEnabled]); // Keyboard shortcuts useEffect(() => { @@ -276,6 +288,7 @@ export function Sidebar({ key={item.id} onClick={() => handleNavClick(item.id)} disabled={!selectedProjectId} + aria-keyshortcuts={item.shortcut} className={cn( 'flex w-full items-center gap-3 rounded-lg px-3 py-2.5 text-sm transition-all duration-200', 'hover:bg-accent hover:text-accent-foreground', @@ -354,6 +367,7 @@ export function Sidebar({ variant="ghost" size="icon" onClick={() => window.open('https://github.com/AndyMik90/Auto-Claude/issues', '_blank')} + aria-label={t('tooltips.help')} > diff --git a/apps/frontend/src/renderer/components/SortableProjectTab.tsx b/apps/frontend/src/renderer/components/SortableProjectTab.tsx index dc53e991ad..d57cf1292c 100644 --- a/apps/frontend/src/renderer/components/SortableProjectTab.tsx +++ b/apps/frontend/src/renderer/components/SortableProjectTab.tsx @@ -1,7 +1,7 @@ import { useSortable } from '@dnd-kit/sortable'; import { CSS } from '@dnd-kit/utilities'; import { useTranslation } from 'react-i18next'; -import { Settings2, Archive } from 'lucide-react'; +import { Settings2 } from 'lucide-react'; import { cn } from '../lib/utils'; 
import { Tooltip, TooltipContent, TooltipTrigger } from './ui/tooltip'; import type { Project } from '../../shared/types'; @@ -15,9 +15,6 @@ interface SortableProjectTabProps { onClose: (e: React.MouseEvent) => void; // Optional control props for active tab onSettingsClick?: () => void; - showArchived?: boolean; - archivedCount?: number; - onToggleArchived?: () => void; } // Detect if running on macOS for keyboard shortcut display @@ -31,10 +28,7 @@ export function SortableProjectTab({ tabIndex, onSelect, onClose, - onSettingsClick, - showArchived, - archivedCount, - onToggleArchived + onSettingsClick }: SortableProjectTabProps) { const { t } = useTranslation('common'); // Build tooltip with keyboard shortcut hint (only for tabs 1-9) @@ -148,42 +142,6 @@ export function SortableProjectTab({ )} - - {/* Archive toggle button with badge - responsive sizing */} - {onToggleArchived && ( - - - - - - {showArchived ? t('projectTab.hideArchived') : t('projectTab.showArchived')} - - - )} )} @@ -202,7 +160,7 @@ export function SortableProjectTab({ isActive && 'opacity-100' )} onClick={onClose} - aria-label={t('projectTab.closeTab')} + aria-label={t('projectTab.closeTabAriaLabel')} > diff --git a/apps/frontend/src/renderer/components/SortableTaskCard.tsx b/apps/frontend/src/renderer/components/SortableTaskCard.tsx index a23bac9224..b830a1817d 100644 --- a/apps/frontend/src/renderer/components/SortableTaskCard.tsx +++ b/apps/frontend/src/renderer/components/SortableTaskCard.tsx @@ -3,11 +3,12 @@ import { useSortable } from '@dnd-kit/sortable'; import { CSS } from '@dnd-kit/utilities'; import { TaskCard } from './TaskCard'; import { cn } from '../lib/utils'; -import type { Task } from '../../shared/types'; +import type { Task, TaskStatus } from '../../shared/types'; interface SortableTaskCardProps { task: Task; onClick: () => void; + onStatusChange?: (newStatus: TaskStatus) => unknown; } // Custom comparator - only re-render when task or onClick actually changed @@ -19,11 +20,12 @@ function sortableTaskCardPropsAreEqual( // for the task object and onClick handler return ( prevProps.task === nextProps.task && - prevProps.onClick === nextProps.onClick + prevProps.onClick === nextProps.onClick && + prevProps.onStatusChange === nextProps.onStatusChange ); } -export const SortableTaskCard = memo(function SortableTaskCard({ task, onClick }: SortableTaskCardProps) { +export const SortableTaskCard = memo(function SortableTaskCard({ task, onClick, onStatusChange }: SortableTaskCardProps) { const { attributes, listeners, @@ -58,7 +60,7 @@ export const SortableTaskCard = memo(function SortableTaskCard({ task, onClick } {...attributes} {...listeners} > - + ); }, sortableTaskCardPropsAreEqual); diff --git a/apps/frontend/src/renderer/components/SortableTerminalWrapper.tsx b/apps/frontend/src/renderer/components/SortableTerminalWrapper.tsx new file mode 100644 index 0000000000..ad6f421da7 --- /dev/null +++ b/apps/frontend/src/renderer/components/SortableTerminalWrapper.tsx @@ -0,0 +1,83 @@ +import React from 'react'; +import { useSortable } from '@dnd-kit/sortable'; +import { CSS } from '@dnd-kit/utilities'; +import type { Task } from '../../shared/types'; +import { Terminal } from './Terminal'; +import { cn } from '../lib/utils'; + +interface SortableTerminalWrapperProps { + id: string; + cwd?: string; + projectPath?: string; + isActive: boolean; + onClose: () => void; + onActivate: () => void; + tasks: Task[]; + onNewTaskClick?: () => void; + terminalCount: number; + isExpanded?: boolean; + onToggleExpand?: () => 
void; +} + +export function SortableTerminalWrapper({ + id, + cwd, + projectPath, + isActive, + onClose, + onActivate, + tasks, + onNewTaskClick, + terminalCount, + isExpanded, + onToggleExpand, +}: SortableTerminalWrapperProps) { + const { + attributes, + listeners, + setNodeRef, + transform, + transition, + isDragging, + } = useSortable({ + id, + data: { + type: 'terminal-panel', + terminalId: id, + }, + }); + + const style = { + transform: CSS.Transform.toString(transform), + transition, + zIndex: isDragging ? 50 : undefined, + }; + + return ( +
+ +
+ ); +} diff --git a/apps/frontend/src/renderer/components/TaskCard.tsx b/apps/frontend/src/renderer/components/TaskCard.tsx index 87ee9751cb..f07db15b13 100644 --- a/apps/frontend/src/renderer/components/TaskCard.tsx +++ b/apps/frontend/src/renderer/components/TaskCard.tsx @@ -1,9 +1,17 @@ import { useState, useEffect, useRef, useCallback, memo, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { Play, Square, Clock, Zap, Target, Shield, Gauge, Palette, FileCode, Bug, Wrench, Loader2, AlertTriangle, RotateCcw, Archive } from 'lucide-react'; +import { Play, Square, Clock, Zap, Target, Shield, Gauge, Palette, FileCode, Bug, Wrench, Loader2, AlertTriangle, RotateCcw, Archive, MoreVertical } from 'lucide-react'; import { Card, CardContent } from './ui/card'; import { Badge } from './ui/badge'; import { Button } from './ui/button'; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuLabel, + DropdownMenuSeparator, + DropdownMenuTrigger, +} from './ui/dropdown-menu'; import { cn, formatRelativeTime, sanitizeMarkdownForDisplay } from '../lib/utils'; import { PhaseProgressIndicator } from './PhaseProgressIndicator'; import { @@ -16,10 +24,12 @@ import { TASK_PRIORITY_COLORS, TASK_PRIORITY_LABELS, EXECUTION_PHASE_LABELS, - EXECUTION_PHASE_BADGE_COLORS + EXECUTION_PHASE_BADGE_COLORS, + TASK_STATUS_COLUMNS, + TASK_STATUS_LABELS } from '../../shared/constants'; import { startTask, stopTask, checkTaskRunning, recoverStuckTask, isIncompleteHumanReview, archiveTasks } from '../stores/task-store'; -import type { Task, TaskCategory, ReviewReason } from '../../shared/types'; +import type { Task, TaskCategory, ReviewReason, TaskStatus } from '../../shared/types'; // Category icon mapping const CategoryIcon: Record = { @@ -37,6 +47,7 @@ const CategoryIcon: Record = { interface TaskCardProps { task: Task; onClick: () => void; + onStatusChange?: (newStatus: TaskStatus) => unknown; } // Custom comparator for React.memo - only re-render when relevant task data changes @@ -45,7 +56,7 @@ function taskCardPropsAreEqual(prevProps: TaskCardProps, nextProps: TaskCardProp const nextTask = nextProps.task; // Fast path: same reference - if (prevTask === nextTask && prevProps.onClick === nextProps.onClick) { + if (prevTask === nextTask && prevProps.onClick === nextProps.onClick && prevProps.onStatusChange === nextProps.onStatusChange) { return true; } @@ -83,7 +94,7 @@ function taskCardPropsAreEqual(prevProps: TaskCardProps, nextProps: TaskCardProp return isEqual; } -export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) { +export const TaskCard = memo(function TaskCard({ task, onClick, onStatusChange }: TaskCardProps) { const { t } = useTranslation('tasks'); const [isStuck, setIsStuck] = useState(false); const [isRecovering, setIsRecovering] = useState(false); @@ -100,8 +111,9 @@ export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) const isIncomplete = isIncompleteHumanReview(task); // Memoize expensive computations to avoid running on every render + // Truncate description for card display - full description shown in modal const sanitizedDescription = useMemo( - () => task.description ? sanitizeMarkdownForDisplay(task.description, 150) : null, + () => task.description ? 
sanitizeMarkdownForDisplay(task.description, 120) : null, [task.description] ); @@ -111,12 +123,40 @@ export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) [task.updatedAt] ); + // Memoize status menu items to avoid recreating on every render + const statusMenuItems = useMemo(() => { + if (!onStatusChange) return null; + return TASK_STATUS_COLUMNS.filter(status => status !== task.status).map((status) => ( + onStatusChange(status)} + > + {t(TASK_STATUS_LABELS[status])} + + )); + }, [task.status, onStatusChange, t]); + // Memoized stuck check function to avoid recreating on every render const performStuckCheck = useCallback(() => { + // IMPORTANT: If the execution phase is 'complete' or 'failed', the task is NOT stuck. + // It means the process has finished and status update is pending. + // This prevents false-positive "stuck" indicators when the process exits normally. + const currentPhase = task.executionProgress?.phase; + if (currentPhase === 'complete' || currentPhase === 'failed') { + setIsStuck(false); + return; + } + // Use requestIdleCallback for non-blocking check when available const doCheck = () => { checkTaskRunning(task.id).then((actuallyRunning) => { - setIsStuck(!actuallyRunning); + // Double-check the phase again in case it changed while waiting + const latestPhase = task.executionProgress?.phase; + if (latestPhase === 'complete' || latestPhase === 'failed') { + setIsStuck(false); + } else { + setIsStuck(!actuallyRunning); + } }); }; @@ -125,7 +165,7 @@ export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) } else { doCheck(); } - }, [task.id]); + }, [task.id, task.executionProgress?.phase]); // Check if task is stuck (status says in_progress but no actual process) // Add a longer grace period to avoid false positives during process spawn @@ -268,15 +308,24 @@ export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) onClick={onClick} > - {/* Header - improved visual hierarchy */} -
-
-          {task.title}
-
-
+          {/* Title - full width, no wrapper */}
+
+          {task.title}
+
+
+          {/* Description - sanitized to handle markdown content (memoized) */}
+          {sanitizedDescription && (
+            {sanitizedDescription}
+
+          )}
+
+          {/* Metadata badges */}
+          {(task.metadata || isStuck || isIncomplete || hasActiveExecution || reviewReasonInfo) && (
            {/* Stuck indicator - highest priority */}
            {isStuck && (
            )}
-
-
-
-          {/* Description - sanitized to handle markdown content (memoized) */}
-          {sanitizedDescription && (
-            {sanitizedDescription}
-
-          )}
-
-          {/* Metadata badges */}
-          {task.metadata && (
{/* Category badge with icon */} - {task.metadata.category && ( + {task.metadata?.category && ( )} {/* Impact badge - high visibility for important tasks */} - {task.metadata.impact && (task.metadata.impact === 'high' || task.metadata.impact === 'critical') && ( + {task.metadata?.impact && (task.metadata.impact === 'high' || task.metadata.impact === 'critical') && ( )} {/* Complexity badge */} - {task.metadata.complexity && ( + {task.metadata?.complexity && ( )} {/* Priority badge - only show urgent/high */} - {task.metadata.priority && (task.metadata.priority === 'urgent' || task.metadata.priority === 'high') && ( + {task.metadata?.priority && (task.metadata.priority === 'urgent' || task.metadata.priority === 'high') && ( )} {/* Security severity - always show */} - {task.metadata.securitySeverity && ( + {task.metadata?.securitySeverity && ( - {task.metadata.securitySeverity} severity + {task.metadata.securitySeverity} {t('metadata.severity')} )}
@@ -424,68 +460,92 @@ export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) {relativeTime}
- {/* Action buttons */} - {isStuck ? ( - - ) : isIncomplete ? ( - - ) : task.status === 'done' && !task.metadata?.archivedAt ? ( - - ) : (task.status === 'backlog' || task.status === 'in_progress') && ( - - )} +
+ {/* Action buttons */} + {isStuck ? ( + + ) : isIncomplete ? ( + + ) : task.status === 'done' && !task.metadata?.archivedAt ? ( + + ) : (task.status === 'backlog' || task.status === 'in_progress') && ( + + )} + + {/* Move to menu for keyboard accessibility */} + {statusMenuItems && ( + + + + + e.stopPropagation()}> + {t('actions.moveTo')} + + {statusMenuItems} + + + )} +
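The reworked stuck detection above guards on the execution phase twice: once before probing the process (a task whose phase is 'complete' or 'failed' exited normally and is merely waiting for its status write), and once after the async probe in case the phase flipped mid-await. The control flow in isolation — `isProcessRunning` stands in for the app's `checkTaskRunning` call, and the non-terminal phase names are assumptions:

```ts
type Phase = 'planning' | 'coding' | 'complete' | 'failed';

// Stand-in for checkTaskRunning (assumed signature).
declare function isProcessRunning(taskId: string): Promise<boolean>;

export async function computeIsStuck(
  taskId: string,
  getPhase: () => Phase | undefined
): Promise<boolean> {
  // Terminal phase: the process finished on purpose; never report stuck.
  const phase = getPhase();
  if (phase === 'complete' || phase === 'failed') return false;

  const running = await isProcessRunning(taskId);

  // Re-read: the phase may have become terminal while we awaited.
  const latest = getPhase();
  if (latest === 'complete' || latest === 'failed') return false;

  return !running;
}
```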
diff --git a/apps/frontend/src/renderer/components/TaskCreationWizard.tsx b/apps/frontend/src/renderer/components/TaskCreationWizard.tsx index be45fd17ed..4bbed28a5f 100644 --- a/apps/frontend/src/renderer/components/TaskCreationWizard.tsx +++ b/apps/frontend/src/renderer/components/TaskCreationWizard.tsx @@ -1,4 +1,5 @@ import { useState, useEffect, useCallback, useRef, useMemo, type ClipboardEvent, type DragEvent } from 'react'; +import { useTranslation } from 'react-i18next'; import { Loader2, ChevronDown, ChevronUp, Image as ImageIcon, X, RotateCcw, FolderTree, GitBranch } from 'lucide-react'; import { Dialog, @@ -59,6 +60,7 @@ export function TaskCreationWizard({ open, onOpenChange }: TaskCreationWizardProps) { + const { t } = useTranslation('tasks'); // Get selected agent profile from settings const { settings } = useSettingsStore(); const selectedProfile = DEFAULT_AGENT_PROFILES.find( @@ -622,11 +624,12 @@ export function TaskCreationWizard({ if (impact) metadata.impact = impact; if (model) metadata.model = model; if (thinkingLevel) metadata.thinkingLevel = thinkingLevel; - // Auto profile - per-phase configuration - if (profileId === 'auto') { + // All profiles now support per-phase configuration + // isAutoProfile indicates task uses phase-specific models/thinking + if (phaseModels && phaseThinking) { metadata.isAutoProfile = true; - if (phaseModels) metadata.phaseModels = phaseModels; - if (phaseThinking) metadata.phaseThinking = phaseThinking; + metadata.phaseModels = phaseModels; + metadata.phaseThinking = phaseThinking; } if (images.length > 0) metadata.attachedImages = images; if (allReferencedFiles.length > 0) metadata.referencedFiles = allReferencedFiles; @@ -796,6 +799,8 @@ export function TaskCreationWizard({ onDrop={handleTextareaDrop} rows={5} disabled={isCreating} + aria-required="true" + aria-describedby="description-help" className={cn( "resize-y min-h-[120px] max-h-[400px] relative bg-transparent", // Visual feedback when dragging over textarea @@ -814,7 +819,7 @@ export function TaskCreationWizard({ /> )} -

+

Files and images can be copy/pasted or dragged & dropped into the description.
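The wizard's description field now pairs `aria-required` with `aria-describedby="description-help"`, tying the textarea to the help paragraph above so screen readers announce the hint as part of the field's description. The pairing in isolation, with illustrative styling:

```tsx
import React from 'react';

// aria-describedby points at the id of the hint paragraph; assistive tech
// reads the hint when the textarea receives focus.
export function DescriptionField() {
  return (
    <>
      <textarea rows={5} aria-required="true" aria-describedby="description-help" />
      <p id="description-help" className="text-xs text-muted-foreground">
        Files and images can be copy/pasted or dragged & dropped into the description.
      </p>
    </>
  );
}
```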

@@ -851,6 +856,7 @@ export function TaskCreationWizard({ e.stopPropagation(); setImages(prev => prev.filter(img => img.id !== image.id)); }} + aria-label={t('images.removeImageAriaLabel', { filename: image.filename })} > @@ -914,6 +920,8 @@ export function TaskCreationWizard({ 'w-full justify-between py-2 px-3 rounded-md hover:bg-muted/50' )} disabled={isCreating} + aria-expanded={showAdvanced} + aria-controls="advanced-options-section" > Classification (optional) {showAdvanced ? ( @@ -925,7 +933,7 @@ export function TaskCreationWizard({ {/* Advanced Options */} {showAdvanced && ( -
+
{/* Category */}
@@ -1057,6 +1065,8 @@ export function TaskCreationWizard({ 'w-full justify-between py-2 px-3 rounded-md hover:bg-muted/50' )} disabled={isCreating} + aria-expanded={showGitOptions} + aria-controls="git-options-section" > @@ -1076,7 +1086,7 @@ export function TaskCreationWizard({ {/* Git Options */} {showGitOptions && ( -
+